1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "logging/log.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "memory/allocation.inline.hpp"
  41 #include "memory/filemap.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "services/attachListener.hpp"
  67 #include "services/memTracker.hpp"
  68 #include "services/runtimeService.hpp"
  69 #include "utilities/align.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/growableArray.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 #include "symbolengine.hpp"
  77 #include "windbghelp.hpp"
  78 
  79 
  80 #ifdef _DEBUG
  81 #include <crtdbg.h>
  82 #endif
  83 
  84 
  85 #include <windows.h>
  86 #include <sys/types.h>
  87 #include <sys/stat.h>
  88 #include <sys/timeb.h>
  89 #include <objidl.h>
  90 #include <shlobj.h>
  91 
  92 #include <malloc.h>
  93 #include <signal.h>
  94 #include <direct.h>
  95 #include <errno.h>
  96 #include <fcntl.h>
  97 #include <io.h>
  98 #include <process.h>              // For _beginthreadex(), _endthreadex()
  99 #include <imagehlp.h>             // For os::dll_address_to_function_name
// for enumerating loaded modules (DLLs)
 101 #include <vdmdbg.h>
 102 #include <psapi.h>
 103 #include <mmsystem.h>
 104 #include <winsock2.h>
 105 
 106 // for timer info max values which include all bits
 107 #define ALL_64_BITS CONST64(-1)
 108 
 109 // For DLL loading/load error detection
// Offsets and sizes within the PE COFF file format
 111 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 112 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 113 
 114 static HANDLE main_process;
 115 static HANDLE main_thread;
 116 static int    main_thread_id;
 117 
 118 static FILETIME process_creation_time;
 119 static FILETIME process_exit_time;
 120 static FILETIME process_user_time;
 121 static FILETIME process_kernel_time;
 122 
 123 #ifdef _M_AMD64
 124   #define __CPU__ amd64
 125 #else
 126   #define __CPU__ i486
 127 #endif
 128 
 129 #if INCLUDE_AOT
 130 PVOID  topLevelVectoredExceptionHandler = NULL;
 131 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 132 #endif
 133 
 134 // save DLL module handle, used by GetModuleFileName
 135 
 136 HINSTANCE vm_lib_handle;
 137 
 138 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 139   switch (reason) {
 140   case DLL_PROCESS_ATTACH:
 141     vm_lib_handle = hinst;
 142     if (ForceTimeHighResolution) {
 143       timeBeginPeriod(1L);
 144     }
 145     WindowsDbgHelp::pre_initialize();
 146     SymbolEngine::pre_initialize();
 147     break;
 148   case DLL_PROCESS_DETACH:
 149     if (ForceTimeHighResolution) {
 150       timeEndPeriod(1L);
 151     }
 152 #if INCLUDE_AOT
 153     if (topLevelVectoredExceptionHandler != NULL) {
 154       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 155       topLevelVectoredExceptionHandler = NULL;
 156     }
 157 #endif
 158     break;
 159   default:
 160     break;
 161   }
 162   return true;
 163 }
 164 
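// Converts a FILETIME (a 64-bit count of 100ns units split across the
// dwLowDateTime/dwHighDateTime fields) into seconds as a double; the
// 'split' constant is 1e7 because there are 10^7 100ns units per second.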
 165 static inline double fileTimeAsDouble(FILETIME* time) {
 166   const double high  = (double) ((unsigned int) ~0);
 167   const double split = 10000000.0;
 168   double result = (time->dwLowDateTime / split) +
 169                    time->dwHighDateTime * (high/split);
 170   return result;
 171 }
 172 
 173 // Implementation of os
 174 
 175 bool os::unsetenv(const char* name) {
 176   assert(name != NULL, "Null pointer");
 177   return (SetEnvironmentVariable(name, NULL) == TRUE);
 178 }
 179 
 180 // No setuid programs under Windows.
 181 bool os::have_special_privileges() {
 182   return false;
 183 }
 184 
 185 
// This method is a periodic task used to check for misbehaving JNI applications
// under CheckJNI; any periodic checks can be added here.
// On Windows it currently does nothing.
 189 void os::run_periodic_checks() {
 190   return;
 191 }
 192 
 193 // previous UnhandledExceptionFilter, if there is one
 194 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 195 
 196 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 197 
 198 void os::init_system_properties_values() {
 199   // sysclasspath, java_home, dll_dir
 200   {
 201     char *home_path;
 202     char *dll_path;
 203     char *pslash;
 204     const char *bin = "\\bin";
 205     char home_dir[MAX_PATH + 1];
 206     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 207 
 208     if (alt_home_dir != NULL)  {
 209       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 210       home_dir[MAX_PATH] = '\0';
 211     } else {
 212       os::jvm_path(home_dir, sizeof(home_dir));
 213       // Found the full path to jvm.dll.
 214       // Now cut the path to <java_home>/jre if we can.
 215       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 216       pslash = strrchr(home_dir, '\\');
 217       if (pslash != NULL) {
 218         *pslash = '\0';                   // get rid of \{client|server}
 219         pslash = strrchr(home_dir, '\\');
 220         if (pslash != NULL) {
 221           *pslash = '\0';                 // get rid of \bin
 222         }
 223       }
 224     }
 225 
 226     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 227     if (home_path == NULL) {
 228       return;
 229     }
 230     strcpy(home_path, home_dir);
 231     Arguments::set_java_home(home_path);
 232     FREE_C_HEAP_ARRAY(char, home_path);
 233 
 234     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 235                                 mtInternal);
 236     if (dll_path == NULL) {
 237       return;
 238     }
 239     strcpy(dll_path, home_dir);
 240     strcat(dll_path, bin);
 241     Arguments::set_dll_dir(dll_path);
 242     FREE_C_HEAP_ARRAY(char, dll_path);
 243 
 244     if (!set_boot_path('\\', ';')) {
 245       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 246     }
 247   }
 248 
 249 // library_path
 250 #define EXT_DIR "\\lib\\ext"
 251 #define BIN_DIR "\\bin"
 252 #define PACKAGE_DIR "\\Sun\\Java"
 253   {
 254     // Win32 library search order (See the documentation for LoadLibrary):
 255     //
    // 1. The directory from which the application is loaded.
 257     // 2. The system wide Java Extensions directory (Java only)
 258     // 3. System directory (GetSystemDirectory)
 259     // 4. Windows directory (GetWindowsDirectory)
 260     // 5. The PATH environment variable
 261     // 6. The current directory
 262 
 263     char *library_path;
 264     char tmp[MAX_PATH];
 265     char *path_str = ::getenv("PATH");
 266 
 267     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 268                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 269 
 270     library_path[0] = '\0';
 271 
 272     GetModuleFileName(NULL, tmp, sizeof(tmp));
 273     *(strrchr(tmp, '\\')) = '\0';
 274     strcat(library_path, tmp);
 275 
 276     GetWindowsDirectory(tmp, sizeof(tmp));
 277     strcat(library_path, ";");
 278     strcat(library_path, tmp);
 279     strcat(library_path, PACKAGE_DIR BIN_DIR);
 280 
 281     GetSystemDirectory(tmp, sizeof(tmp));
 282     strcat(library_path, ";");
 283     strcat(library_path, tmp);
 284 
 285     GetWindowsDirectory(tmp, sizeof(tmp));
 286     strcat(library_path, ";");
 287     strcat(library_path, tmp);
 288 
 289     if (path_str) {
 290       strcat(library_path, ";");
 291       strcat(library_path, path_str);
 292     }
 293 
 294     strcat(library_path, ";.");
 295 
 296     Arguments::set_library_path(library_path);
 297     FREE_C_HEAP_ARRAY(char, library_path);
 298   }
 299 
 300   // Default extensions directory
 301   {
 302     char path[MAX_PATH];
 303     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 304     GetWindowsDirectory(path, MAX_PATH);
 305     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 306             path, PACKAGE_DIR, EXT_DIR);
 307     Arguments::set_ext_dirs(buf);
 308   }
 309   #undef EXT_DIR
 310   #undef BIN_DIR
 311   #undef PACKAGE_DIR
 312 
 313 #ifndef _WIN64
 314   // set our UnhandledExceptionFilter and save any previous one
 315   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 316 #endif
 317 
 318   // Done
 319   return;
 320 }
 321 
 322 void os::breakpoint() {
 323   DebugBreak();
 324 }
 325 
 326 // Invoked from the BREAKPOINT Macro
 327 extern "C" void breakpoint() {
 328   os::breakpoint();
 329 }
 330 
// The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 332 // So far, this method is only used by Native Memory Tracking, which is
 333 // only supported on Windows XP or later.
 334 //
 335 int os::get_native_stack(address* stack, int frames, int toSkip) {
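  // Pass toSkip + 1 so that this os::get_native_stack frame itself is skipped
  // in addition to the frames the caller asked to skip.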
 336   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 337   for (int index = captured; index < frames; index ++) {
 338     stack[index] = NULL;
 339   }
 340   return captured;
 341 }
 342 
 343 
 344 // os::current_stack_base()
 345 //
 346 //   Returns the base of the stack, which is the stack's
 347 //   starting address.  This function must be called
 348 //   while running on the stack of the thread being queried.
 349 
 350 address os::current_stack_base() {
 351   MEMORY_BASIC_INFORMATION minfo;
 352   address stack_bottom;
 353   size_t stack_size;
 354 
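  // Query the region containing the address of the local variable 'minfo';
  // since 'minfo' lives on the current thread's stack, the returned
  // AllocationBase is the bottom of this thread's stack reservation.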
 355   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 356   stack_bottom =  (address)minfo.AllocationBase;
 357   stack_size = minfo.RegionSize;
 358 
 359   // Add up the sizes of all the regions with the same
 360   // AllocationBase.
 361   while (1) {
 362     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 363     if (stack_bottom == (address)minfo.AllocationBase) {
 364       stack_size += minfo.RegionSize;
 365     } else {
 366       break;
 367     }
 368   }
 369   return stack_bottom + stack_size;
 370 }
 371 
 372 size_t os::current_stack_size() {
 373   size_t sz;
 374   MEMORY_BASIC_INFORMATION minfo;
 375   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 376   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 377   return sz;
 378 }
 379 
 380 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 381   MEMORY_BASIC_INFORMATION minfo;
 382   committed_start = NULL;
 383   committed_size = 0;
 384   address top = start + size;
 385   const address start_addr = start;
 386   while (start < top) {
 387     VirtualQuery(start, &minfo, sizeof(minfo));
 388     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 389       if (committed_start != NULL) {
 390         break;
 391       }
 392     } else {  // committed
 393       if (committed_start == NULL) {
 394         committed_start = start;
 395       }
 396       size_t offset = start - (address)minfo.BaseAddress;
 397       committed_size += minfo.RegionSize - offset;
 398     }
 399     start = (address)minfo.BaseAddress + minfo.RegionSize;
 400   }
 401 
 402   if (committed_start == NULL) {
 403     assert(committed_size == 0, "Sanity");
 404     return false;
 405   } else {
 406     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 407     // current region may go beyond the limit, trim to the limit
 408     committed_size = MIN2(committed_size, size_t(top - committed_start));
 409     return true;
 410   }
 411 }
 412 
 413 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 414   const struct tm* time_struct_ptr = localtime(clock);
 415   if (time_struct_ptr != NULL) {
 416     *res = *time_struct_ptr;
 417     return res;
 418   }
 419   return NULL;
 420 }
 421 
 422 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 423   const struct tm* time_struct_ptr = gmtime(clock);
 424   if (time_struct_ptr != NULL) {
 425     *res = *time_struct_ptr;
 426     return res;
 427   }
 428   return NULL;
 429 }
 430 
 431 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 432 
 433 // Thread start routine for all newly created threads
 434 static unsigned __stdcall thread_native_entry(Thread* thread) {
 435 
 436   thread->record_stack_base_and_size();
 437 
 438   // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
 440   // cache lines. The threads can be either from the same JVM instance, or
 441   // from different JVM instances. The benefit is especially true for
 442   // processors with hyperthreading technology.
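  // Below, ((pid ^ counter) & 7) * 128 shifts the stack pointer by
  // 0..896 bytes in 128-byte steps.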
 443   static int counter = 0;
 444   int pid = os::current_process_id();
 445   _alloca(((pid ^ counter++) & 7) * 128);
 446 
 447   thread->initialize_thread_current();
 448 
 449   OSThread* osthr = thread->osthread();
 450   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 451 
 452   if (UseNUMA) {
 453     int lgrp_id = os::numa_get_group_id();
 454     if (lgrp_id != -1) {
 455       thread->set_lgrp_id(lgrp_id);
 456     }
 457   }
 458 
 459   // Diagnostic code to investigate JDK-6573254
 460   int res = 30115;  // non-java thread
 461   if (thread->is_Java_thread()) {
 462     res = 20115;    // java thread
 463   }
 464 
 465   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 466 
  // Install a win32 structured exception handler around every thread created
  // by the VM, so the VM can generate an error dump when an exception occurs
  // in a non-Java thread (e.g. the VM thread).
 470   __try {
 471     thread->call_run();
 472   } __except(topLevelExceptionFilter(
 473                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 474     // Nothing to do.
 475   }
 476 
 477   // Note: at this point the thread object may already have deleted itself.
 478   // Do not dereference it from here on out.
 479 
 480   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 481 
 482   // One less thread is executing
 483   // When the VMThread gets here, the main thread may have already exited
 484   // which frees the CodeHeap containing the Atomic::add code
 485   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 486     Atomic::dec(&os::win32::_os_thread_count);
 487   }
 488 
 489   // Thread must not return from exit_process_or_thread(), but if it does,
 490   // let it proceed to exit normally
 491   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 492 }
 493 
 494 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 495                                   int thread_id) {
 496   // Allocate the OSThread object
 497   OSThread* osthread = new OSThread(NULL, NULL);
 498   if (osthread == NULL) return NULL;
 499 
 500   // Initialize the JDK library's interrupt event.
 501   // This should really be done when OSThread is constructed,
 502   // but there is no way for a constructor to report failure to
 503   // allocate the event.
 504   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 505   if (interrupt_event == NULL) {
 506     delete osthread;
 507     return NULL;
 508   }
 509   osthread->set_interrupt_event(interrupt_event);
 510 
 511   // Store info on the Win32 thread into the OSThread
 512   osthread->set_thread_handle(thread_handle);
 513   osthread->set_thread_id(thread_id);
 514 
 515   if (UseNUMA) {
 516     int lgrp_id = os::numa_get_group_id();
 517     if (lgrp_id != -1) {
 518       thread->set_lgrp_id(lgrp_id);
 519     }
 520   }
 521 
 522   // Initial thread state is INITIALIZED, not SUSPENDED
 523   osthread->set_state(INITIALIZED);
 524 
 525   return osthread;
 526 }
 527 
 528 
 529 bool os::create_attached_thread(JavaThread* thread) {
 530 #ifdef ASSERT
 531   thread->verify_not_published();
 532 #endif
 533   HANDLE thread_h;
 534   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 535                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 536     fatal("DuplicateHandle failed\n");
 537   }
 538   OSThread* osthread = create_os_thread(thread, thread_h,
 539                                         (int)current_thread_id());
 540   if (osthread == NULL) {
 541     return false;
 542   }
 543 
 544   // Initial thread state is RUNNABLE
 545   osthread->set_state(RUNNABLE);
 546 
 547   thread->set_osthread(osthread);
 548 
 549   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 550     os::current_thread_id());
 551 
 552   return true;
 553 }
 554 
 555 bool os::create_main_thread(JavaThread* thread) {
 556 #ifdef ASSERT
 557   thread->verify_not_published();
 558 #endif
 559   if (_starting_thread == NULL) {
 560     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 561     if (_starting_thread == NULL) {
 562       return false;
 563     }
 564   }
 565 
  // The primordial thread is runnable from the start.
 567   _starting_thread->set_state(RUNNABLE);
 568 
 569   thread->set_osthread(_starting_thread);
 570   return true;
 571 }
 572 
 573 // Helper function to trace _beginthreadex attributes,
 574 //  similar to os::Posix::describe_pthread_attr()
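//  Example output (illustrative, assuming a sufficiently large buffer):
//    "stacksize: 1024k, flags: CREATE_SUSPENDED STACK_SIZE_PARAM_IS_A_RESERVATION "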
 575 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 576                                                size_t stacksize, unsigned initflag) {
 577   stringStream ss(buf, buflen);
 578   if (stacksize == 0) {
 579     ss.print("stacksize: default, ");
 580   } else {
 581     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 582   }
 583   ss.print("flags: ");
 584   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 585   #define ALL(X) \
 586     X(CREATE_SUSPENDED) \
 587     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 588   ALL(PRINT_FLAG)
 589   #undef ALL
 590   #undef PRINT_FLAG
 591   return buf;
 592 }
 593 
 594 // Allocate and initialize a new OSThread
 595 bool os::create_thread(Thread* thread, ThreadType thr_type,
 596                        size_t stack_size) {
 597   unsigned thread_id;
 598 
 599   // Allocate the OSThread object
 600   OSThread* osthread = new OSThread(NULL, NULL);
 601   if (osthread == NULL) {
 602     return false;
 603   }
 604 
 605   // Initialize the JDK library's interrupt event.
 606   // This should really be done when OSThread is constructed,
 607   // but there is no way for a constructor to report failure to
 608   // allocate the event.
 609   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 610   if (interrupt_event == NULL) {
 611     delete osthread;
 612     return false;
 613   }
 614   osthread->set_interrupt_event(interrupt_event);
 615   // We don't call set_interrupted(false) as it will trip the assert in there
 616   // as we are not operating on the current thread. We don't need to call it
 617   // because the initial state is already correct.
 618 
 619   thread->set_osthread(osthread);
 620 
 621   if (stack_size == 0) {
 622     switch (thr_type) {
 623     case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
 625       if (JavaThread::stack_size_at_create() > 0) {
 626         stack_size = JavaThread::stack_size_at_create();
 627       }
 628       break;
 629     case os::compiler_thread:
 630       if (CompilerThreadStackSize > 0) {
 631         stack_size = (size_t)(CompilerThreadStackSize * K);
 632         break;
 633       } // else fall through:
 634         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 635     case os::vm_thread:
 636     case os::pgc_thread:
 637     case os::cgc_thread:
 638     case os::watcher_thread:
 639       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 640       break;
 641     }
 642   }
 643 
 644   // Create the Win32 thread
 645   //
  // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
  // does not specify the stack size. Instead, it specifies the size of the
  // initially committed space. The stack size is determined by the
  // PE header in the executable. If the committed "stack_size" is larger
  // than the default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example, if the launcher has a default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all; it only affects the initial
  // commitment. On the other hand, specifying a 'stack_size' larger than the
  // default value may cause a significant increase in memory usage, because
  // not only is the stack space rounded up to a multiple of 1MB, but the
  // entire space is also committed upfront.
  //
  // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that makes 'stack_size' be treated as the stack size.
  // However, we are not supposed to call CreateThread() directly according to
  // MSDN, because the JVM uses the C runtime library. The good news is that
  // the flag appears to work with _beginthreadex() as well.
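  //
  // Illustrative sketch of the semantics described above (not executed here;
  // 'entry', 'arg' and 'tid' are placeholder names):
  //   _beginthreadex(NULL, 64*K, entry, arg, STACK_SIZE_PARAM_IS_A_RESERVATION, &tid)
  //     -> reserves roughly a 64KB stack, regardless of the PE header default;
  //   _beginthreadex(NULL, 64*K, entry, arg, 0, &tid)
  //     -> the PE header default (e.g. 1MB) is reserved and 64KB is committed initially.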
 664 
 665   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 666   HANDLE thread_handle =
 667     (HANDLE)_beginthreadex(NULL,
 668                            (unsigned)stack_size,
 669                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 670                            thread,
 671                            initflag,
 672                            &thread_id);
 673 
 674   char buf[64];
 675   if (thread_handle != NULL) {
 676     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 677       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 678   } else {
 679     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 680       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 681     // Log some OS information which might explain why creating the thread failed.
 682     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 683     LogStream st(Log(os, thread)::info());
 684     os::print_memory_info(&st);
 685   }
 686 
 687   if (thread_handle == NULL) {
 688     // Need to clean up stuff we've allocated so far
 689     thread->set_osthread(NULL);
 690     delete osthread;
 691     return false;
 692   }
 693 
 694   Atomic::inc(&os::win32::_os_thread_count);
 695 
 696   // Store info on the Win32 thread into the OSThread
 697   osthread->set_thread_handle(thread_handle);
 698   osthread->set_thread_id(thread_id);
 699 
 700   // Initial thread state is INITIALIZED, not SUSPENDED
 701   osthread->set_state(INITIALIZED);
 702 
 703   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 704   return true;
 705 }
 706 
 707 
 708 // Free Win32 resources related to the OSThread
 709 void os::free_thread(OSThread* osthread) {
 710   assert(osthread != NULL, "osthread not set");
 711 
 712   // We are told to free resources of the argument thread,
 713   // but we can only really operate on the current thread.
 714   assert(Thread::current()->osthread() == osthread,
 715          "os::free_thread but not current thread");
 716 
 717   CloseHandle(osthread->thread_handle());
 718   delete osthread;
 719 }
 720 
 721 static jlong first_filetime;
 722 static jlong initial_performance_count;
 723 static jlong performance_frequency;
 724 
 725 
 726 jlong as_long(LARGE_INTEGER x) {
 727   jlong result = 0; // initialization to avoid warning
 728   set_high(&result, x.HighPart);
 729   set_low(&result, x.LowPart);
 730   return result;
 731 }
 732 
 733 
 734 jlong os::elapsed_counter() {
 735   LARGE_INTEGER count;
 736   QueryPerformanceCounter(&count);
 737   return as_long(count) - initial_performance_count;
 738 }
 739 
 740 
 741 jlong os::elapsed_frequency() {
 742   return performance_frequency;
 743 }
 744 
 745 
 746 julong os::available_memory() {
 747   return win32::available_memory();
 748 }
 749 
 750 julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
  // incorrect value if total memory is larger than 4GB
 753   MEMORYSTATUSEX ms;
 754   ms.dwLength = sizeof(ms);
 755   GlobalMemoryStatusEx(&ms);
 756 
 757   return (julong)ms.ullAvailPhys;
 758 }
 759 
 760 julong os::physical_memory() {
 761   return win32::physical_memory();
 762 }
 763 
 764 bool os::has_allocatable_memory_limit(julong* limit) {
 765   MEMORYSTATUSEX ms;
 766   ms.dwLength = sizeof(ms);
 767   GlobalMemoryStatusEx(&ms);
 768 #ifdef _LP64
 769   *limit = (julong)ms.ullAvailVirtual;
 770   return true;
 771 #else
 772   // Limit to 1400m because of the 2gb address space wall
 773   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 774   return true;
 775 #endif
 776 }
 777 
 778 int os::active_processor_count() {
 779   // User has overridden the number of active processors
 780   if (ActiveProcessorCount > 0) {
 781     log_trace(os)("active_processor_count: "
 782                   "active processor count set by user : %d",
 783                   ActiveProcessorCount);
 784     return ActiveProcessorCount;
 785   }
 786 
 787   DWORD_PTR lpProcessAffinityMask = 0;
 788   DWORD_PTR lpSystemAffinityMask = 0;
 789   int proc_count = processor_count();
 790   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 791       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // The number of active processors is the number of set bits in the process affinity mask
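    // Each iteration of the loop below clears the lowest set bit
    // (Kernighan's population-count trick), so 'bitcount' ends up being
    // the number of processors this process may run on.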
 793     int bitcount = 0;
 794     while (lpProcessAffinityMask != 0) {
 795       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 796       bitcount++;
 797     }
 798     return bitcount;
 799   } else {
 800     return proc_count;
 801   }
 802 }
 803 
 804 void os::set_native_thread_name(const char *name) {
 805 
 806   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 807   //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; the debugger must observe
  // the exception raised below in order to show the correct name.
 811 
 812   // If there is no debugger attached skip raising the exception
 813   if (!IsDebuggerPresent()) {
 814     return;
 815   }
 816 
 817   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 818   struct {
 819     DWORD dwType;     // must be 0x1000
 820     LPCSTR szName;    // pointer to name (in user addr space)
 821     DWORD dwThreadID; // thread ID (-1=caller thread)
 822     DWORD dwFlags;    // reserved for future use, must be zero
 823   } info;
 824 
 825   info.dwType = 0x1000;
 826   info.szName = name;
 827   info.dwThreadID = -1;
 828   info.dwFlags = 0;
 829 
 830   __try {
 831     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 832   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 833 }
 834 
 835 bool os::distribute_processes(uint length, uint* distribution) {
 836   // Not yet implemented.
 837   return false;
 838 }
 839 
 840 bool os::bind_to_processor(uint processor_id) {
 841   // Not yet implemented.
 842   return false;
 843 }
 844 
 845 void os::win32::initialize_performance_counter() {
 846   LARGE_INTEGER count;
 847   QueryPerformanceFrequency(&count);
 848   performance_frequency = as_long(count);
 849   QueryPerformanceCounter(&count);
 850   initial_performance_count = as_long(count);
 851 }
 852 
 853 
 854 double os::elapsedTime() {
 855   return (double) elapsed_counter() / (double) elapsed_frequency();
 856 }
 857 
 858 
 859 // Windows format:
 860 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 861 // Java format:
 862 //   Java standards require the number of milliseconds since 1/1/1970
 863 
 864 // Constant offset - calculated using offset()
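// Derivation (for reference): 1601-01-01 to 1970-01-01 spans 369 years including
// 89 leap days = 134774 days = 11,644,473,600 seconds, i.e. 116444736000000000
// units of 100ns. Debug builds re-verify this in offset() below.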
 865 static jlong  _offset   = 116444736000000000;
 866 // Fake time counter for reproducible results when debugging
 867 static jlong  fake_time = 0;
 868 
 869 #ifdef ASSERT
 870 // Just to be safe, recalculate the offset in debug mode
 871 static jlong _calculated_offset = 0;
 872 static int   _has_calculated_offset = 0;
 873 
 874 jlong offset() {
 875   if (_has_calculated_offset) return _calculated_offset;
 876   SYSTEMTIME java_origin;
 877   java_origin.wYear          = 1970;
 878   java_origin.wMonth         = 1;
 879   java_origin.wDayOfWeek     = 0; // ignored
 880   java_origin.wDay           = 1;
 881   java_origin.wHour          = 0;
 882   java_origin.wMinute        = 0;
 883   java_origin.wSecond        = 0;
 884   java_origin.wMilliseconds  = 0;
 885   FILETIME jot;
 886   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 887     fatal("Error = %d\nWindows error", GetLastError());
 888   }
 889   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 890   _has_calculated_offset = 1;
 891   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 892   return _calculated_offset;
 893 }
 894 #else
 895 jlong offset() {
 896   return _offset;
 897 }
 898 #endif
 899 
 900 jlong windows_to_java_time(FILETIME wt) {
 901   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 902   return (a - offset()) / 10000;
 903 }
 904 
// Returns time ticks in tenths of microseconds (100ns units)
 906 jlong windows_to_time_ticks(FILETIME wt) {
 907   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 908   return (a - offset());
 909 }
 910 
 911 FILETIME java_to_windows_time(jlong l) {
 912   jlong a = (l * 10000) + offset();
 913   FILETIME result;
 914   result.dwHighDateTime = high(a);
 915   result.dwLowDateTime  = low(a);
 916   return result;
 917 }
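// Illustrative round trip: java_to_windows_time(windows_to_java_time(wt))
// reproduces wt truncated to whole milliseconds, since the sub-millisecond
// 100ns remainder is discarded by the integer division in windows_to_java_time().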
 918 
 919 bool os::supports_vtime() { return true; }
 920 bool os::enable_vtime() { return false; }
 921 bool os::vtime_enabled() { return false; }
 922 
 923 double os::elapsedVTime() {
 924   FILETIME created;
 925   FILETIME exited;
 926   FILETIME kernel;
 927   FILETIME user;
 928   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 929     // the resolution of windows_to_java_time() should be sufficient (ms)
 930     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 931   } else {
 932     return elapsedTime();
 933   }
 934 }
 935 
 936 jlong os::javaTimeMillis() {
 937   FILETIME wt;
 938   GetSystemTimeAsFileTime(&wt);
 939   return windows_to_java_time(wt);
 940 }
 941 
 942 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 943   FILETIME wt;
 944   GetSystemTimeAsFileTime(&wt);
  jlong ticks = windows_to_time_ticks(wt); // 100ns units since the Java epoch
  jlong secs = jlong(ticks / 10000000); // 10^7 100ns units per second
 947   seconds = secs;
 948   nanos = jlong(ticks - (secs*10000000)) * 100;
 949 }
 950 
 951 jlong os::javaTimeNanos() {
 952     LARGE_INTEGER current_count;
 953     QueryPerformanceCounter(&current_count);
 954     double current = as_long(current_count);
 955     double freq = performance_frequency;
 956     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 957     return time;
 958 }
 959 
 960 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 961   jlong freq = performance_frequency;
 962   if (freq < NANOSECS_PER_SEC) {
 963     // the performance counter is 64 bits and we will
 964     // be multiplying it -- so no wrap in 64 bits
 965     info_ptr->max_value = ALL_64_BITS;
 966   } else if (freq > NANOSECS_PER_SEC) {
 967     // use the max value the counter can reach to
 968     // determine the max value which could be returned
 969     julong max_counter = (julong)ALL_64_BITS;
 970     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 971   } else {
 972     // the performance counter is 64 bits and we will
 973     // be using it directly -- so no wrap in 64 bits
 974     info_ptr->max_value = ALL_64_BITS;
 975   }
 976 
 977   // using a counter, so no skipping
 978   info_ptr->may_skip_backward = false;
 979   info_ptr->may_skip_forward = false;
 980 
 981   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 982 }
 983 
 984 char* os::local_time_string(char *buf, size_t buflen) {
 985   SYSTEMTIME st;
 986   GetLocalTime(&st);
 987   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 988                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 989   return buf;
 990 }
 991 
 992 bool os::getTimesSecs(double* process_real_time,
 993                       double* process_user_time,
 994                       double* process_system_time) {
 995   HANDLE h_process = GetCurrentProcess();
 996   FILETIME create_time, exit_time, kernel_time, user_time;
 997   BOOL result = GetProcessTimes(h_process,
 998                                 &create_time,
 999                                 &exit_time,
1000                                 &kernel_time,
1001                                 &user_time);
1002   if (result != 0) {
1003     FILETIME wt;
1004     GetSystemTimeAsFileTime(&wt);
1005     jlong rtc_millis = windows_to_java_time(wt);
1006     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
1007     *process_user_time =
1008       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
1009     *process_system_time =
1010       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1011     return true;
1012   } else {
1013     return false;
1014   }
1015 }
1016 
1017 void os::shutdown() {
1018   // allow PerfMemory to attempt cleanup of any persistent resources
1019   perfMemory_exit();
1020 
1021   // flush buffered output, finish log files
1022   ostream_abort();
1023 
1024   // Check for abort hook
1025   abort_hook_t abort_hook = Arguments::abort_hook();
1026   if (abort_hook != NULL) {
1027     abort_hook();
1028   }
1029 }
1030 
1031 
1032 static HANDLE dumpFile = NULL;
1033 
1034 // Check if dump file can be created.
1035 void os::check_dump_limit(char* buffer, size_t buffsz) {
1036   bool status = true;
1037   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1038     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1039     status = false;
1040   }
1041 
1042 #ifndef ASSERT
1043   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1044     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1045     status = false;
1046   }
1047 #endif
1048 
1049   if (status) {
1050     const char* cwd = get_current_directory(NULL, 0);
1051     int pid = current_process_id();
1052     if (cwd != NULL) {
1053       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1054     } else {
1055       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1056     }
1057 
1058     if (dumpFile == NULL &&
1059        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1060                  == INVALID_HANDLE_VALUE) {
1061       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1062       status = false;
1063     }
1064   }
1065   VMError::record_coredump_status(buffer, status);
1066 }
1067 
1068 void os::abort(bool dump_core, void* siginfo, const void* context) {
1069   EXCEPTION_POINTERS ep;
1070   MINIDUMP_EXCEPTION_INFORMATION mei;
1071   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1072 
1073   HANDLE hProcess = GetCurrentProcess();
1074   DWORD processId = GetCurrentProcessId();
1075   MINIDUMP_TYPE dumpType;
1076 
1077   shutdown();
1078   if (!dump_core || dumpFile == NULL) {
1079     if (dumpFile != NULL) {
1080       CloseHandle(dumpFile);
1081     }
1082     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1083   }
1084 
1085   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1086     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1087 
1088   if (siginfo != NULL && context != NULL) {
1089     ep.ContextRecord = (PCONTEXT) context;
1090     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1091 
1092     mei.ThreadId = GetCurrentThreadId();
1093     mei.ExceptionPointers = &ep;
1094     pmei = &mei;
1095   } else {
1096     pmei = NULL;
1097   }
1098 
  // Older versions of dbghelp.dll (the one shipped with Win2003, for example) may not support all
  // the dump types we really want. If the first call fails, fall back to using just MiniDumpWithFullMemory.
1101   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1102       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1103     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1104   }
1105   CloseHandle(dumpFile);
1106   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1107 }
1108 
1109 // Die immediately, no exit hook, no abort hook, no cleanup.
1110 void os::die() {
1111   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1112 }
1113 
1114 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1115 //  * dirent_md.c       1.15 00/02/02
1116 //
1117 // The declarations for DIR and struct dirent are in jvm_win32.h.
1118 
1119 // Caller must have already run dirname through JVM_NativePath, which removes
1120 // duplicate slashes and converts all instances of '/' into '\\'.
1121 
1122 DIR * os::opendir(const char *dirname) {
1123   assert(dirname != NULL, "just checking");   // hotspot change
1124   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1125   DWORD fattr;                                // hotspot change
1126   char alt_dirname[4] = { 0, 0, 0, 0 };
1127 
1128   if (dirp == 0) {
1129     errno = ENOMEM;
1130     return 0;
1131   }
1132 
1133   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1134   // as a directory in FindFirstFile().  We detect this case here and
1135   // prepend the current drive name.
1136   //
1137   if (dirname[1] == '\0' && dirname[0] == '\\') {
1138     alt_dirname[0] = _getdrive() + 'A' - 1;
1139     alt_dirname[1] = ':';
1140     alt_dirname[2] = '\\';
1141     alt_dirname[3] = '\0';
1142     dirname = alt_dirname;
1143   }
1144 
1145   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1146   if (dirp->path == 0) {
1147     free(dirp);
1148     errno = ENOMEM;
1149     return 0;
1150   }
1151   strcpy(dirp->path, dirname);
1152 
1153   fattr = GetFileAttributes(dirp->path);
1154   if (fattr == 0xffffffff) {
1155     free(dirp->path);
1156     free(dirp);
1157     errno = ENOENT;
1158     return 0;
1159   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1160     free(dirp->path);
1161     free(dirp);
1162     errno = ENOTDIR;
1163     return 0;
1164   }
1165 
1166   // Append "*.*", or possibly "\\*.*", to path
1167   if (dirp->path[1] == ':' &&
1168       (dirp->path[2] == '\0' ||
1169       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1170     // No '\\' needed for cases like "Z:" or "Z:\"
1171     strcat(dirp->path, "*.*");
1172   } else {
1173     strcat(dirp->path, "\\*.*");
1174   }
1175 
1176   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1177   if (dirp->handle == INVALID_HANDLE_VALUE) {
1178     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1179       free(dirp->path);
1180       free(dirp);
1181       errno = EACCES;
1182       return 0;
1183     }
1184   }
1185   return dirp;
1186 }
1187 
1188 struct dirent * os::readdir(DIR *dirp) {
1189   assert(dirp != NULL, "just checking");      // hotspot change
1190   if (dirp->handle == INVALID_HANDLE_VALUE) {
1191     return NULL;
1192   }
1193 
1194   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1195 
1196   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1197     if (GetLastError() == ERROR_INVALID_HANDLE) {
1198       errno = EBADF;
1199       return NULL;
1200     }
1201     FindClose(dirp->handle);
1202     dirp->handle = INVALID_HANDLE_VALUE;
1203   }
1204 
1205   return &dirp->dirent;
1206 }
1207 
1208 int os::closedir(DIR *dirp) {
1209   assert(dirp != NULL, "just checking");      // hotspot change
1210   if (dirp->handle != INVALID_HANDLE_VALUE) {
1211     if (!FindClose(dirp->handle)) {
1212       errno = EBADF;
1213       return -1;
1214     }
1215     dirp->handle = INVALID_HANDLE_VALUE;
1216   }
1217   free(dirp->path);
1218   free(dirp);
1219   return 0;
1220 }
1221 
// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory (a la java.io.tmpdir).
1224 const char* os::get_temp_directory() {
1225   static char path_buf[MAX_PATH];
1226   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1227     return path_buf;
1228   } else {
1229     path_buf[0] = '\0';
1230     return path_buf;
1231   }
1232 }
1233 
// Needs to be in the os-specific directory because Windows requires the
// additional header file <direct.h>
1236 const char* os::get_current_directory(char *buf, size_t buflen) {
1237   int n = static_cast<int>(buflen);
1238   if (buflen > INT_MAX)  n = INT_MAX;
1239   return _getcwd(buf, n);
1240 }
1241 
1242 //-----------------------------------------------------------
1243 // Helper functions for fatal error handler
1244 #ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
1247 //
1248 static bool _addr_in_ntdll(address addr) {
1249   HMODULE hmod;
1250   MODULEINFO minfo;
1251 
1252   hmod = GetModuleHandle("NTDLL.DLL");
1253   if (hmod == NULL) return false;
1254   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1255                                           &minfo, sizeof(MODULEINFO))) {
1256     return false;
1257   }
1258 
1259   if ((addr >= minfo.lpBaseOfDll) &&
1260       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1261     return true;
1262   } else {
1263     return false;
1264   }
1265 }
1266 #endif
1267 
1268 struct _modinfo {
1269   address addr;
1270   char*   full_path;   // point to a char buffer
1271   int     buflen;      // size of the buffer
1272   address base_addr;
1273 };
1274 
1275 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1276                                   address top_address, void * param) {
1277   struct _modinfo *pmod = (struct _modinfo *)param;
1278   if (!pmod) return -1;
1279 
1280   if (base_addr   <= pmod->addr &&
1281       top_address > pmod->addr) {
1282     // if a buffer is provided, copy path name to the buffer
1283     if (pmod->full_path) {
1284       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1285     }
1286     pmod->base_addr = base_addr;
1287     return 1;
1288   }
1289   return 0;
1290 }
1291 
1292 bool os::dll_address_to_library_name(address addr, char* buf,
1293                                      int buflen, int* offset) {
1294   // buf is not optional, but offset is optional
1295   assert(buf != NULL, "sanity check");
1296 
// NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
//       return the full path to the DLL file; sometimes it returns the path
//       to the corresponding PDB file (debug info), and sometimes it returns
//       only a partial path, which makes life painful.
1301 
1302   struct _modinfo mi;
1303   mi.addr      = addr;
1304   mi.full_path = buf;
1305   mi.buflen    = buflen;
1306   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1307     // buf already contains path name
1308     if (offset) *offset = addr - mi.base_addr;
1309     return true;
1310   }
1311 
1312   buf[0] = '\0';
1313   if (offset) *offset = -1;
1314   return false;
1315 }
1316 
1317 bool os::dll_address_to_function_name(address addr, char *buf,
1318                                       int buflen, int *offset,
1319                                       bool demangle) {
1320   // buf is not optional, but offset is optional
1321   assert(buf != NULL, "sanity check");
1322 
1323   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1324     return true;
1325   }
1326   if (offset != NULL)  *offset  = -1;
1327   buf[0] = '\0';
1328   return false;
1329 }
1330 
1331 // save the start and end address of jvm.dll into param[0] and param[1]
1332 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1333                            address top_address, void * param) {
1334   if (!param) return -1;
1335 
1336   if (base_addr   <= (address)_locate_jvm_dll &&
1337       top_address > (address)_locate_jvm_dll) {
1338     ((address*)param)[0] = base_addr;
1339     ((address*)param)[1] = top_address;
1340     return 1;
1341   }
1342   return 0;
1343 }
1344 
1345 address vm_lib_location[2];    // start and end address of jvm.dll
1346 
1347 // check if addr is inside jvm.dll
1348 bool os::address_is_in_vm(address addr) {
1349   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1350     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1351       assert(false, "Can't find jvm module.");
1352       return false;
1353     }
1354   }
1355 
1356   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1357 }
1358 
1359 // print module info; param is outputStream*
1360 static int _print_module(const char* fname, address base_address,
1361                          address top_address, void* param) {
1362   if (!param) return -1;
1363 
1364   outputStream* st = (outputStream*)param;
1365 
1366   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1367   return 0;
1368 }
1369 
// Loads a .dll/.so and, in case of error, checks whether the .dll/.so was
// built for the same architecture as the one HotSpot is running on
1373 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1374   log_info(os)("attempting shared library load of %s", name);
1375 
1376   void * result = LoadLibrary(name);
1377   if (result != NULL) {
1378     Events::log(NULL, "Loaded shared library %s", name);
1379     // Recalculate pdb search path if a DLL was loaded successfully.
1380     SymbolEngine::recalc_search_path();
1381     log_info(os)("shared library load of %s was successful", name);
1382     return result;
1383   }
1384   DWORD errcode = GetLastError();
1385   // Read system error message into ebuf
1386   // It may or may not be overwritten below (in the for loop and just above)
1387   lasterror(ebuf, (size_t) ebuflen);
1388   ebuf[ebuflen - 1] = '\0';
1389   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1390   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1391 
1392   if (errcode == ERROR_MOD_NOT_FOUND) {
1393     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1394     ebuf[ebuflen - 1] = '\0';
1395     return NULL;
1396   }
1397 
  // Parse the dll below.
  // If we can read the dll info and find that the dll was built
  // for an architecture other than the one HotSpot is running on,
  // then print "DLL was built for a different architecture" to the buffer;
  // otherwise call os::lasterror to obtain the system error message.
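  //
  // PE/COFF layout relied on below: the DOS header stores, at offset 0x3c
  // (IMAGE_FILE_PTR_TO_SIGNATURE), the file offset of the 4-byte "PE\0\0"
  // signature; the COFF file header follows immediately, and its first
  // 16-bit field (Machine) identifies the architecture the DLL was built for.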
1403   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1404   if (fd < 0) {
1405     return NULL;
1406   }
1407 
1408   uint32_t signature_offset;
1409   uint16_t lib_arch = 0;
1410   bool failed_to_get_lib_arch =
1411     ( // Go to position 3c in the dll
1412      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1413      ||
1414      // Read location of signature
1415      (sizeof(signature_offset) !=
1416      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1417      ||
1418      // Go to COFF File Header in dll
1419      // that is located after "signature" (4 bytes long)
1420      (os::seek_to_file_offset(fd,
1421      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1422      ||
1423      // Read field that contains code of architecture
1424      // that dll was built for
1425      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1426     );
1427 
1428   ::close(fd);
1429   if (failed_to_get_lib_arch) {
1430     // file i/o error - report os::lasterror(...) msg
1431     return NULL;
1432   }
1433 
1434   typedef struct {
1435     uint16_t arch_code;
1436     char* arch_name;
1437   } arch_t;
1438 
1439   static const arch_t arch_array[] = {
1440     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1441     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
1442   };
1443 #if (defined _M_AMD64)
1444   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1445 #elif (defined _M_IX86)
1446   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1447 #else
1448   #error Method os::dll_load requires that one of following \
1449          is defined :_M_AMD64 or _M_IX86
1450 #endif
1451 
1452 
  // Obtain strings for the error message below:
  // lib_arch_str names the platform this .dll was built for,
  // running_arch_str names the platform HotSpot was built for.
1456   char *running_arch_str = NULL, *lib_arch_str = NULL;
1457   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1458     if (lib_arch == arch_array[i].arch_code) {
1459       lib_arch_str = arch_array[i].arch_name;
1460     }
1461     if (running_arch == arch_array[i].arch_code) {
1462       running_arch_str = arch_array[i].arch_name;
1463     }
1464   }
1465 
1466   assert(running_arch_str,
1467          "Didn't find running architecture code in arch_array");
1468 
1469   // If the architecture is right
1470   // but some other error took place - report os::lasterror(...) msg
1471   if (lib_arch == running_arch) {
1472     return NULL;
1473   }
1474 
1475   if (lib_arch_str != NULL) {
1476     ::_snprintf(ebuf, ebuflen - 1,
1477                 "Can't load %s-bit .dll on a %s-bit platform",
1478                 lib_arch_str, running_arch_str);
1479   } else {
    // don't know what architecture this dll was built for
1481     ::_snprintf(ebuf, ebuflen - 1,
1482                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1483                 lib_arch, running_arch_str);
1484   }
1485 
1486   return NULL;
1487 }
1488 
1489 void os::print_dll_info(outputStream *st) {
1490   st->print_cr("Dynamic libraries:");
1491   get_loaded_modules_info(_print_module, (void *)st);
1492 }
1493 
1494 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1495   HANDLE   hProcess;
1496 
1497 # define MAX_NUM_MODULES 128
1498   HMODULE     modules[MAX_NUM_MODULES];
1499   static char filename[MAX_PATH];
1500   int         result = 0;
1501 
1502   int pid = os::current_process_id();
1503   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1504                          FALSE, pid);
1505   if (hProcess == NULL) return 0;
1506 
1507   DWORD size_needed;
1508   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1509     CloseHandle(hProcess);
1510     return 0;
1511   }
1512 
1513   // number of modules that are currently loaded
1514   int num_modules = size_needed / sizeof(HMODULE);
1515 
1516   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1517     // Get Full pathname:
1518     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1519       filename[0] = '\0';
1520     }
1521 
1522     MODULEINFO modinfo;
1523     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1524       modinfo.lpBaseOfDll = NULL;
1525       modinfo.SizeOfImage = 0;
1526     }
1527 
1528     // Invoke callback function
1529     result = callback(filename, (address)modinfo.lpBaseOfDll,
1530                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1531     if (result) break;
1532   }
1533 
1534   CloseHandle(hProcess);
1535   return result;
1536 }
1537 
1538 bool os::get_host_name(char* buf, size_t buflen) {
1539   DWORD size = (DWORD)buflen;
1540   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1541 }
1542 
1543 void os::get_summary_os_info(char* buf, size_t buflen) {
1544   stringStream sst(buf, buflen);
1545   os::win32::print_windows_version(&sst);
1546   // chop off newline character
1547   char* nl = strchr(buf, '\n');
1548   if (nl != NULL) *nl = '\0';
1549 }
1550 
1551 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1552 #if _MSC_VER >= 1900
  // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1554   int result = ::vsnprintf(buf, len, fmt, args);
1555   // If an encoding error occurred (result < 0) then it's not clear
1556   // whether the buffer is NUL terminated, so ensure it is.
1557   if ((result < 0) && (len > 0)) {
1558     buf[len - 1] = '\0';
1559   }
1560   return result;
1561 #else
1562   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1563   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1564   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1565   // go straight to _vscprintf.  The output is going to be truncated in
1566   // that case, except in the unusual case of empty output.  More
1567   // importantly, the documentation for various versions of Visual Studio
  // is inconsistent about the behavior of _vsnprintf when len == 0,
1569   // including it possibly being an error.
1570   int result = -1;
1571   if (len > 0) {
1572     result = _vsnprintf(buf, len, fmt, args);
1573     // If output (including NUL terminator) is truncated, the buffer
1574     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1575     if ((result < 0) || ((size_t)result >= len)) {
1576       buf[len - 1] = '\0';
1577     }
1578   }
1579   if (result < 0) {
1580     result = _vscprintf(fmt, args);
1581   }
1582   return result;
1583 #endif // _MSC_VER dispatch
1584 }
1585 
1586 static inline time_t get_mtime(const char* filename) {
1587   struct stat st;
1588   int ret = os::stat(filename, &st);
1589   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1590   return st.st_mtime;
1591 }
1592 
1593 int os::compare_file_modified_times(const char* file1, const char* file2) {
1594   time_t t1 = get_mtime(file1);
1595   time_t t2 = get_mtime(file2);
1596   return t1 - t2;
1597 }
1598 
1599 void os::print_os_info_brief(outputStream* st) {
1600   os::print_os_info(st);
1601 }
1602 
1603 void os::print_os_info(outputStream* st) {
1604 #ifdef ASSERT
1605   char buffer[1024];
1606   st->print("HostName: ");
1607   if (get_host_name(buffer, sizeof(buffer))) {
1608     st->print("%s ", buffer);
1609   } else {
1610     st->print("N/A ");
1611   }
1612 #endif
1613   st->print("OS:");
1614   os::win32::print_windows_version(st);
1615 
1616 #ifdef _LP64
1617   VM_Version::print_platform_virtualization_info(st);
1618 #endif
1619 }
1620 
1621 void os::win32::print_windows_version(outputStream* st) {
1622   OSVERSIONINFOEX osvi;
1623   VS_FIXEDFILEINFO *file_info;
1624   TCHAR kernel32_path[MAX_PATH];
1625   UINT len, ret;
1626 
1627   // Use the GetVersionEx information to see if we're on a server or
1628   // workstation edition of Windows. Starting with Windows 8.1, we can't
1629   // trust the OS version information returned by this API.
1630   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1631   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1632   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1633     st->print_cr("Call to GetVersionEx failed");
1634     return;
1635   }
1636   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1637 
1638   // Get the full path to \Windows\System32\kernel32.dll and use that for
1639   // determining what version of Windows we're running on.
1640   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1641   ret = GetSystemDirectory(kernel32_path, len);
1642   if (ret == 0 || ret > len) {
1643     st->print_cr("Call to GetSystemDirectory failed");
1644     return;
1645   }
1646   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1647 
1648   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1649   if (version_size == 0) {
1650     st->print_cr("Call to GetFileVersionInfoSize failed");
1651     return;
1652   }
1653 
1654   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1655   if (version_info == NULL) {
1656     st->print_cr("Failed to allocate version_info");
1657     return;
1658   }
1659 
1660   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1661     os::free(version_info);
1662     st->print_cr("Call to GetFileVersionInfo failed");
1663     return;
1664   }
1665 
1666   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1667     os::free(version_info);
1668     st->print_cr("Call to VerQueryValue failed");
1669     return;
1670   }
1671 
1672   int major_version = HIWORD(file_info->dwProductVersionMS);
1673   int minor_version = LOWORD(file_info->dwProductVersionMS);
1674   int build_number = HIWORD(file_info->dwProductVersionLS);
1675   int build_minor = LOWORD(file_info->dwProductVersionLS);
1676   int os_vers = major_version * 1000 + minor_version;
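       // e.g. Windows 6.1 (7 / Server 2008 R2) maps to 6001, Windows 10.0 maps to 10000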
1677   os::free(version_info);
1678 
1679   st->print(" Windows ");
1680   switch (os_vers) {
1681 
1682   case 6000:
1683     if (is_workstation) {
1684       st->print("Vista");
1685     } else {
1686       st->print("Server 2008");
1687     }
1688     break;
1689 
1690   case 6001:
1691     if (is_workstation) {
1692       st->print("7");
1693     } else {
1694       st->print("Server 2008 R2");
1695     }
1696     break;
1697 
1698   case 6002:
1699     if (is_workstation) {
1700       st->print("8");
1701     } else {
1702       st->print("Server 2012");
1703     }
1704     break;
1705 
1706   case 6003:
1707     if (is_workstation) {
1708       st->print("8.1");
1709     } else {
1710       st->print("Server 2012 R2");
1711     }
1712     break;
1713 
1714   case 10000:
1715     if (is_workstation) {
1716       st->print("10");
1717     } else {
1718       // Distinguish Windows Server 2016 and 2019 by build number.
1719       // The Windows Server 2019 GA (10/2018) build number is 17763.
1720       if (build_number > 17762) {
1721         st->print("Server 2019");
1722       } else {
1723         st->print("Server 2016");
1724       }
1725     }
1726     break;
1727 
1728   default:
1729     // Unrecognized Windows version; print out its major and minor versions
1730     st->print("%d.%d", major_version, minor_version);
1731     break;
1732   }
1733 
1734   // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we can
1735   // find out whether we are running on a 64 bit processor or not
1736   SYSTEM_INFO si;
1737   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1738   GetNativeSystemInfo(&si);
1739   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1740     st->print(" , 64 bit");
1741   }
1742 
1743   st->print(" Build %d", build_number);
1744   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1745   st->cr();
1746 }
1747 
1748 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1749   // Nothing to do for now.
1750 }
1751 
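     // Reads the processor name string from the registry; on failure a generic
     // "## __CPU__" placeholder is returned instead.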
1752 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1753   HKEY key;
1754   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1755                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1756   if (status == ERROR_SUCCESS) {
1757     DWORD size = (DWORD)buflen;
1758     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1759     if (status != ERROR_SUCCESS) {
1760         strncpy(buf, "## __CPU__", buflen);
1761     }
1762     RegCloseKey(key);
1763   } else {
1764     // Fall back to a generic cpu description
1765     strncpy(buf, "## __CPU__", buflen);
1766   }
1767 }
1768 
1769 void os::print_memory_info(outputStream* st) {
1770   st->print("Memory:");
1771   st->print(" %dk page", os::vm_page_size()>>10);
1772 
1773   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
1774   // value if total memory is larger than 4GB
1775   MEMORYSTATUSEX ms;
1776   ms.dwLength = sizeof(ms);
1777   int r1 = GlobalMemoryStatusEx(&ms);
1778 
1779   if (r1 != 0) {
1780     st->print(", system-wide physical " INT64_FORMAT "M ",
1781              (int64_t) ms.ullTotalPhys >> 20);
1782     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1783 
1784     st->print("TotalPageFile size " INT64_FORMAT "M ",
1785              (int64_t) ms.ullTotalPageFile >> 20);
1786     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1787              (int64_t) ms.ullAvailPageFile >> 20);
1788 
1789     // On 32 bit, Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit)
1790 #if defined(_M_IX86)
1791     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1792              (int64_t) ms.ullTotalVirtual >> 20);
1793     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1794 #endif
1795   } else {
1796     st->print(", GlobalMemoryStatusEx did not succeed, so some memory values are missing.");
1797   }
1798 
1799   // extended memory statistics for a process
1800   PROCESS_MEMORY_COUNTERS_EX pmex;
1801   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1802   pmex.cb = sizeof(pmex);
1803   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1804 
1805   if (r2 != 0) {
1806     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1807              (int64_t) pmex.WorkingSetSize >> 20);
1808     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1809 
1810     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1811              (int64_t) pmex.PrivateUsage >> 20);
1812     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1813   } else {
1814     st->print("\nGetProcessMemoryInfo did not succeed, so some memory values are missing.");
1815   }
1816 
1817   st->cr();
1818 }
1819 
1820 bool os::signal_sent_by_kill(const void* siginfo) {
1821   // TODO: Is this possible?
1822   return false;
1823 }
1824 
1825 void os::print_siginfo(outputStream *st, const void* siginfo) {
1826   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1827   st->print("siginfo:");
1828 
1829   char tmp[64];
1830   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1831     strcpy(tmp, "EXCEPTION_??");
1832   }
1833   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1834 
1835   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1836        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1837        er->NumberParameters >= 2) {
1838     switch (er->ExceptionInformation[0]) {
1839     case 0: st->print(", reading address"); break;
1840     case 1: st->print(", writing address"); break;
1841     case 8: st->print(", data execution prevention violation at address"); break;
1842     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1843                        er->ExceptionInformation[0]);
1844     }
1845     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1846   } else {
1847     int num = er->NumberParameters;
1848     if (num > 0) {
1849       st->print(", ExceptionInformation=");
1850       for (int i = 0; i < num; i++) {
1851         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1852       }
1853     }
1854   }
1855   st->cr();
1856 }
1857 
1858 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1859   // TODO: Can we kill thread?
1860   return false;
1861 }
1862 
1863 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1864   // do nothing
1865 }
1866 
1867 static char saved_jvm_path[MAX_PATH] = {0};
1868 
1869 // Find the full path to the current module, jvm.dll
1870 void os::jvm_path(char *buf, jint buflen) {
1871   // Error checking.
1872   if (buflen < MAX_PATH) {
1873     assert(false, "must use a large-enough buffer");
1874     buf[0] = '\0';
1875     return;
1876   }
1877   // Lazily resolve the path to the current module.
1878   if (saved_jvm_path[0] != 0) {
1879     strcpy(buf, saved_jvm_path);
1880     return;
1881   }
1882 
1883   buf[0] = '\0';
1884   if (Arguments::sun_java_launcher_is_altjvm()) {
1885     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1886     // for a JAVA_HOME environment variable and fix up the path so it
1887     // looks like jvm.dll is installed there (append a fake suffix
1888     // hotspot/jvm.dll).
1889     char* java_home_var = ::getenv("JAVA_HOME");
1890     if (java_home_var != NULL && java_home_var[0] != 0 &&
1891         strlen(java_home_var) < (size_t)buflen) {
1892       strncpy(buf, java_home_var, buflen);
1893 
1894       // determine if this is a legacy image or modules image
1895       // modules image doesn't have "jre" subdirectory
1896       size_t len = strlen(buf);
1897       char* jrebin_p = buf + len;
1898       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1899       if (0 != _access(buf, 0)) {
1900         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1901       }
1902       len = strlen(buf);
1903       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1904     }
1905   }
1906 
1907   if (buf[0] == '\0') {
1908     GetModuleFileName(vm_lib_handle, buf, buflen);
1909   }
1910   strncpy(saved_jvm_path, buf, MAX_PATH);
1911   saved_jvm_path[MAX_PATH - 1] = '\0';
1912 }
1913 
1914 
1915 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1916 #ifndef _WIN64
1917   st->print("_");
1918 #endif
1919 }
1920 
1921 
1922 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1923 #ifndef _WIN64
1924   st->print("@%d", args_size  * sizeof(int));
1925 #endif
1926 }
1927 
1928 // This method is a copy of JDK's sysGetLastErrorString
1929 // from src/windows/hpi/src/system_md.c
1930 
1931 size_t os::lasterror(char* buf, size_t len) {
1932   DWORD errval;
1933 
1934   if ((errval = GetLastError()) != 0) {
1935     // DOS error
1936     size_t n = (size_t)FormatMessage(
1937                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1938                                      NULL,
1939                                      errval,
1940                                      0,
1941                                      buf,
1942                                      (DWORD)len,
1943                                      NULL);
1944     if (n > 3) {
1945       // Drop final '.', CR, LF
1946       if (buf[n - 1] == '\n') n--;
1947       if (buf[n - 1] == '\r') n--;
1948       if (buf[n - 1] == '.') n--;
1949       buf[n] = '\0';
1950     }
1951     return n;
1952   }
1953 
1954   if (errno != 0) {
1955     // C runtime error that has no corresponding DOS error code
1956     const char* s = os::strerror(errno);
1957     size_t n = strlen(s);
1958     if (n >= len) n = len - 1;
1959     strncpy(buf, s, n);
1960     buf[n] = '\0';
1961     return n;
1962   }
1963 
1964   return 0;
1965 }
1966 
1967 int os::get_last_error() {
1968   DWORD error = GetLastError();
1969   if (error == 0) {
1970     error = errno;
1971   }
1972   return (int)error;
1973 }
1974 
1975 // sun.misc.Signal
1976 // NOTE that this is a workaround for an apparent kernel bug where if
1977 // a signal handler for SIGBREAK is installed then that signal handler
1978 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1979 // See bug 4416763.
1980 static void (*sigbreakHandler)(int) = NULL;
1981 
1982 static void UserHandler(int sig, void *siginfo, void *context) {
1983   os::signal_notify(sig);
1984   // We need to reinstate the signal handler each time...
1985   os::signal(sig, (void*)UserHandler);
1986 }
1987 
1988 void* os::user_handler() {
1989   return (void*) UserHandler;
1990 }
1991 
1992 void* os::signal(int signal_number, void* handler) {
1993   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1994     void (*oldHandler)(int) = sigbreakHandler;
1995     sigbreakHandler = (void (*)(int)) handler;
1996     return (void*) oldHandler;
1997   } else {
1998     return (void*)::signal(signal_number, (void (*)(int))handler);
1999   }
2000 }
2001 
2002 void os::signal_raise(int signal_number) {
2003   raise(signal_number);
2004 }
2005 
2006 // The Win32 C runtime library maps all console control events other than ^C
2007 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2008 // logoff, and shutdown events.  We therefore install our own console handler
2009 // that raises SIGTERM for the latter cases.
2010 //
2011 static BOOL WINAPI consoleHandler(DWORD event) {
2012   switch (event) {
2013   case CTRL_C_EVENT:
2014     if (VMError::is_error_reported()) {
2015       // Ctrl-C is pressed during error reporting, likely because the error
2016       // handler fails to abort. Let VM die immediately.
2017       os::die();
2018     }
2019 
2020     os::signal_raise(SIGINT);
2021     return TRUE;
2022     break;
2023   case CTRL_BREAK_EVENT:
2024     if (sigbreakHandler != NULL) {
2025       (*sigbreakHandler)(SIGBREAK);
2026     }
2027     return TRUE;
2028     break;
2029   case CTRL_LOGOFF_EVENT: {
2030     // Don't terminate JVM if it is running in a non-interactive session,
2031     // such as a service process.
2032     USEROBJECTFLAGS flags;
2033     HANDLE handle = GetProcessWindowStation();
2034     if (handle != NULL &&
2035         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2036         sizeof(USEROBJECTFLAGS), NULL)) {
2037       // If it is a non-interactive session, let the next handler deal
2038       // with it.
2039       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2040         return FALSE;
2041       }
2042     }
2043   }
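       // Fall through: an interactive logoff is handled like close/shutdown.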
2044   case CTRL_CLOSE_EVENT:
2045   case CTRL_SHUTDOWN_EVENT:
2046     os::signal_raise(SIGTERM);
2047     return TRUE;
2048     break;
2049   default:
2050     break;
2051   }
2052   return FALSE;
2053 }
2054 
2055 // The following code was moved from os.cpp to make this
2056 // code platform specific, which it is by its very nature.
2057 
2058 // Return maximum OS signal used + 1 for internal use only
2059 // Used as exit signal for signal_thread
2060 int os::sigexitnum_pd() {
2061   return NSIG;
2062 }
2063 
2064 // a counter for each possible signal value, including signal_thread exit signal
2065 static volatile jint pending_signals[NSIG+1] = { 0 };
2066 static Semaphore* sig_sem = NULL;
2067 
2068 static void jdk_misc_signal_init() {
2069   // Initialize signal structures
2070   memset((void*)pending_signals, 0, sizeof(pending_signals));
2071 
2072   // Initialize signal semaphore
2073   sig_sem = new Semaphore();
2074 
2075   // Programs embedding the VM do not want it to attempt to receive
2076   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2077   // shutdown hooks mechanism introduced in 1.3.  For example, when
2078   // the VM is run as part of a Windows NT service (i.e., a servlet
2079   // engine in a web server), the correct behavior is for any console
2080   // control handler to return FALSE, not TRUE, because the OS's
2081   // "final" handler for such events allows the process to continue if
2082   // it is a service (while terminating it if it is not a service).
2083   // To make this behavior uniform and the mechanism simpler, we
2084   // completely disable the VM's usage of these console events if -Xrs
2085   // (=ReduceSignalUsage) is specified.  This means, for example, that
2086   // the CTRL-BREAK thread dump mechanism is also disabled in this
2087   // case.  See bugs 4323062, 4345157, and related bugs.
2088 
2089   // Add a CTRL-C handler
2090   SetConsoleCtrlHandler(consoleHandler, TRUE);
2091 }
2092 
2093 void os::signal_notify(int sig) {
2094   if (sig_sem != NULL) {
2095     Atomic::inc(&pending_signals[sig]);
2096     sig_sem->signal();
2097   } else {
2098     // The signal thread is not created when ReduceSignalUsage is set, and
2099     // jdk_misc_signal_init() initialization isn't called.
2100     assert(ReduceSignalUsage, "signal semaphore should be created");
2101   }
2102 }
2103 
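     // Scans the pending_signals counters for a raised signal; if one is found it is
     // atomically decremented and its number returned.  Otherwise the thread blocks
     // on sig_sem until os::signal_notify() posts it, cooperating with the external
     // suspend mechanism while waiting.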
2104 static int check_pending_signals() {
2105   while (true) {
2106     for (int i = 0; i < NSIG + 1; i++) {
2107       jint n = pending_signals[i];
2108       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2109         return i;
2110       }
2111     }
2112     JavaThread *thread = JavaThread::current();
2113 
2114     ThreadBlockInVM tbivm(thread);
2115 
2116     bool threadIsSuspended;
2117     do {
2118       thread->set_suspend_equivalent();
2119       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2120       sig_sem->wait();
2121 
2122       // were we externally suspended while we were waiting?
2123       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2124       if (threadIsSuspended) {
2125         // The semaphore has been incremented, but while we were waiting
2126         // another thread suspended us. We don't want to continue running
2127         // while suspended because that would surprise the thread that
2128         // suspended us.
2129         sig_sem->signal();
2130 
2131         thread->java_suspend_self();
2132       }
2133     } while (threadIsSuspended);
2134   }
2135 }
2136 
2137 int os::signal_wait() {
2138   return check_pending_signals();
2139 }
2140 
2141 // Implicit OS exception handling
2142 
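     // Resumes execution at 'handler': saves the faulting pc in the thread (so the
     // handler stub can find it) and redirects the context's instruction pointer.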
2143 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2144                       address handler) {
2145   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2146   // Save pc in thread
2147 #ifdef _M_AMD64
2148   // Do not blow up if no thread info available.
2149   if (thread) {
2150     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2151   }
2152   // Set pc to handler
2153   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2154 #else
2155   // Do not blow up if no thread info available.
2156   if (thread) {
2157     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2158   }
2159   // Set pc to handler
2160   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2161 #endif
2162 
2163   // Continue the execution
2164   return EXCEPTION_CONTINUE_EXECUTION;
2165 }
2166 
2167 
2168 // Used for PostMortemDump
2169 extern "C" void safepoints();
2170 extern "C" void find(int x);
2171 extern "C" void events();
2172 
2173 // According to Windows API documentation, an illegal instruction sequence should generate
2174 // the 0xC000001C exception code. However, real world experience shows that occasionally
2175 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2176 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2177 
2178 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2179 
2180 // From "Execution Protection in the Windows Operating System" draft 0.35
2181 // Once a system header becomes available, the "real" define should be
2182 // included or copied here.
2183 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2184 
2185 // Windows Vista/2008 heap corruption check
2186 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2187 
2188 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2189 // C++ compiler contain this error code. Because this is a compiler-generated
2190 // error, the code is not listed in the Win32 API header files.
2191 // The code is actually a cryptic mnemonic device, with the initial "E"
2192 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2193 // ASCII values of "msc".
2194 
2195 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2196 
2197 #define def_excpt(val) { #val, (val) }
2198 
2199 static const struct { const char* name; uint number; } exceptlabels[] = {
2200     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2201     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2202     def_excpt(EXCEPTION_BREAKPOINT),
2203     def_excpt(EXCEPTION_SINGLE_STEP),
2204     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2205     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2206     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2207     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2208     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2209     def_excpt(EXCEPTION_FLT_OVERFLOW),
2210     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2211     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2212     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2213     def_excpt(EXCEPTION_INT_OVERFLOW),
2214     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2215     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2216     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2217     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2218     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2219     def_excpt(EXCEPTION_STACK_OVERFLOW),
2220     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2221     def_excpt(EXCEPTION_GUARD_PAGE),
2222     def_excpt(EXCEPTION_INVALID_HANDLE),
2223     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2224     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2225 };
2226 
2227 #undef def_excpt
2228 
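     // Maps a Windows exception code to its symbolic name (e.g. EXCEPTION_ACCESS_VIOLATION);
     // returns NULL if the code is not in the table above.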
2229 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2230   uint code = static_cast<uint>(exception_code);
2231   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2232     if (exceptlabels[i].number == code) {
2233       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2234       return buf;
2235     }
2236   }
2237 
2238   return NULL;
2239 }
2240 
2241 //-----------------------------------------------------------------------------
2242 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2243   // handle exception caused by idiv; should only happen for -MinInt/-1
2244   // (division by zero is handled explicitly)
2245 #ifdef  _M_AMD64
2246   PCONTEXT ctx = exceptionInfo->ContextRecord;
2247   address pc = (address)ctx->Rip;
2248   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2249   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2250   if (pc[0] == 0xF7) {
2251     // set correct result values and continue after idiv instruction
2252     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2253   } else {
2254     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2255   }
2256   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2257   // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2258   // idiv opcode (0xF7).
2259   ctx->Rdx = (DWORD)0;             // remainder
2260   // Continue the execution
2261 #else
2262   PCONTEXT ctx = exceptionInfo->ContextRecord;
2263   address pc = (address)ctx->Eip;
2264   assert(pc[0] == 0xF7, "not an idiv opcode");
2265   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2266   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2267   // set correct result values and continue after idiv instruction
2268   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2269   ctx->Eax = (DWORD)min_jint;      // result
2270   ctx->Edx = (DWORD)0;             // remainder
2271   // Continue the execution
2272 #endif
2273   return EXCEPTION_CONTINUE_EXECUTION;
2274 }
2275 
2276 //-----------------------------------------------------------------------------
2277 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2278   PCONTEXT ctx = exceptionInfo->ContextRecord;
2279 #ifndef  _WIN64
2280   // handle exception caused by native method modifying control word
2281   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2282 
2283   switch (exception_code) {
2284   case EXCEPTION_FLT_DENORMAL_OPERAND:
2285   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2286   case EXCEPTION_FLT_INEXACT_RESULT:
2287   case EXCEPTION_FLT_INVALID_OPERATION:
2288   case EXCEPTION_FLT_OVERFLOW:
2289   case EXCEPTION_FLT_STACK_CHECK:
2290   case EXCEPTION_FLT_UNDERFLOW:
2291     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2292     if (fp_control_word != ctx->FloatSave.ControlWord) {
2293       // Restore FPCW and mask out FLT exceptions
2294       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2295       // Mask out pending FLT exceptions
2296       ctx->FloatSave.StatusWord &=  0xffffff00;
2297       return EXCEPTION_CONTINUE_EXECUTION;
2298     }
2299   }
2300 
2301   if (prev_uef_handler != NULL) {
2302     // We didn't handle this exception so pass it to the previous
2303     // UnhandledExceptionFilter.
2304     return (prev_uef_handler)(exceptionInfo);
2305   }
2306 #else // !_WIN64
2307   // On Windows, the mxcsr control bits are non-volatile across calls
2308   // See also CR 6192333
2309   //
2310   jint MxCsr = INITIAL_MXCSR;
2311   // we can't use StubRoutines::addr_mxcsr_std()
2312   // because in Win64 mxcsr is not saved there
2313   if (MxCsr != ctx->MxCsr) {
2314     ctx->MxCsr = MxCsr;
2315     return EXCEPTION_CONTINUE_EXECUTION;
2316   }
2317 #endif // !_WIN64
2318 
2319   return EXCEPTION_CONTINUE_SEARCH;
2320 }
2321 
2322 static inline void report_error(Thread* t, DWORD exception_code,
2323                                 address addr, void* siginfo, void* context) {
2324   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2325 
2326   // If UseOsErrorReporting, this will return here and save the error file
2327   // somewhere where we can find it in the minidump.
2328 }
2329 
2330 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2331         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2332   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2333   address addr = (address) exceptionRecord->ExceptionInformation[1];
2334   if (Interpreter::contains(pc)) {
2335     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2336     if (!fr->is_first_java_frame()) {
2337       // get_frame_at_stack_banging_point() is only called when we
2338       // have well defined stacks so java_sender() calls do not need
2339       // to assert safe_for_sender() first.
2340       *fr = fr->java_sender();
2341     }
2342   } else {
2343     // more complex code with compiled code
2344     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2345     CodeBlob* cb = CodeCache::find_blob(pc);
2346     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2347       // Not sure where the pc points to, fallback to default
2348       // stack overflow handling
2349       return false;
2350     } else {
2351       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2352       // in compiled code, the stack banging is performed just after the return pc
2353       // has been pushed on the stack
2354       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2355       if (!fr->is_java_frame()) {
2356         // See java_sender() comment above.
2357         *fr = fr->java_sender();
2358       }
2359     }
2360   }
2361   assert(fr->is_java_frame(), "Safety check");
2362   return true;
2363 }
2364 
2365 #if INCLUDE_AOT
2366 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2367   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2368   address addr = (address) exceptionRecord->ExceptionInformation[1];
2369   address pc = (address) exceptionInfo->ContextRecord->Rip;
2370 
2371   // Handle the case where we get an implicit exception in AOT generated
2372   // code.  AOT DLL's loaded are not registered for structured exceptions.
2373   // If the exception occurred in the codeCache or AOT code, pass control
2374   // to our normal exception handler.
2375   CodeBlob* cb = CodeCache::find_blob(pc);
2376   if (cb != NULL) {
2377     return topLevelExceptionFilter(exceptionInfo);
2378   }
2379 
2380   return EXCEPTION_CONTINUE_SEARCH;
2381 }
2382 #endif
2383 
2384 //-----------------------------------------------------------------------------
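     // Top-level structured exception filter for the VM: dispatches implicit
     // exceptions (stack overflow, null checks, safepoint polls, divide-by-zero,
     // unsafe access faults, FP control word fixups) to the appropriate HotSpot
     // handlers and reports everything else as a fatal error.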
2385 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2386   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2387   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2388 #ifdef _M_AMD64
2389   address pc = (address) exceptionInfo->ContextRecord->Rip;
2390 #else
2391   address pc = (address) exceptionInfo->ContextRecord->Eip;
2392 #endif
2393   Thread* t = Thread::current_or_null_safe();
2394 
2395   // Handle SafeFetch32 and SafeFetchN exceptions.
2396   if (StubRoutines::is_safefetch_fault(pc)) {
2397     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2398   }
2399 
2400 #ifndef _WIN64
2401   // Execution protection violation - win32 running on AMD64 only
2402   // Handled first to avoid misdiagnosis as a "normal" access violation;
2403   // This is safe to do because we have a new/unique ExceptionInformation
2404   // code for this condition.
2405   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2406     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2407     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2408     address addr = (address) exceptionRecord->ExceptionInformation[1];
2409 
2410     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2411       int page_size = os::vm_page_size();
2412 
2413       // Make sure the pc and the faulting address are sane.
2414       //
2415       // If an instruction spans a page boundary, and the page containing
2416       // the beginning of the instruction is executable but the following
2417       // page is not, the pc and the faulting address might be slightly
2418       // different - we still want to unguard the 2nd page in this case.
2419       //
2420       // 15 bytes seems to be a (very) safe value for max instruction size.
2421       bool pc_is_near_addr =
2422         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2423       bool instr_spans_page_boundary =
2424         (align_down((intptr_t) pc ^ (intptr_t) addr,
2425                          (intptr_t) page_size) > 0);
2426 
2427       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2428         static volatile address last_addr =
2429           (address) os::non_memory_address_word();
2430 
2431         // In conservative mode, don't unguard unless the address is in the VM
2432         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2433             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2434 
2435           // Set memory to RWX and retry
2436           address page_start = align_down(addr, page_size);
2437           bool res = os::protect_memory((char*) page_start, page_size,
2438                                         os::MEM_PROT_RWX);
2439 
2440           log_debug(os)("Execution protection violation "
2441                         "at " INTPTR_FORMAT
2442                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2443                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2444 
2445           // Set last_addr so if we fault again at the same address, we don't
2446           // end up in an endless loop.
2447           //
2448           // There are two potential complications here.  Two threads trapping
2449           // at the same address at the same time could cause one of the
2450           // threads to think the page was already unguarded, and abort the VM.  Likely
2451           // very rare.
2452           //
2453           // The other race involves two threads alternately trapping at
2454           // different addresses and failing to unguard the page, resulting in
2455           // an endless loop.  This condition is probably even more unlikely
2456           // than the first.
2457           //
2458           // Although both cases could be avoided by using locks or thread
2459           // local last_addr, these solutions are unnecessary complication:
2460           // this handler is a best-effort safety net, not a complete solution.
2461           // It is disabled by default and should only be used as a workaround
2462           // in case we missed any no-execute-unsafe VM code.
2463 
2464           last_addr = addr;
2465 
2466           return EXCEPTION_CONTINUE_EXECUTION;
2467         }
2468       }
2469 
2470       // Last unguard failed or not unguarding
2471       tty->print_raw_cr("Execution protection violation");
2472       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2473                    exceptionInfo->ContextRecord);
2474       return EXCEPTION_CONTINUE_SEARCH;
2475     }
2476   }
2477 #endif // _WIN64
2478 
2479   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2480       VM_Version::is_cpuinfo_segv_addr(pc)) {
2481     // Verify that the OS saves/restores AVX registers.
2482     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2483   }
2484 
2485   if (t != NULL && t->is_Java_thread()) {
2486     JavaThread* thread = (JavaThread*) t;
2487     bool in_java = thread->thread_state() == _thread_in_Java;
2488 
2489     // Handle potential stack overflows up front.
2490     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2491       if (thread->stack_guards_enabled()) {
2492         if (in_java) {
2493           frame fr;
2494           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2495           address addr = (address) exceptionRecord->ExceptionInformation[1];
2496           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2497             assert(fr.is_java_frame(), "Must be a Java frame");
2498             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2499           }
2500         }
2501         // Yellow zone violation.  The o/s has unprotected the first yellow
2502         // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
2503         // update the enabled status, even if the zone contains only one page.
2504         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2505         thread->disable_stack_yellow_reserved_zone();
2506         // If not in java code, return and hope for the best.
2507         return in_java
2508             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2509             :  EXCEPTION_CONTINUE_EXECUTION;
2510       } else {
2511         // Fatal red zone violation.
2512         thread->disable_stack_red_zone();
2513         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2514         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2515                       exceptionInfo->ContextRecord);
2516         return EXCEPTION_CONTINUE_SEARCH;
2517       }
2518     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2519       // Either stack overflow or null pointer exception.
2520       if (in_java) {
2521         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2522         address addr = (address) exceptionRecord->ExceptionInformation[1];
2523         address stack_end = thread->stack_end();
2524         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2525           // Stack overflow.
2526           assert(!os::uses_stack_guard_pages(),
2527                  "should be caught by red zone code above.");
2528           return Handle_Exception(exceptionInfo,
2529                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2530         }
2531         // Check for safepoint polling and implicit null.
2532         // We only expect null pointers in the stubs (vtable);
2533         // the rest are checked explicitly now.
2534         CodeBlob* cb = CodeCache::find_blob(pc);
2535         if (cb != NULL) {
2536           if (os::is_poll_address(addr)) {
2537             address stub = SharedRuntime::get_poll_stub(pc);
2538             return Handle_Exception(exceptionInfo, stub);
2539           }
2540         }
2541         {
2542 #ifdef _WIN64
2543           // If it's a legal stack address, map the entire region in
2544           //
2545           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2546           address addr = (address) exceptionRecord->ExceptionInformation[1];
2547           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2548             addr = (address)((uintptr_t)addr &
2549                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2550             os::commit_memory((char *)addr, thread->stack_base() - addr,
2551                               !ExecMem);
2552             return EXCEPTION_CONTINUE_EXECUTION;
2553           } else
2554 #endif
2555           {
2556             // Null pointer exception.
2557             if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2558               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2559               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2560             }
2561             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2562                          exceptionInfo->ContextRecord);
2563             return EXCEPTION_CONTINUE_SEARCH;
2564           }
2565         }
2566       }
2567 
2568 #ifdef _WIN64
2569       // Special care for fast JNI field accessors.
2570       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2571       // in and the heap gets shrunk before the field access.
2572       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2573         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2574         if (addr != (address)-1) {
2575           return Handle_Exception(exceptionInfo, addr);
2576         }
2577       }
2578 #endif
2579 
2580       // Stack overflow or null pointer exception in native code.
2581       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2582                    exceptionInfo->ContextRecord);
2583       return EXCEPTION_CONTINUE_SEARCH;
2584     } // /EXCEPTION_ACCESS_VIOLATION
2585     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2586 
2587     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2588       CompiledMethod* nm = NULL;
2589       JavaThread* thread = (JavaThread*)t;
2590       if (in_java) {
2591         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2592         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2593       }
2594 
2595       bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2596       if (((thread->thread_state() == _thread_in_vm ||
2597            thread->thread_state() == _thread_in_native ||
2598            is_unsafe_arraycopy) &&
2599           thread->doing_unsafe_access()) ||
2600           (nm != NULL && nm->has_unsafe_access())) {
2601         address next_pc =  Assembler::locate_next_instruction(pc);
2602         if (is_unsafe_arraycopy) {
2603           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2604         }
2605         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2606       }
2607     }
2608 
2609     if (in_java) {
2610       switch (exception_code) {
2611       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2612         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2613 
2614       case EXCEPTION_INT_OVERFLOW:
2615         return Handle_IDiv_Exception(exceptionInfo);
2616 
2617       } // switch
2618     }
2619     if (((thread->thread_state() == _thread_in_Java) ||
2620          (thread->thread_state() == _thread_in_native)) &&
2621          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2622       LONG result=Handle_FLT_Exception(exceptionInfo);
2623       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2624     }
2625   }
2626 
2627   if (exception_code != EXCEPTION_BREAKPOINT) {
2628     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2629                  exceptionInfo->ContextRecord);
2630   }
2631   return EXCEPTION_CONTINUE_SEARCH;
2632 }
2633 
2634 #ifndef _WIN64
2635 // Special care for fast JNI accessors.
2636 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2637 // the heap gets shrunk before the field access.
2638 // Need to install our own structured exception handler since native code may
2639 // install its own.
2640 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2641   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2642   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2643     address pc = (address) exceptionInfo->ContextRecord->Eip;
2644     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2645     if (addr != (address)-1) {
2646       return Handle_Exception(exceptionInfo, addr);
2647     }
2648   }
2649   return EXCEPTION_CONTINUE_SEARCH;
2650 }
2651 
2652 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2653   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2654                                                      jobject obj,           \
2655                                                      jfieldID fieldID) {    \
2656     __try {                                                                 \
2657       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2658                                                                  obj,       \
2659                                                                  fieldID);  \
2660     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2661                                               _exception_info())) {         \
2662     }                                                                       \
2663     return 0;                                                               \
2664   }
2665 
2666 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2667 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2668 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2669 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2670 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2671 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2672 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2673 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2674 
2675 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2676   switch (type) {
2677   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2678   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2679   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2680   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2681   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2682   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2683   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2684   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2685   default:        ShouldNotReachHere();
2686   }
2687   return (address)-1;
2688 }
2689 #endif
2690 
2691 // Virtual Memory
2692 
2693 int os::vm_page_size() { return os::win32::vm_page_size(); }
2694 int os::vm_allocation_granularity() {
2695   return os::win32::vm_allocation_granularity();
2696 }
2697 
2698 // Windows large page support is available on Windows 2003. In order to use
2699 // large page memory, the administrator must first assign additional privilege
2700 // to the user:
2701 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2702 //   + select Local Policies -> User Rights Assignment
2703 //   + double click "Lock pages in memory", add users and/or groups
2704 //   + reboot
2705 // Note the above steps are needed for administrator as well, as administrators
2706 // by default do not have the privilege to lock pages in memory.
2707 //
2708 // Note about Windows 2003: although the API supports committing large page
2709 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2710 // scenario, I found through experimentation that it only uses large pages if the entire
2711 // memory region is reserved and committed in a single VirtualAlloc() call.
2712 // This makes Windows large page support more or less like Solaris ISM, in
2713 // that the entire heap must be committed upfront. This probably will change
2714 // in the future, if so the code below needs to be revisited.
2715 
2716 #ifndef MEM_LARGE_PAGES
2717   #define MEM_LARGE_PAGES 0x20000000
2718 #endif
2719 
2720 static HANDLE    _hProcess;
2721 static HANDLE    _hToken;
2722 
2723 // Container for NUMA node list info
2724 class NUMANodeListHolder {
2725  private:
2726   int *_numa_used_node_list;  // allocated below
2727   int _numa_used_node_count;
2728 
2729   void free_node_list() {
2730     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2731   }
2732 
2733  public:
2734   NUMANodeListHolder() {
2735     _numa_used_node_count = 0;
2736     _numa_used_node_list = NULL;
2737     // do rest of initialization in build routine (after function pointers are set up)
2738   }
2739 
2740   ~NUMANodeListHolder() {
2741     free_node_list();
2742   }
2743 
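       // Builds the list of NUMA nodes whose processors overlap this process'
       // affinity mask.  Returns true only if more than one such node exists.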
2744   bool build() {
2745     DWORD_PTR proc_aff_mask;
2746     DWORD_PTR sys_aff_mask;
2747     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2748     ULONG highest_node_number;
2749     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2750     free_node_list();
2751     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2752     for (unsigned int i = 0; i <= highest_node_number; i++) {
2753       ULONGLONG proc_mask_numa_node;
2754       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2755       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2756         _numa_used_node_list[_numa_used_node_count++] = i;
2757       }
2758     }
2759     return (_numa_used_node_count > 1);
2760   }
2761 
2762   int get_count() { return _numa_used_node_count; }
2763   int get_node_list_entry(int n) {
2764     // for indexes out of range, returns -1
2765     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2766   }
2767 
2768 } numa_node_list_holder;
2769 
2770 
2771 
2772 static size_t _large_page_size = 0;
2773 
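     // Enables the SeLockMemoryPrivilege in this process' access token; this
     // privilege is required before VirtualAlloc() will honor MEM_LARGE_PAGES.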
2774 static bool request_lock_memory_privilege() {
2775   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2776                           os::current_process_id());
2777 
2778   LUID luid;
2779   if (_hProcess != NULL &&
2780       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2781       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2782 
2783     TOKEN_PRIVILEGES tp;
2784     tp.PrivilegeCount = 1;
2785     tp.Privileges[0].Luid = luid;
2786     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2787 
2788     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2789     // privilege. Check GetLastError() too. See the MSDN documentation.
2790     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2791         (GetLastError() == ERROR_SUCCESS)) {
2792       return true;
2793     }
2794   }
2795 
2796   return false;
2797 }
2798 
2799 static void cleanup_after_large_page_init() {
2800   if (_hProcess) CloseHandle(_hProcess);
2801   _hProcess = NULL;
2802   if (_hToken) CloseHandle(_hToken);
2803   _hToken = NULL;
2804 }
2805 
2806 static bool numa_interleaving_init() {
2807   bool success = false;
2808   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2809 
2810   // print a warning if the UseNUMAInterleaving flag is specified on the command line
2811   bool warn_on_failure = use_numa_interleaving_specified;
2812 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2813 
2814   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2815   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2816   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2817 
2818   if (numa_node_list_holder.build()) {
2819     if (log_is_enabled(Debug, os, cpu)) {
2820       Log(os, cpu) log;
2821       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2822       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2823         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2824       }
2825     }
2826     success = true;
2827   } else {
2828     WARN("Process does not cover multiple NUMA nodes.");
2829   }
2830   if (!success) {
2831     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2832   }
2833   return success;
2834 #undef WARN
2835 }
2836 
2837 // This routine is used whenever we need to reserve a contiguous VA range
2838 // but have to make separate VirtualAlloc calls for each piece of the range.
2839 // Reasons for doing this:
2840 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2841 //  * UseNUMAInterleaving requires a separate node for each piece
2842 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2843                                          DWORD prot,
2844                                          bool should_inject_error = false) {
2845   char * p_buf;
2846   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2847   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2848   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2849 
2850   // First reserve enough address space in advance, since we want to be
2851   // able to break a single contiguous virtual address range into multiple
2852   // large page commits, but WS2003 does not allow reserving large page space.
2853   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2854   // address space. Then we will deallocate that reservation, and re-alloc
2855   // using large pages.
2856   const size_t size_of_reserve = bytes + chunk_size;
2857   if (bytes > size_of_reserve) {
2858     // Overflowed.
2859     return NULL;
2860   }
2861   p_buf = (char *) VirtualAlloc(addr,
2862                                 size_of_reserve,  // size of Reserve
2863                                 MEM_RESERVE,
2864                                 PAGE_READWRITE);
2865   // If reservation failed, return NULL
2866   if (p_buf == NULL) return NULL;
2867   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2868   os::release_memory(p_buf, bytes + chunk_size);
2869 
2870   // we still need to round up to a page boundary (in case we are using large pages)
2871   // but not to a chunk boundary (in case NUMAInterleaveGranularity doesn't align with page size)
2872   // instead we handle this in the bytes_to_rq computation below
2873   p_buf = align_up(p_buf, page_size);
2874 
2875   // now go through and allocate one chunk at a time until all bytes are
2876   // allocated
2877   size_t  bytes_remaining = bytes;
2878   // An overflow of align_up() would have been caught above
2879   // in the calculation of size_of_reserve.
2880   char * next_alloc_addr = p_buf;
2881   HANDLE hProc = GetCurrentProcess();
2882 
2883 #ifdef ASSERT
2884   // Variable for the failure injection
2885   int ran_num = os::random();
2886   size_t fail_after = ran_num % bytes;
2887 #endif
2888 
2889   int count=0;
2890   while (bytes_remaining) {
2891     // select bytes_to_rq to get to the next chunk_size boundary
2892 
2893     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2894     // Note: allocate and commit
2895     char * p_new;
2896 
2897 #ifdef ASSERT
2898     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2899 #else
2900     const bool inject_error_now = false;
2901 #endif
2902 
2903     if (inject_error_now) {
2904       p_new = NULL;
2905     } else {
2906       if (!UseNUMAInterleaving) {
2907         p_new = (char *) VirtualAlloc(next_alloc_addr,
2908                                       bytes_to_rq,
2909                                       flags,
2910                                       prot);
2911       } else {
2912         // get the next node to use from the used_node_list
2913         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2914         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2915         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
2916       }
2917     }
2918 
2919     if (p_new == NULL) {
2920       // Free any allocated pages
2921       if (next_alloc_addr > p_buf) {
2922         // Some memory was committed so release it.
2923         size_t bytes_to_release = bytes - bytes_remaining;
2924         // NMT has yet to record any individual blocks, so we
2925         // need to create a dummy 'reserve' record to match
2926         // the release.
2927         MemTracker::record_virtual_memory_reserve((address)p_buf,
2928                                                   bytes_to_release, CALLER_PC);
2929         os::release_memory(p_buf, bytes_to_release);
2930       }
2931 #ifdef ASSERT
2932       if (should_inject_error) {
2933         log_develop_debug(pagesize)("Reserving pages individually failed.");
2934       }
2935 #endif
2936       return NULL;
2937     }
2938 
2939     bytes_remaining -= bytes_to_rq;
2940     next_alloc_addr += bytes_to_rq;
2941     count++;
2942   }
2943   // Although the memory is allocated individually, it is returned as one.
2944   // NMT records it as one block.
2945   if ((flags & MEM_COMMIT) != 0) {
2946     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
2947   } else {
2948     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
2949   }
2950 
2951   // made it this far, success
2952   return p_buf;
2953 }
2954 
2955 
2956 
2957 void os::large_page_init() {
2958   if (!UseLargePages) return;
2959 
2960   // print a warning if any large page related flag is specified on command line
2961   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
2962                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2963   bool success = false;
2964 
2965 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2966   if (request_lock_memory_privilege()) {
2967     size_t s = GetLargePageMinimum();
2968     if (s) {
2969 #if defined(IA32) || defined(AMD64)
2970       if (s > 4*M || LargePageSizeInBytes > 4*M) {
2971         WARN("JVM cannot use large pages bigger than 4mb.");
2972       } else {
2973 #endif
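             // For example, if GetLargePageMinimum() reports 2M and -XX:LargePageSizeInBytes=4M
             // was specified, the 4M request is honored because it is a multiple of the minimum;
             // a request that is not a multiple falls back to the minimum instead.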
2974         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
2975           _large_page_size = LargePageSizeInBytes;
2976         } else {
2977           _large_page_size = s;
2978         }
2979         success = true;
2980 #if defined(IA32) || defined(AMD64)
2981       }
2982 #endif
2983     } else {
2984       WARN("Large page is not supported by the processor.");
2985     }
2986   } else {
2987     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
2988   }
2989 #undef WARN
2990 
2991   const size_t default_page_size = (size_t) vm_page_size();
2992   if (success && _large_page_size > default_page_size) {
2993     _page_sizes[0] = _large_page_size;
2994     _page_sizes[1] = default_page_size;
2995     _page_sizes[2] = 0;
2996   }
2997 
2998   cleanup_after_large_page_init();
2999   UseLargePages = success;
3000 }
3001 
3002 int os::create_file_for_heap(const char* dir) {
3003 
3004   const char name_template[] = "/jvmheap.XXXXXX";
3005 
3006   size_t fullname_len = strlen(dir) + strlen(name_template);
3007   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3008   if (fullname == NULL) {
3009     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3010     return -1;
3011   }
3012   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3013   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3014 
3015   os::native_path(fullname);
3016 
3017   char *path = _mktemp(fullname);
3018   if (path == NULL) {
3019     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3020     os::free(fullname);
3021     return -1;
3022   }
3023 
3024   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3025 
3026   os::free(fullname);
3027   if (fd < 0) {
3028     warning("Problem opening file for heap (%s)", os::strerror(errno));
3029     return -1;
3030   }
3031   return fd;
3032 }
3033 
3034 // If 'base' is not NULL, this function returns NULL if it cannot map the memory at 'base'.
3035 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3036   assert(fd != -1, "File descriptor is not valid");
3037 
3038   HANDLE fh = (HANDLE)_get_osfhandle(fd);
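       // CreateFileMapping() takes the maximum mapping size as two 32-bit halves; on _LP64
       // the high half is (size >> 32), otherwise the size fits in the low DWORD and the
       // high half is simply 0.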
3039 #ifdef _LP64
3040   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3041     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3042 #else
3043   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3044     0, (DWORD)size, NULL);
3045 #endif
3046   if (fileMapping == NULL) {
3047     if (GetLastError() == ERROR_DISK_FULL) {
3048       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3049     }
3050     else {
3051       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3052     }
3053 
3054     return NULL;
3055   }
3056 
3057   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3058 
3059   CloseHandle(fileMapping);
3060 
3061   return (char*)addr;
3062 }
3063 
3064 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3065   assert(fd != -1, "File descriptor is not valid");
3066   assert(base != NULL, "Base address cannot be NULL");
3067 
3068   release_memory(base, size);
3069   return map_memory_to_file(base, size, fd);
3070 }
3071 
3072 // On win32, one cannot release just a part of reserved memory, it's an
3073 // all or nothing deal.  When we split a reservation, we must break the
3074 // reservation into two reservations.
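     // For example, splitting a 2M reservation at split == 1M releases the whole 2M and then
     // re-reserves [base, base+1M) (when 'realloc' is true) and [base+1M, base+2M) separately.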
3075 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3076                                   bool realloc) {
3077   if (size > 0) {
3078     release_memory(base, size);
3079     if (realloc) {
3080       reserve_memory(split, base);
3081     }
3082     if (size != split) {
3083       reserve_memory(size - split, base + split);
3084     }
3085   }
3086 }
3087 
3088 // Multiple threads can race in this code, but unlike POSIX-like OSes it is not possible to
3089 // unmap small sections of virtual space to obtain the requested alignment.
3090 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3091 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3092   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3093          "Alignment must be a multiple of allocation granularity (page size)");
3094   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3095 
3096   size_t extra_size = size + alignment;
3097   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3098 
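       // For example, for size == alignment == 1M we reserve 2M, locate the first 1M-aligned
       // address inside that reservation, release the whole 2M, and then try to re-reserve
       // exactly 1M at the aligned address. If another thread grabs that spot in the
       // meantime, the re-reservation fails and the loop retries.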
3099   char* aligned_base = NULL;
3100 
3101   do {
3102     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3103     if (extra_base == NULL) {
3104       return NULL;
3105     }
3106     // Do manual alignment
3107     aligned_base = align_up(extra_base, alignment);
3108 
3109     if (file_desc != -1) {
3110       os::unmap_memory(extra_base, extra_size);
3111     } else {
3112       os::release_memory(extra_base, extra_size);
3113     }
3114 
3115     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3116 
3117   } while (aligned_base == NULL);
3118 
3119   return aligned_base;
3120 }
3121 
3122 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3123   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3124          "reserve alignment");
3125   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3126   char* res;
3127   // note that if UseLargePages is on, all the areas that require interleaving
3128   // will go through reserve_memory_special rather than through here.
3129   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3130   if (!use_individual) {
3131     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3132   } else {
3133     elapsedTimer reserveTimer;
3134     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3135     // in numa interleaving, we have to allocate pages individually
3136     // (well really chunks of NUMAInterleaveGranularity size)
3137     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3138     if (res == NULL) {
3139       warning("NUMA page allocation failed");
3140     }
3141     if (Verbose && PrintMiscellaneous) {
3142       reserveTimer.stop();
3143       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3144                     reserveTimer.milliseconds(), reserveTimer.ticks());
3145     }
3146   }
3147   assert(res == NULL || addr == NULL || addr == res,
3148          "Unexpected address from reserve.");
3149 
3150   return res;
3151 }
3152 
3153 // Reserve memory at an arbitrary address, only if that area is
3154 // available (and not reserved for something else).
3155 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3156   // Windows os::reserve_memory() fails if the requested address range is
3157   // not available.
3158   return reserve_memory(bytes, requested_addr);
3159 }
3160 
3161 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3162   assert(file_desc >= 0, "file_desc is not valid");
3163   return map_memory_to_file(requested_addr, bytes, file_desc);
3164 }
3165 
3166 size_t os::large_page_size() {
3167   return _large_page_size;
3168 }
3169 
3170 bool os::can_commit_large_page_memory() {
3171   // Windows only uses large page memory when the entire region is reserved
3172   // and committed in a single VirtualAlloc() call. This may change in the
3173   // future, but with Windows 2003 it's not possible to commit on demand.
3174   return false;
3175 }
3176 
3177 bool os::can_execute_large_page_memory() {
3178   return true;
3179 }
3180 
3181 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3182                                  bool exec) {
3183   assert(UseLargePages, "only for large pages");
3184 
3185   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3186     return NULL; // Fallback to small pages.
3187   }
3188 
3189   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3190   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3191 
3192   // with large pages, there are two cases where we need to use Individual Allocation
3193   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3194   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3195   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3196     log_debug(pagesize)("Reserving large pages individually.");
3197 
3198     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3199     if (p_buf == NULL) {
3200       // give an appropriate warning message
3201       if (UseNUMAInterleaving) {
3202         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3203       }
3204       if (UseLargePagesIndividualAllocation) {
3205         warning("Individually allocated large pages failed, "
3206                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3207       }
3208       return NULL;
3209     }
3210 
3211     return p_buf;
3212 
3213   } else {
3214     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3215 
3216     // normal policy just allocate it all at once
3217     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3218     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3219     if (res != NULL) {
3220       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3221     }
3222 
3223     return res;
3224   }
3225 }
3226 
3227 bool os::release_memory_special(char* base, size_t bytes) {
3228   assert(base != NULL, "Sanity check");
3229   return release_memory(base, bytes);
3230 }
3231 
3232 void os::print_statistics() {
3233 }
3234 
3235 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3236   int err = os::get_last_error();
3237   char buf[256];
3238   size_t buf_len = os::lasterror(buf, sizeof(buf));
3239   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3240           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3241           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3242 }
3243 
3244 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3245   if (bytes == 0) {
3246     // Don't bother the OS with noops.
3247     return true;
3248   }
3249   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3250   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3251   // Don't attempt to print anything if the OS call fails. We're
3252   // probably low on resources, so the print itself may cause crashes.
3253 
3254   // unless we have NUMAInterleaving enabled, the range of a commit
3255   // is always within a reserve covered by a single VirtualAlloc
3256   // in that case we can just do a single commit for the requested size
3257   if (!UseNUMAInterleaving) {
3258     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3259       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3260       return false;
3261     }
3262     if (exec) {
3263       DWORD oldprot;
3264       // Windows doc says to use VirtualProtect to get execute permissions
3265       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3266         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3267         return false;
3268       }
3269     }
3270     return true;
3271   } else {
3272 
3273     // when NUMAInterleaving is enabled, the commit might cover a range that
3274     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3275     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3276     // returns represents the number of bytes that can be committed in one step.
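         // For example, if the commit range starts shortly before the end of one chunk and
         // extends into the next, the first VirtualAlloc() below commits only the tail of
         // that chunk; the loop then continues, one underlying allocation at a time, until
         // the whole range is committed.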
3277     size_t bytes_remaining = bytes;
3278     char * next_alloc_addr = addr;
3279     while (bytes_remaining > 0) {
3280       MEMORY_BASIC_INFORMATION alloc_info;
3281       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3282       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3283       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3284                        PAGE_READWRITE) == NULL) {
3285         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3286                                             exec);)
3287         return false;
3288       }
3289       if (exec) {
3290         DWORD oldprot;
3291         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3292                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3293           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3294                                               exec);)
3295           return false;
3296         }
3297       }
3298       bytes_remaining -= bytes_to_rq;
3299       next_alloc_addr += bytes_to_rq;
3300     }
3301   }
3302   // if we made it this far, return true
3303   return true;
3304 }
3305 
3306 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3307                           bool exec) {
3308   // alignment_hint is ignored on this OS
3309   return pd_commit_memory(addr, size, exec);
3310 }
3311 
3312 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3313                                   const char* mesg) {
3314   assert(mesg != NULL, "mesg must be specified");
3315   if (!pd_commit_memory(addr, size, exec)) {
3316     warn_fail_commit_memory(addr, size, exec);
3317     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3318   }
3319 }
3320 
3321 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3322                                   size_t alignment_hint, bool exec,
3323                                   const char* mesg) {
3324   // alignment_hint is ignored on this OS
3325   pd_commit_memory_or_exit(addr, size, exec, mesg);
3326 }
3327 
3328 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3329   if (bytes == 0) {
3330     // Don't bother the OS with noops.
3331     return true;
3332   }
3333   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3334   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3335   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3336 }
3337 
3338 bool os::pd_release_memory(char* addr, size_t bytes) {
3339   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3340 }
3341 
3342 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3343   return os::commit_memory(addr, size, !ExecMem);
3344 }
3345 
3346 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3347   return os::uncommit_memory(addr, size);
3348 }
3349 
3350 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3351   uint count = 0;
3352   bool ret = false;
3353   size_t bytes_remaining = bytes;
3354   char * next_protect_addr = addr;
3355 
3356   // Use VirtualQuery() to get the chunk size.
3357   while (bytes_remaining) {
3358     MEMORY_BASIC_INFORMATION alloc_info;
3359     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3360       return false;
3361     }
3362 
3363     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3364     // We used different APIs at allocate_pages_individually() based on UseNUMAInterleaving,
3365     // but we don't need to distinguish here, as both cases are protected by the same API.
3366     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3367     if (!ret) {
3368       warning("Failed protecting pages individually for chunk #%u", count);
3369       return false;
3370     }
3371 
3372     bytes_remaining -= bytes_to_protect;
3373     next_protect_addr += bytes_to_protect;
3374     count++;
3375   }
3376   return ret;
3377 }
3378 
3379 // Set protections specified
3380 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3381                         bool is_committed) {
3382   unsigned int p = 0;
3383   switch (prot) {
3384   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3385   case MEM_PROT_READ: p = PAGE_READONLY; break;
3386   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3387   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3388   default:
3389     ShouldNotReachHere();
3390   }
3391 
3392   DWORD old_status;
3393 
3394   // Strangely enough, on Win32 one can change protection only for committed
3395   // memory; not a big deal anyway, as 'bytes' is less than or equal to 64K here
3396   if (!is_committed) {
3397     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3398                           "cannot commit protection page");
3399   }
3400   // One cannot use os::guard_memory() here, as on Win32 guard pages
3401   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3402   //
3403   // Pages in the region become guard pages. Any attempt to access a guard page
3404   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3405   // the guard page status. Guard pages thus act as a one-time access alarm.
3406   bool ret;
3407   if (UseNUMAInterleaving) {
3408     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3409     // so we must protect the chunks individually.
3410     ret = protect_pages_individually(addr, bytes, p, &old_status);
3411   } else {
3412     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3413   }
3414 #ifdef ASSERT
3415   if (!ret) {
3416     int err = os::get_last_error();
3417     char buf[256];
3418     size_t buf_len = os::lasterror(buf, sizeof(buf));
3419     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3420           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3421           buf_len != 0 ? buf : "<no_error_string>", err);
3422   }
3423 #endif
3424   return ret;
3425 }
3426 
3427 bool os::guard_memory(char* addr, size_t bytes) {
3428   DWORD old_status;
3429   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3430 }
3431 
3432 bool os::unguard_memory(char* addr, size_t bytes) {
3433   DWORD old_status;
3434   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3435 }
3436 
3437 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3438 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3439 void os::numa_make_global(char *addr, size_t bytes)    { }
3440 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3441 bool os::numa_topology_changed()                       { return false; }
3442 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3443 int os::numa_get_group_id()                            { return 0; }
3444 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3445   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3446     // Provide an answer for UMA systems
3447     ids[0] = 0;
3448     return 1;
3449   } else {
3450     // check for size bigger than actual groups_num
3451     size = MIN2(size, numa_get_groups_num());
3452     for (int i = 0; i < (int)size; i++) {
3453       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3454     }
3455     return size;
3456   }
3457 }
3458 
3459 bool os::get_page_info(char *start, page_info* info) {
3460   return false;
3461 }
3462 
3463 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3464                      page_info* page_found) {
3465   return end;
3466 }
3467 
3468 char* os::non_memory_address_word() {
3469   // Must never look like an address returned by reserve_memory,
3470   // even in its subfields (as defined by the CPU immediate fields,
3471   // if the CPU splits constants across multiple instructions).
3472   return (char*)-1;
3473 }
3474 
3475 #define MAX_ERROR_COUNT 100
3476 #define SYS_THREAD_ERROR 0xffffffffUL
3477 
3478 void os::pd_start_thread(Thread* thread) {
3479   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3480   // ResumeThread() returns the previous suspend count:
3481   // 0:  Thread was not suspended
3482   // 1:  Thread was suspended, but is running now
3483   // >1: Thread is still suspended.
3484   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3485 }
3486 
3487 
3488 // Short sleep, direct OS call.
3489 //
3490 // ms = 0, means allow others (if any) to run.
3491 //
3492 void os::naked_short_sleep(jlong ms) {
3493   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3494   Sleep(ms);
3495 }
3496 
3497 // Windows does not provide sleep functionality with nanosecond resolution, so we
3498 // try to approximate this with spinning combined with yielding if another thread
3499 // is ready to run on the current processor.
3500 void os::naked_short_nanosleep(jlong ns) {
3501   assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3502 
3503   int64_t start = os::javaTimeNanos();
3504   do {
3505     if (SwitchToThread() == 0) {
3506       // Nothing else is ready to run on this cpu, spin a little
3507       SpinPause();
3508     }
3509   } while (os::javaTimeNanos() - start < ns);
3510 }
3511 
3512 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3513 void os::infinite_sleep() {
3514   while (true) {    // sleep forever ...
3515     Sleep(100000);  // ... 100 seconds at a time
3516   }
3517 }
3518 
3519 typedef BOOL (WINAPI * STTSignature)(void);
3520 
3521 void os::naked_yield() {
3522   // Consider passing back the return value from SwitchToThread().
3523   SwitchToThread();
3524 }
3525 
3526 // Win32 only gives you access to seven real priorities at a time,
3527 // so we compress Java's ten down to seven.  It would be better
3528 // if we dynamically adjusted relative priorities.
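     // For example, with the default mapping below both Java priorities 9 and 10 collapse to
     // THREAD_PRIORITY_HIGHEST, whereas -XX:ThreadPriorityPolicy=1 (prio_policy1) maps
     // priority 10 to THREAD_PRIORITY_TIME_CRITICAL instead.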
3529 
3530 int os::java_to_os_priority[CriticalPriority + 1] = {
3531   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3532   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3533   THREAD_PRIORITY_LOWEST,                       // 2
3534   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3535   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3536   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3537   THREAD_PRIORITY_NORMAL,                       // 6
3538   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3539   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3540   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3541   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3542   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3543 };
3544 
3545 int prio_policy1[CriticalPriority + 1] = {
3546   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3547   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3548   THREAD_PRIORITY_LOWEST,                       // 2
3549   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3550   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3551   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3552   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3553   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3554   THREAD_PRIORITY_HIGHEST,                      // 8
3555   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3556   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3557   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3558 };
3559 
3560 static int prio_init() {
3561   // If ThreadPriorityPolicy is 1, switch tables
3562   if (ThreadPriorityPolicy == 1) {
3563     int i;
3564     for (i = 0; i < CriticalPriority + 1; i++) {
3565       os::java_to_os_priority[i] = prio_policy1[i];
3566     }
3567   }
3568   if (UseCriticalJavaThreadPriority) {
3569     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3570   }
3571   return 0;
3572 }
3573 
3574 OSReturn os::set_native_priority(Thread* thread, int priority) {
3575   if (!UseThreadPriorities) return OS_OK;
3576   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3577   return ret ? OS_OK : OS_ERR;
3578 }
3579 
3580 OSReturn os::get_native_priority(const Thread* const thread,
3581                                  int* priority_ptr) {
3582   if (!UseThreadPriorities) {
3583     *priority_ptr = java_to_os_priority[NormPriority];
3584     return OS_OK;
3585   }
3586   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3587   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3588     assert(false, "GetThreadPriority failed");
3589     return OS_ERR;
3590   }
3591   *priority_ptr = os_prio;
3592   return OS_OK;
3593 }
3594 
3595 // GetCurrentThreadId() returns DWORD
3596 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3597 
3598 static int _initial_pid = 0;
3599 
3600 int os::current_process_id() {
3601   return (_initial_pid ? _initial_pid : _getpid());
3602 }
3603 
3604 int    os::win32::_vm_page_size              = 0;
3605 int    os::win32::_vm_allocation_granularity = 0;
3606 int    os::win32::_processor_type            = 0;
3607 // Processor level is not available on non-NT systems, use vm_version instead
3608 int    os::win32::_processor_level           = 0;
3609 julong os::win32::_physical_memory           = 0;
3610 size_t os::win32::_default_stack_size        = 0;
3611 
3612 intx          os::win32::_os_thread_limit    = 0;
3613 volatile intx os::win32::_os_thread_count    = 0;
3614 
3615 bool   os::win32::_is_windows_server         = false;
3616 
3617 // 6573254
3618 // Currently, the bug is observed across all the supported Windows releases,
3619 // including the latest one (as of this writing - Windows Server 2012 R2)
3620 bool   os::win32::_has_exit_bug              = true;
3621 
3622 void os::win32::initialize_system_info() {
3623   SYSTEM_INFO si;
3624   GetSystemInfo(&si);
3625   _vm_page_size    = si.dwPageSize;
3626   _vm_allocation_granularity = si.dwAllocationGranularity;
3627   _processor_type  = si.dwProcessorType;
3628   _processor_level = si.wProcessorLevel;
3629   set_processor_count(si.dwNumberOfProcessors);
3630 
3631   MEMORYSTATUSEX ms;
3632   ms.dwLength = sizeof(ms);
3633 
3634   // GlobalMemoryStatusEx() also returns ullAvailPhys (free physical memory bytes),
3635   // ullTotalVirtual, ullAvailVirtual, and dwMemoryLoad (% of memory in use)
3636   GlobalMemoryStatusEx(&ms);
3637   _physical_memory = ms.ullTotalPhys;
3638 
3639   if (FLAG_IS_DEFAULT(MaxRAM)) {
3640     // Adjust MaxRAM according to the maximum virtual address space available.
3641     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3642   }
3643 
3644   OSVERSIONINFOEX oi;
3645   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3646   GetVersionEx((OSVERSIONINFO*)&oi);
3647   switch (oi.dwPlatformId) {
3648   case VER_PLATFORM_WIN32_NT:
3649     {
3650       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3651       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3652           oi.wProductType == VER_NT_SERVER) {
3653         _is_windows_server = true;
3654       }
3655     }
3656     break;
3657   default: fatal("Unknown platform");
3658   }
3659 
3660   _default_stack_size = os::current_stack_size();
3661   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3662   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3663          "stack size not a multiple of page size");
3664 
3665   initialize_performance_counter();
3666 }
3667 
3668 
3669 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3670                                       int ebuflen) {
3671   char path[MAX_PATH];
3672   DWORD size;
3673   DWORD pathLen = (DWORD)sizeof(path);
3674   HINSTANCE result = NULL;
3675 
3676   // only allow library name without path component
3677   assert(strchr(name, '\\') == NULL, "path not allowed");
3678   assert(strchr(name, ':') == NULL, "path not allowed");
3679   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3680     jio_snprintf(ebuf, ebuflen,
3681                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3682     return NULL;
3683   }
3684 
3685   // search system directory
3686   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3687     if (size >= pathLen) {
3688       return NULL; // truncated
3689     }
3690     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3691       return NULL; // truncated
3692     }
3693     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3694       return result;
3695     }
3696   }
3697 
3698   // try Windows directory
3699   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3700     if (size >= pathLen) {
3701       return NULL; // truncated
3702     }
3703     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3704       return NULL; // truncated
3705     }
3706     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3707       return result;
3708     }
3709   }
3710 
3711   jio_snprintf(ebuf, ebuflen,
3712                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3713   return NULL;
3714 }
3715 
3716 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3717 #define EXIT_TIMEOUT 300000 /* 5 minutes */
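     // MAXIMUM_WAIT_OBJECTS is 64 on Windows, so up to 16 * 64 = 1024 exiting-thread handles
     // can be kept; once the array is full, an exiting thread first waits for an older
     // exiting thread to finish (see exit_process_or_thread() below).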
3718 
3719 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3720   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3721   return TRUE;
3722 }
3723 
3724 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3725   // Basic approach:
3726   //  - Each exiting thread registers its intent to exit and then does so.
3727   //  - A thread trying to terminate the process must wait for all
3728   //    threads currently exiting to complete their exit.
3729 
3730   if (os::win32::has_exit_bug()) {
3731     // The array holds handles of the threads that have started exiting by calling
3732     // _endthreadex().
3733     // Should be large enough to avoid blocking the exiting thread due to lack of
3734     // a free slot.
3735     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3736     static int handle_count = 0;
3737 
3738     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3739     static CRITICAL_SECTION crit_sect;
3740     static volatile DWORD process_exiting = 0;
3741     int i, j;
3742     DWORD res;
3743     HANDLE hproc, hthr;
3744 
3745     // We only attempt to register threads until a process exiting
3746     // thread manages to set the process_exiting flag. Any threads
3747     // that come through here after the process_exiting flag is set
3748     // are unregistered and will be caught in the SuspendThread()
3749     // infinite loop below.
3750     bool registered = false;
3751 
3752     // The first thread that reaches this point initializes the critical section.
3753     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3754       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3755     } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3756       if (what != EPT_THREAD) {
3757         // Atomically set process_exiting before the critical section
3758         // to increase the visibility between racing threads.
3759         Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
3760       }
3761       EnterCriticalSection(&crit_sect);
3762 
3763       if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3764         // Remove from the array those handles of the threads that have completed exiting.
3765         for (i = 0, j = 0; i < handle_count; ++i) {
3766           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3767           if (res == WAIT_TIMEOUT) {
3768             handles[j++] = handles[i];
3769           } else {
3770             if (res == WAIT_FAILED) {
3771               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3772                       GetLastError(), __FILE__, __LINE__);
3773             }
3774             // Don't keep the handle, if we failed waiting for it.
3775             CloseHandle(handles[i]);
3776           }
3777         }
3778 
3779         // If there's no free slot in the array of the kept handles, we'll have to
3780         // wait until at least one thread completes exiting.
3781         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3782           // Raise the priority of the oldest exiting thread to increase its chances
3783           // to complete sooner.
3784           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3785           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3786           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3787             i = (res - WAIT_OBJECT_0);
3788             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3789             for (; i < handle_count; ++i) {
3790               handles[i] = handles[i + 1];
3791             }
3792           } else {
3793             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3794                     (res == WAIT_FAILED ? "failed" : "timed out"),
3795                     GetLastError(), __FILE__, __LINE__);
3796             // Don't keep handles, if we failed waiting for them.
3797             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3798               CloseHandle(handles[i]);
3799             }
3800             handle_count = 0;
3801           }
3802         }
3803 
3804         // Store a duplicate of the current thread handle in the array of handles.
3805         hproc = GetCurrentProcess();
3806         hthr = GetCurrentThread();
3807         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3808                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3809           warning("DuplicateHandle failed (%u) in %s: %d\n",
3810                   GetLastError(), __FILE__, __LINE__);
3811 
3812           // We can't register this thread (no more handles) so this thread
3813           // may be racing with a thread that is calling exit(). If the thread
3814           // that is calling exit() has managed to set the process_exiting
3815           // flag, then this thread will be caught in the SuspendThread()
3816           // infinite loop below which closes that race. A small timing
3817           // window remains before the process_exiting flag is set, but it
3818           // is only exposed when we are out of handles.
3819         } else {
3820           ++handle_count;
3821           registered = true;
3822 
3823           // The current exiting thread has stored its handle in the array, and now
3824           // should leave the critical section before calling _endthreadex().
3825         }
3826 
3827       } else if (what != EPT_THREAD && handle_count > 0) {
3828         jlong start_time, finish_time, timeout_left;
3829         // Before ending the process, make sure all the threads that had called
3830         // _endthreadex() completed.
3831 
3832         // Set the priority level of the current thread to the same value as
3833         // the priority level of exiting threads.
3834         // This is to ensure it will be given a fair chance to execute if
3835         // the timeout expires.
3836         hthr = GetCurrentThread();
3837         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3838         start_time = os::javaTimeNanos();
3839         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3840         for (i = 0; ; ) {
3841           int portion_count = handle_count - i;
3842           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3843             portion_count = MAXIMUM_WAIT_OBJECTS;
3844           }
3845           for (j = 0; j < portion_count; ++j) {
3846             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3847           }
3848           timeout_left = (finish_time - start_time) / 1000000L;
3849           if (timeout_left < 0) {
3850             timeout_left = 0;
3851           }
3852           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
3853           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
3854             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3855                     (res == WAIT_FAILED ? "failed" : "timed out"),
3856                     GetLastError(), __FILE__, __LINE__);
3857             // Reset portion_count so we close the remaining
3858             // handles due to this error.
3859             portion_count = handle_count - i;
3860           }
3861           for (j = 0; j < portion_count; ++j) {
3862             CloseHandle(handles[i + j]);
3863           }
3864           if ((i += portion_count) >= handle_count) {
3865             break;
3866           }
3867           start_time = os::javaTimeNanos();
3868         }
3869         handle_count = 0;
3870       }
3871 
3872       LeaveCriticalSection(&crit_sect);
3873     }
3874 
3875     if (!registered &&
3876         OrderAccess::load_acquire(&process_exiting) != 0 &&
3877         process_exiting != GetCurrentThreadId()) {
3878       // Some other thread is about to call exit(), so we don't let
3879       // the current unregistered thread proceed to exit() or _endthreadex()
3880       while (true) {
3881         SuspendThread(GetCurrentThread());
3882         // Avoid busy-wait loop, if SuspendThread() failed.
3883         Sleep(EXIT_TIMEOUT);
3884       }
3885     }
3886   }
3887 
3888   // We are here if either
3889   // - there's no 'race at exit' bug on this OS release;
3890   // - initialization of the critical section failed (unlikely);
3891   // - the current thread has registered itself and left the critical section;
3892   // - the process-exiting thread has raised the flag and left the critical section.
3893   if (what == EPT_THREAD) {
3894     _endthreadex((unsigned)exit_code);
3895   } else if (what == EPT_PROCESS) {
3896     ::exit(exit_code);
3897   } else {
3898     _exit(exit_code);
3899   }
3900 
3901   // Should not reach here
3902   return exit_code;
3903 }
3904 
3905 #undef EXIT_TIMEOUT
3906 
3907 void os::win32::setmode_streams() {
3908   _setmode(_fileno(stdin), _O_BINARY);
3909   _setmode(_fileno(stdout), _O_BINARY);
3910   _setmode(_fileno(stderr), _O_BINARY);
3911 }
3912 
3913 
3914 bool os::is_debugger_attached() {
3915   return IsDebuggerPresent() ? true : false;
3916 }
3917 
3918 
3919 void os::wait_for_keypress_at_exit(void) {
3920   if (PauseAtExit) {
3921     fprintf(stderr, "Press any key to continue...\n");
3922     fgetc(stdin);
3923   }
3924 }
3925 
3926 
3927 bool os::message_box(const char* title, const char* message) {
3928   int result = MessageBox(NULL, message, title,
3929                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3930   return result == IDYES;
3931 }
3932 
3933 #ifndef PRODUCT
3934 #ifndef _WIN64
3935 // Helpers to check whether NX protection is enabled
3936 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
3937   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
3938       pex->ExceptionRecord->NumberParameters > 0 &&
3939       pex->ExceptionRecord->ExceptionInformation[0] ==
3940       EXCEPTION_INFO_EXEC_VIOLATION) {
3941     return EXCEPTION_EXECUTE_HANDLER;
3942   }
3943   return EXCEPTION_CONTINUE_SEARCH;
3944 }
3945 
3946 void nx_check_protection() {
3947   // If NX is enabled we'll get an exception calling into code on the stack
3948   char code[] = { (char)0xC3 }; // ret
3949   void *code_ptr = (void *)code;
3950   __try {
3951     __asm call code_ptr
3952   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
3953     tty->print_raw_cr("NX protection detected.");
3954   }
3955 }
3956 #endif // _WIN64
3957 #endif // PRODUCT
3958 
3959 // This is called _before_ the global arguments have been parsed
3960 void os::init(void) {
3961   _initial_pid = _getpid();
3962 
3963   init_random(1234567);
3964 
3965   win32::initialize_system_info();
3966   win32::setmode_streams();
3967   init_page_sizes((size_t) win32::vm_page_size());
3968 
3969   // This may be overridden later when argument processing is done.
3970   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
3971 
3972   // Initialize main_process and main_thread
3973   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
3974   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
3975                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
3976     fatal("DuplicateHandle failed\n");
3977   }
3978   main_thread_id = (int) GetCurrentThreadId();
3979 
3980   // initialize fast thread access - only used for 32-bit
3981   win32::initialize_thread_ptr_offset();
3982 }
3983 
3984 // To install functions for atexit processing
3985 extern "C" {
3986   static void perfMemory_exit_helper() {
3987     perfMemory_exit();
3988   }
3989 }
3990 
3991 static jint initSock();
3992 
3993 // this is called _after_ the global arguments have been parsed
3994 jint os::init_2(void) {
3995 
3996   // This could be set any time but all platforms
3997   // have to set it the same so we have to mirror Solaris.
3998   DEBUG_ONLY(os::set_mutex_init_done();)
3999 
4000   // Setup Windows Exceptions
4001 
4002 #if INCLUDE_AOT
4003   // If AOT is enabled we need to install a vectored exception handler
4004   // in order to forward implicit exceptions from code in AOT
4005   // generated DLLs.  This is necessary since these DLLs are not
4006   // registered for structured exceptions like codecache methods are.
4007   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4008     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4009   }
4010 #endif
4011 
4012   // for debugging float code generation bugs
4013   if (ForceFloatExceptions) {
4014 #ifndef  _WIN64
4015     static long fp_control_word = 0;
4016     __asm { fstcw fp_control_word }
4017     // see Intel PPro Manual, Vol. 2, p 7-16
4018     const long precision = 0x20;
4019     const long underflow = 0x10;
4020     const long overflow  = 0x08;
4021     const long zero_div  = 0x04;
4022     const long denorm    = 0x02;
4023     const long invalid   = 0x01;
4024     fp_control_word |= invalid;
4025     __asm { fldcw fp_control_word }
4026 #endif
4027   }
4028 
4029   // If stack_commit_size is 0, Windows will reserve the default size,
4030   // but only commit a small portion of it.
4031   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4032   size_t default_reserve_size = os::win32::default_stack_size();
4033   size_t actual_reserve_size = stack_commit_size;
4034   if (stack_commit_size < default_reserve_size) {
4035     // If stack_commit_size == 0, we want this too
4036     actual_reserve_size = default_reserve_size;
4037   }
4038 
4039   // Check minimum allowable stack size for thread creation and to initialize
4040   // the java system classes, including StackOverflowError - depends on page
4041   // size.  Add two 4K pages for compiler2 recursion in main thread.
4042   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4043   // class initialization depending on 32 or 64 bit VM.
4044   size_t min_stack_allowed =
4045             (size_t)(JavaThread::stack_guard_zone_size() +
4046                      JavaThread::stack_shadow_zone_size() +
4047                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4048 
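       // For example, on a 64-bit COMPILER2 build (BytesPerWord == 8) the last term above is
       // (4*8 + 2) * 4K = 136K, on top of the guard and shadow zone sizes.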
4049   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4050 
4051   if (actual_reserve_size < min_stack_allowed) {
4052     tty->print_cr("\nThe Java thread stack size specified is too small. "
4053                   "Specify at least " SIZE_FORMAT "k",
4054                   min_stack_allowed / K);
4055     return JNI_ERR;
4056   }
4057 
4058   JavaThread::set_stack_size_at_create(stack_commit_size);
4059 
4060   // Calculate theoretical max. size of Threads to guard against artificial
4061   // out-of-memory situations, where all available address-space has been
4062   // reserved by thread stacks.
4063   assert(actual_reserve_size != 0, "Must have a stack");
4064 
4065   // Calculate the thread limit when we should start doing Virtual Memory
4066   // banging. Currently when the threads will have used all but 200Mb of space.
4067   //
4068   // TODO: consider performing a similar calculation for commit size instead
4069   // as reserve size, since on a 64-bit platform we'll run into that more
4070   // often than running out of virtual memory space.  We can use the
4071   // lower value of the two calculations as the os_thread_limit.
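       // For example, on a 32-bit VM (BitsPerWord == 32) with a 1M per-thread reserve this
       // works out to roughly (2G - 200M) / 1M = 1848 threads.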
4072   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4073   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4074 
4075   // at exit methods are called in the reverse order of their registration.
4076   // there is no limit to the number of functions registered. atexit does
4077   // not set errno.
4078 
4079   if (PerfAllowAtExitRegistration) {
4080     // only register atexit functions if PerfAllowAtExitRegistration is set.
4081     // atexit functions can be delayed until process exit time, which
4082     // can be problematic for embedded VM situations. Embedded VMs should
4083     // call DestroyJavaVM() to assure that VM resources are released.
4084 
4085     // note: perfMemory_exit_helper atexit function may be removed in
4086     // the future if the appropriate cleanup code can be added to the
4087     // VM_Exit VMOperation's doit method.
4088     if (atexit(perfMemory_exit_helper) != 0) {
4089       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4090     }
4091   }
4092 
4093 #ifndef _WIN64
4094   // Print something if NX is enabled (win32 on AMD64)
4095   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4096 #endif
4097 
4098   // initialize thread priority policy
4099   prio_init();
4100 
4101   if (UseNUMA && !ForceNUMA) {
4102     UseNUMA = false; // We don't fully support this yet
4103   }
4104 
4105   if (UseNUMAInterleaving) {
4106     // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4107     bool success = numa_interleaving_init();
4108     if (!success) UseNUMAInterleaving = false;
4109   }
4110 
4111   if (initSock() != JNI_OK) {
4112     return JNI_ERR;
4113   }
4114 
4115   SymbolEngine::recalc_search_path();
4116 
4117   // Initialize data for jdk.internal.misc.Signal
4118   if (!ReduceSignalUsage) {
4119     jdk_misc_signal_init();
4120   }
4121 
4122   return JNI_OK;
4123 }
4124 
4125 // Mark the polling page as unreadable
4126 void os::make_polling_page_unreadable(void) {
4127   DWORD old_status;
4128   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4129                       PAGE_NOACCESS, &old_status)) {
4130     fatal("Could not disable polling page");
4131   }
4132 }
4133 
4134 // Mark the polling page as readable
4135 void os::make_polling_page_readable(void) {
4136   DWORD old_status;
4137   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4138                       PAGE_READONLY, &old_status)) {
4139     fatal("Could not enable polling page");
4140   }
4141 }
4142 
4143 // combine the high and low DWORD into a ULONGLONG
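     // (e.g., high_word == 0x00000001 and low_word == 0x00000002 yields 0x0000000100000002)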
4144 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4145   ULONGLONG value = high_word;
4146   value <<= sizeof(high_word) * 8;
4147   value |= low_word;
4148   return value;
4149 }
4150 
4151 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4152 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4153   ::memset((void*)sbuf, 0, sizeof(struct stat));
4154   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4155   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4156                                   file_data.ftLastWriteTime.dwLowDateTime);
4157   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4158                                   file_data.ftCreationTime.dwLowDateTime);
4159   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4160                                   file_data.ftLastAccessTime.dwLowDateTime);
4161   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4162     sbuf->st_mode |= S_IFDIR;
4163   } else {
4164     sbuf->st_mode |= S_IFREG;
4165   }
4166 }
4167 
4168 // The following function is adapted from java.base/windows/native/libjava/canonicalize_md.c
4169 // Creates a UNC path from a single-byte path. The returned buffer is
4170 // allocated on the C heap and needs to be freed by the caller.
4171 // Returns NULL on error.
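     // For example, "D:\very\long\path" becomes L"\\?\D:\very\long\path"; a UNC path such as
     // "\\server\share\file" gets the L"\\?\UNC" prefix prepended instead; a path that already
     // starts with "\\?\" is only converted to wide characters, without an extra prefix.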
4172 static wchar_t* create_unc_path(const char* path, errno_t &err) {
4173   wchar_t* wpath = NULL;
4174   size_t converted_chars = 0;
4175   size_t path_len = strlen(path) + 1; // includes the terminating NULL
4176   if (path[0] == '\\' && path[1] == '\\') {
4177     if (path[2] == '?' && path[3] == '\\'){
4178       // if it already has a \\?\ don't do the prefix
4179       wpath = (wchar_t*)os::malloc(path_len * sizeof(wchar_t), mtInternal);
4180       if (wpath != NULL) {
4181         err = ::mbstowcs_s(&converted_chars, wpath, path_len, path, path_len);
4182       } else {
4183         err = ENOMEM;
4184       }
4185     } else {
4186       // only a UNC pathname includes double slashes here
4187       wpath = (wchar_t*)os::malloc((path_len + 7) * sizeof(wchar_t), mtInternal);
4188       if (wpath != NULL) {
4189         ::wcscpy(wpath, L"\\\\?\\UNC\0");
4190         err = ::mbstowcs_s(&converted_chars, &wpath[7], path_len, path, path_len);
4191       } else {
4192         err = ENOMEM;
4193       }
4194     }
4195   } else {
4196     wpath = (wchar_t*)os::malloc((path_len + 4) * sizeof(wchar_t), mtInternal);
4197     if (wpath != NULL) {
4198       ::wcscpy(wpath, L"\\\\?\\\0");
4199       err = ::mbstowcs_s(&converted_chars, &wpath[4], path_len, path, path_len);
4200     } else {
4201       err = ENOMEM;
4202     }
4203   }
4204   return wpath;
4205 }
4206 
4207 static void destroy_unc_path(wchar_t* wpath) {
4208   os::free(wpath);
4209 }
4210 
4211 int os::stat(const char *path, struct stat *sbuf) {
4212   char* pathbuf = (char*)os::strdup(path, mtInternal);
4213   if (pathbuf == NULL) {
4214     errno = ENOMEM;
4215     return -1;
4216   }
4217   os::native_path(pathbuf);
4218   int ret;
4219   WIN32_FILE_ATTRIBUTE_DATA file_data;
4220   // Not using stat() to avoid the problem described in JDK-6539723
4221   if (strlen(path) < MAX_PATH) {
4222     BOOL bret = ::GetFileAttributesExA(pathbuf, GetFileExInfoStandard, &file_data);
4223     if (!bret) {
4224       errno = ::GetLastError();
4225       ret = -1;
4226     }
4227     else {
4228       file_attribute_data_to_stat(sbuf, file_data);
4229       ret = 0;
4230     }
4231   } else {
4232     errno_t err = ERROR_SUCCESS;
4233     wchar_t* wpath = create_unc_path(pathbuf, err);
4234     if (err != ERROR_SUCCESS) {
4235       if (wpath != NULL) {
4236         destroy_unc_path(wpath);
4237       }
4238       os::free(pathbuf);
4239       errno = err;
4240       return -1;
4241     }
4242     BOOL bret = ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &file_data);
4243     if (!bret) {
4244       errno = ::GetLastError();
4245       ret = -1;
4246     } else {
4247       file_attribute_data_to_stat(sbuf, file_data);
4248       ret = 0;
4249     }
4250     destroy_unc_path(wpath);
4251   }
4252   os::free(pathbuf);
4253   return ret;
4254 }
4255 
4256 static HANDLE create_read_only_file_handle(const char* file) {
4257   if (file == NULL) {
4258     return INVALID_HANDLE_VALUE;
4259   }
4260 
4261   char* nativepath = (char*)os::strdup(file, mtInternal);
4262   if (nativepath == NULL) {
4263     errno = ENOMEM;
4264     return INVALID_HANDLE_VALUE;
4265   }
4266   os::native_path(nativepath);
4267 
4268   size_t len = strlen(nativepath);
4269   HANDLE handle = INVALID_HANDLE_VALUE;
4270 
4271   if (len < MAX_PATH) {
4272     handle = ::CreateFile(nativepath, 0, FILE_SHARE_READ,
4273                           NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4274   } else {
4275     errno_t err = ERROR_SUCCESS;
4276     wchar_t* wfile = create_unc_path(nativepath, err);
4277     if (err != ERROR_SUCCESS) {
4278       if (wfile != NULL) {
4279         destroy_unc_path(wfile);
4280       }
4281       os::free(nativepath);
4282       return INVALID_HANDLE_VALUE;
4283     }
4284     handle = ::CreateFileW(wfile, 0, FILE_SHARE_READ,
4285                            NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4286     destroy_unc_path(wfile);
4287   }
4288 
4289   os::free(nativepath);
4290   return handle;
4291 }
4292 
4293 bool os::same_files(const char* file1, const char* file2) {
4294 
4295   if (file1 == NULL && file2 == NULL) {
4296     return true;
4297   }
4298 
4299   if (file1 == NULL || file2 == NULL) {
4300     return false;
4301   }
4302 
4303   if (strcmp(file1, file2) == 0) {
4304     return true;
4305   }
4306 
4307   HANDLE handle1 = create_read_only_file_handle(file1);
4308   HANDLE handle2 = create_read_only_file_handle(file2);
4309   bool result = false;
4310 
4311   // if we could open both paths...
4312   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4313     BY_HANDLE_FILE_INFORMATION fileInfo1;
4314     BY_HANDLE_FILE_INFORMATION fileInfo2;
4315     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4316       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4317       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4318       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4319         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4320         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4321         result = true;
4322       }
4323     }
4324   }
4325 
4326   // free the handles
4327   if (handle1 != INVALID_HANDLE_VALUE) {
4328     ::CloseHandle(handle1);
4329   }
4330 
4331   if (handle2 != INVALID_HANDLE_VALUE) {
4332     ::CloseHandle(handle2);
4333   }
4334 
4335   return result;
4336 }
4337 
4338 
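     // FT2INT64 combines the two 32-bit halves of a FILETIME into a single 64-bit value,
     // i.e. a count of 100-nanosecond intervals.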
4339 #define FT2INT64(ft) \
4340   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4341 
4342 
4343 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4344 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4345 // of a thread.
4346 //
4347 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4348 // the fast estimate available on the platform.
4349 
4350 // current_thread_cpu_time() is not optimized for Windows yet
4351 jlong os::current_thread_cpu_time() {
4352   // return user + sys since the cost is the same
4353   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4354 }
4355 
4356 jlong os::thread_cpu_time(Thread* thread) {
4357   // consistent with what current_thread_cpu_time() returns.
4358   return os::thread_cpu_time(thread, true /* user+sys */);
4359 }
4360 
4361 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4362   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4363 }
4364 
4365 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4366   // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4367   // If this function changes, os::is_thread_cpu_time_supported() should too
4368   FILETIME CreationTime;
4369   FILETIME ExitTime;
4370   FILETIME KernelTime;
4371   FILETIME UserTime;
4372 
4373   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4374                       &ExitTime, &KernelTime, &UserTime) == 0) {
4375     return -1;
4376   } else if (user_sys_cpu_time) {
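         // GetThreadTimes() reports in 100-ns FILETIME units; multiply by 100
         // to convert the totals to nanoseconds.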
4377     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4378   } else {
4379     return FT2INT64(UserTime) * 100;
4380   }
4381 }
4382 
4383 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4384   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4385   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4386   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4387   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4388 }
4389 
4390 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4391   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4392   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4393   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4394   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4395 }
4396 
4397 bool os::is_thread_cpu_time_supported() {
4398   // see os::thread_cpu_time
4399   FILETIME CreationTime;
4400   FILETIME ExitTime;
4401   FILETIME KernelTime;
4402   FILETIME UserTime;
4403 
4404   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4405                       &KernelTime, &UserTime) == 0) {
4406     return false;
4407   } else {
4408     return true;
4409   }
4410 }
4411 
4412 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4413 // It does have primitives (PDH API) to get CPU usage and run queue length.
4414 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4415 // If we wanted to implement loadavg on Windows, we have a few options:
4416 //
4417 // a) Query CPU usage and run queue length and "fake" an answer by
4418 //    returning the CPU usage if it's under 100%, and the run queue
4419 //    length otherwise.  It turns out that querying is pretty slow
4420 //    on Windows, on the order of 200 microseconds on a fast machine.
4421 //    Note that on Windows the CPU usage value is the % usage
4422 //    since the last time the API was called (and the first call
4423 //    returns 100%), so we'd have to deal with that as well.
4424 //
4425 // b) Sample the "fake" answer using a sampling thread and store
4426 //    the answer in a global variable.  The call to loadavg would
4427 //    just return the value of the global, avoiding the slow query.
4428 //
4429 // c) Sample a better answer using exponential decay to smooth the
4430 //    value.  This is basically the algorithm used by UNIX kernels.
4431 //
4432 // Note that sampling thread starvation could affect both (b) and (c).
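     //
     // Purely as an illustration (an untested sketch shown only in this comment),
     // option (a) could read the run queue length through the PDH API roughly as
     // follows, assuming <pdh.h> is included and pdh.lib is linked:
     //
     //   PDH_HQUERY query;
     //   PDH_HCOUNTER counter;
     //   PDH_FMT_COUNTERVALUE value;
     //   PdhOpenQuery(NULL, 0, &query);
     //   PdhAddEnglishCounterA(query, "\\System\\Processor Queue Length", 0, &counter);
     //   PdhCollectQueryData(query);
     //   PdhGetFormattedCounterValue(counter, PDH_FMT_DOUBLE, NULL, &value);
     //   PdhCloseQuery(query);
     //   // value.doubleValue now holds the instantaneous run queue length
     //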
4433 int os::loadavg(double loadavg[], int nelem) {
4434   return -1;
4435 }
4436 
4437 
4438 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4439 bool os::dont_yield() {
4440   return DontYieldALot;
4441 }
4442 
4443 // This method is a slightly reworked copy of JDK's sysOpen
4444 // from src/windows/hpi/src/sys_api_md.c
4445 
4446 int os::open(const char *path, int oflag, int mode) {
4447   char* pathbuf = (char*)os::strdup(path, mtInternal);
4448   if (pathbuf == NULL) {
4449     errno = ENOMEM;
4450     return -1;
4451   }
4452   os::native_path(pathbuf);
4453   int ret;
4454   if (strlen(path) < MAX_PATH) {
4455     ret = ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4456   } else {
4457     errno_t err = ERROR_SUCCESS;
4458     wchar_t* wpath = create_unc_path(pathbuf, err);
4459     if (err != ERROR_SUCCESS) {
4460       if (wpath != NULL) {
4461         destroy_unc_path(wpath);
4462       }
4463       os::free(pathbuf);
4464       errno = err;
4465       return -1;
4466     }
4467     ret = ::_wopen(wpath, oflag | O_BINARY | O_NOINHERIT, mode);
4468     if (ret == -1) {
4469       errno = ::GetLastError();
4470     }
4471     destroy_unc_path(wpath);
4472   }
4473   os::free(pathbuf);
4474   return ret;
4475 }
4476 
4477 FILE* os::open(int fd, const char* mode) {
4478   return ::_fdopen(fd, mode);
4479 }
4480 
4481 // Is a (classpath) directory empty?
4482 bool os::dir_is_empty(const char* path) {
4483   char* search_path = (char*)os::malloc(strlen(path) + 3, mtInternal);
4484   if (search_path == NULL) {
4485     errno = ENOMEM;
4486     return false;
4487   }
4488   strcpy(search_path, path);
4489   os::native_path(search_path);
4490   // Append "*", or possibly "\\*", to path
4491   if (search_path[1] == ':' &&
4492        (search_path[2] == '\0' ||
4493          (search_path[2] == '\\' && search_path[3] == '\0'))) {
4494     // No '\\' needed for cases like "Z:" or "Z:\"
4495     strcat(search_path, "*");
4496   }
4497   else {
4498     strcat(search_path, "\\*");
4499   }
4500   errno_t err = ERROR_SUCCESS;
4501   wchar_t* wpath = create_unc_path(search_path, err);
4502   if (err != ERROR_SUCCESS) {
4503     if (wpath != NULL) {
4504       destroy_unc_path(wpath);
4505     }
4506     os::free(search_path);
4507     errno = err;
4508     return false;
4509   }
4510   WIN32_FIND_DATAW fd;
4511   HANDLE f = ::FindFirstFileW(wpath, &fd);
4512   destroy_unc_path(wpath);
4513   bool is_empty = true;
4514   if (f != INVALID_HANDLE_VALUE) {
4515     while (is_empty && ::FindNextFileW(f, &fd)) {
4516       // An empty directory contains only the current directory entry (".")
4517       // and the parent directory entry ("..").
4518       if ((wcscmp(fd.cFileName, L".") != 0) &&
4519           (wcscmp(fd.cFileName, L"..") != 0)) {
4520         is_empty = false;
4521       }
4522     }
4523     FindClose(f);
4524   }
4525   os::free(search_path);
4526   return is_empty;
4527 }
4528 
4529 // create binary file, rewriting existing file if required
4530 int os::create_binary_file(const char* path, bool rewrite_existing) {
4531   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4532   if (!rewrite_existing) {
4533     oflags |= _O_EXCL;
4534   }
4535   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4536 }
4537 
4538 // return current position of file pointer
4539 jlong os::current_file_offset(int fd) {
4540   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4541 }
4542 
4543 // move file pointer to the specified offset
4544 jlong os::seek_to_file_offset(int fd, jlong offset) {
4545   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4546 }
4547 
4548 
4549 jlong os::lseek(int fd, jlong offset, int whence) {
4550   return (jlong) ::_lseeki64(fd, offset, whence);
4551 }
4552 
4553 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4554   OVERLAPPED ov;
4555   DWORD nread;
4556   BOOL result;
4557 
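       // Split the 64-bit offset across the two 32-bit OVERLAPPED fields so that
       // ReadFile() performs a positioned read starting at 'offset'.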
4558   ZeroMemory(&ov, sizeof(ov));
4559   ov.Offset = (DWORD)offset;
4560   ov.OffsetHigh = (DWORD)(offset >> 32);
4561 
4562   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4563 
4564   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4565 
4566   return result ? nread : 0;
4567 }
4568 
4569 
4570 // This method is a slightly reworked copy of JDK's sysNativePath
4571 // from src/windows/hpi/src/path_md.c
4572 
4573 // Convert a pathname to native format.  On win32, this involves forcing all
4574 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4575 // sometimes rejects '/') and removing redundant separators.  The input path is
4576 // assumed to have been converted into the character encoding used by the local
4577 // system.  Because this might be a double-byte encoding, care is taken to
4578 // treat double-byte lead characters correctly.
4579 //
4580 // This procedure modifies the given path in place, as the result is never
4581 // longer than the original.  There is no error return; this operation always
4582 // succeeds.
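     //
     // For example, "/c:/a//b/" becomes "c:\a\b", and "//host/share/" becomes
     // "\\host\share".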
4583 char * os::native_path(char *path) {
4584   char *src = path, *dst = path, *end = path;
4585   char *colon = NULL;  // If a drive specifier is found, this will
4586                        // point to the colon following the drive letter
4587 
4588   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4589   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4590           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4591 
4592   // Check for leading separators
4593 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4594   while (isfilesep(*src)) {
4595     src++;
4596   }
4597 
4598   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4599     // Remove leading separators if followed by drive specifier.  This
4600     // hack is necessary to support file URLs containing drive
4601     // specifiers (e.g., "file://c:/path").  As a side effect,
4602     // "/c:/path" can be used as an alternative to "c:/path".
4603     *dst++ = *src++;
4604     colon = dst;
4605     *dst++ = ':';
4606     src++;
4607   } else {
4608     src = path;
4609     if (isfilesep(src[0]) && isfilesep(src[1])) {
4610       // UNC pathname: Retain first separator; leave src pointed at
4611       // second separator so that further separators will be collapsed
4612       // into the second separator.  The result will be a pathname
4613       // beginning with "\\\\" followed (most likely) by a host name.
4614       src = dst = path + 1;
4615       path[0] = '\\';     // Force first separator to '\\'
4616     }
4617   }
4618 
4619   end = dst;
4620 
4621   // Remove redundant separators from remainder of path, forcing all
4622   // separators to be '\\' rather than '/'. Also, single byte space
4623   // characters are removed from the end of the path because those
4624   // are not legal ending characters on this operating system.
4625   //
4626   while (*src != '\0') {
4627     if (isfilesep(*src)) {
4628       *dst++ = '\\'; src++;
4629       while (isfilesep(*src)) src++;
4630       if (*src == '\0') {
4631         // Check for trailing separator
4632         end = dst;
4633         if (colon == dst - 2) break;  // "z:\\"
4634         if (dst == path + 1) break;   // "\\"
4635         if (dst == path + 2 && isfilesep(path[0])) {
4636           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4637           // beginning of a UNC pathname.  Even though it is not, by
4638           // itself, a valid UNC pathname, we leave it as is in order
4639           // to be consistent with the path canonicalizer as well
4640           // as the win32 APIs, which treat this case as an invalid
4641           // UNC pathname rather than as an alias for the root
4642           // directory of the current drive.
4643           break;
4644         }
4645         end = --dst;  // Path does not denote a root directory, so
4646                       // remove trailing separator
4647         break;
4648       }
4649       end = dst;
4650     } else {
4651       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4652         *dst++ = *src++;
4653         if (*src) *dst++ = *src++;
4654         end = dst;
4655       } else {  // Copy a single-byte character
4656         char c = *src++;
4657         *dst++ = c;
4658         // Space is not a legal ending character
4659         if (c != ' ') end = dst;
4660       }
4661     }
4662   }
4663 
4664   *end = '\0';
4665 
4666   // For "z:", add "." to work around a bug in the C runtime library
4667   if (colon == dst - 1) {
4668     path[2] = '.';
4669     path[3] = '\0';
4670   }
4671 
4672   return path;
4673 }
4674 
4675 // This code is a copy of JDK's sysSetLength
4676 // from src/windows/hpi/src/sys_api_md.c
4677 
4678 int os::ftruncate(int fd, jlong length) {
4679   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4680   long high = (long)(length >> 32);
4681   DWORD ret;
4682 
4683   if (h == (HANDLE)(-1)) {
4684     return -1;
4685   }
4686 
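       // SetFilePointer() may legitimately return 0xFFFFFFFF as the low 32 bits of
       // the new position, so failure is only confirmed by checking GetLastError().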
4687   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4688   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4689     return -1;
4690   }
4691 
4692   if (::SetEndOfFile(h) == FALSE) {
4693     return -1;
4694   }
4695 
4696   return 0;
4697 }
4698 
4699 int os::get_fileno(FILE* fp) {
4700   return _fileno(fp);
4701 }
4702 
4703 // This code is a copy of JDK's sysSync
4704 // from src/windows/hpi/src/sys_api_md.c
4705 // except for the legacy workaround for a bug in Win 98
4706 
4707 int os::fsync(int fd) {
4708   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4709 
4710   if ((!::FlushFileBuffers(handle)) &&
4711       (GetLastError() != ERROR_ACCESS_DENIED)) {
4712     // from winerror.h
4713     return -1;
4714   }
4715   return 0;
4716 }
4717 
4718 static int nonSeekAvailable(int, long *);
4719 static int stdinAvailable(int, long *);
4720 
4721 // This code is a copy of JDK's sysAvailable
4722 // from src/windows/hpi/src/sys_api_md.c
4723 
4724 int os::available(int fd, jlong *bytes) {
4725   jlong cur, end;
4726   struct _stati64 stbuf64;
4727 
4728   if (::_fstati64(fd, &stbuf64) >= 0) {
4729     int mode = stbuf64.st_mode;
4730     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4731       int ret;
4732       long lpbytes;
4733       if (fd == 0) {
4734         ret = stdinAvailable(fd, &lpbytes);
4735       } else {
4736         ret = nonSeekAvailable(fd, &lpbytes);
4737       }
4738       (*bytes) = (jlong)(lpbytes);
4739       return ret;
4740     }
4741     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4742       return FALSE;
4743     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4744       return FALSE;
4745     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4746       return FALSE;
4747     }
4748     *bytes = end - cur;
4749     return TRUE;
4750   } else {
4751     return FALSE;
4752   }
4753 }
4754 
4755 void os::flockfile(FILE* fp) {
4756   _lock_file(fp);
4757 }
4758 
4759 void os::funlockfile(FILE* fp) {
4760   _unlock_file(fp);
4761 }
4762 
4763 // This code is a copy of JDK's nonSeekAvailable
4764 // from src/windows/hpi/src/sys_api_md.c
4765 
4766 static int nonSeekAvailable(int fd, long *pbytes) {
4767   // This is used for available on non-seekable devices
4768   // (like both named and anonymous pipes, such as pipes
4769   //  connected to an exec'd process).
4770   // Standard Input is a special case.
4771   HANDLE han;
4772 
4773   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4774     return FALSE;
4775   }
4776 
4777   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4778     // PeekNamedPipe fails when at EOF.  In that case we
4779     // simply make *pbytes = 0 which is consistent with the
4780     // behavior we get on Solaris when an fd is at EOF.
4781     // The only alternative is to raise an Exception,
4782     // which isn't really warranted.
4783     //
4784     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4785       return FALSE;
4786     }
4787     *pbytes = 0;
4788   }
4789   return TRUE;
4790 }
4791 
4792 #define MAX_INPUT_EVENTS 2000
4793 
4794 // This code is a copy of JDK's stdinAvailable
4795 // from src/windows/hpi/src/sys_api_md.c
4796 
4797 static int stdinAvailable(int fd, long *pbytes) {
4798   HANDLE han;
4799   DWORD numEventsRead = 0;  // Number of events read from buffer
4800   DWORD numEvents = 0;      // Number of events in buffer
4801   DWORD i = 0;              // Loop index
4802   DWORD curLength = 0;      // Position marker
4803   DWORD actualLength = 0;   // Number of bytes readable
4804   BOOL error = FALSE;       // Error holder
4805   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4806 
4807   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4808     return FALSE;
4809   }
4810 
4811   // Construct an array of input records in the console buffer
4812   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4813   if (error == 0) {
4814     return nonSeekAvailable(fd, pbytes);
4815   }
4816 
4817   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4818   if (numEvents > MAX_INPUT_EVENTS) {
4819     numEvents = MAX_INPUT_EVENTS;
4820   }
4821 
4822   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4823   if (lpBuffer == NULL) {
4824     return FALSE;
4825   }
4826 
4827   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4828   if (error == 0) {
4829     os::free(lpBuffer);
4830     return FALSE;
4831   }
4832 
4833   // Examine input records for the number of bytes available
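       // Only key-down characters up to and including the last carriage return are
       // counted, i.e. only complete input lines are reported as available.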
4834   for (i=0; i<numEvents; i++) {
4835     if (lpBuffer[i].EventType == KEY_EVENT) {
4836 
4837       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4838                                       &(lpBuffer[i].Event);
4839       if (keyRecord->bKeyDown == TRUE) {
4840         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4841         curLength++;
4842         if (*keyPressed == '\r') {
4843           actualLength = curLength;
4844         }
4845       }
4846     }
4847   }
4848 
4849   if (lpBuffer != NULL) {
4850     os::free(lpBuffer);
4851   }
4852 
4853   *pbytes = (long) actualLength;
4854   return TRUE;
4855 }
4856 
4857 // Map a block of memory.
4858 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4859                         char *addr, size_t bytes, bool read_only,
4860                         bool allow_exec) {
4861   HANDLE hFile;
4862   char* base;
4863 
4864   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4865                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4866   if (hFile == INVALID_HANDLE_VALUE) {
4867     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4868     return NULL;
4869   }
4870 
4871   if (allow_exec) {
4872     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4873     // unless it comes from a PE image (which the shared archive is not.)
4874     // Even VirtualProtect refuses to give execute access to mapped memory
4875     // that was not previously executable.
4876     //
4877     // Instead, stick the executable region in anonymous memory.  Yuck.
4878     // Penalty is that ~4 pages will not be shareable - in the future
4879     // we might consider DLLizing the shared archive with a proper PE
4880     // header so that mapping executable + sharing is possible.
4881 
4882     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4883                                 PAGE_READWRITE);
4884     if (base == NULL) {
4885       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4886       CloseHandle(hFile);
4887       return NULL;
4888     }
4889 
4890     // Record virtual memory allocation
4891     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
4892 
4893     DWORD bytes_read;
4894     OVERLAPPED overlapped;
4895     overlapped.Offset = (DWORD)file_offset;
4896     overlapped.OffsetHigh = 0;
4897     overlapped.hEvent = NULL;
4898     // ReadFile guarantees that if the return value is true, the requested
4899     // number of bytes were read before returning.
4900     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4901     if (!res) {
4902       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4903       release_memory(base, bytes);
4904       CloseHandle(hFile);
4905       return NULL;
4906     }
4907   } else {
4908     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4909                                     NULL /* file_name */);
4910     if (hMap == NULL) {
4911       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4912       CloseHandle(hFile);
4913       return NULL;
4914     }
4915 
4916     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4917     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4918                                   (DWORD)bytes, addr);
4919     if (base == NULL) {
4920       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4921       CloseHandle(hMap);
4922       CloseHandle(hFile);
4923       return NULL;
4924     }
4925 
4926     if (CloseHandle(hMap) == 0) {
4927       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4928       CloseHandle(hFile);
4929       return base;
4930     }
4931   }
4932 
4933   if (allow_exec) {
4934     DWORD old_protect;
4935     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4936     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4937 
4938     if (!res) {
4939       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4940       // Don't consider this a hard error, on IA32 even if the
4941       // VirtualProtect fails, we should still be able to execute
4942       CloseHandle(hFile);
4943       return base;
4944     }
4945   }
4946 
4947   if (CloseHandle(hFile) == 0) {
4948     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4949     return base;
4950   }
4951 
4952   return base;
4953 }
4954 
4955 
4956 // Remap a block of memory.
4957 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4958                           char *addr, size_t bytes, bool read_only,
4959                           bool allow_exec) {
4960   // This OS does not allow existing memory maps to be remapped so we
4961   // would have to unmap the memory before we remap it.
4962 
4963   // Because there is a small window between unmapping memory and mapping
4964   // it in again with different protections, CDS archives are mapped RW
4965   // on windows, so this function isn't called.
4966   ShouldNotReachHere();
4967   return NULL;
4968 }
4969 
4970 
4971 // Unmap a block of memory.
4972 // Returns true=success, otherwise false.
4973 
4974 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4975   MEMORY_BASIC_INFORMATION mem_info;
4976   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4977     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
4978     return false;
4979   }
4980 
4981   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4982   // Instead, executable region was allocated using VirtualAlloc(). See
4983   // pd_map_memory() above.
4984   //
4985   // The following flags should match the 'exec_access' flags used for
4986   // VirtualProtect() in pd_map_memory().
4987   if (mem_info.Protect == PAGE_EXECUTE_READ ||
4988       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4989     return pd_release_memory(addr, bytes);
4990   }
4991 
4992   BOOL result = UnmapViewOfFile(addr);
4993   if (result == 0) {
4994     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
4995     return false;
4996   }
4997   return true;
4998 }
4999 
5000 void os::pause() {
5001   char filename[MAX_PATH];
5002   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5003     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5004   } else {
5005     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5006   }
5007 
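       // Create the pause file, then block until an external tool deletes it.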
5008   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5009   if (fd != -1) {
5010     struct stat buf;
5011     ::close(fd);
5012     while (::stat(filename, &buf) == 0) {
5013       Sleep(100);
5014     }
5015   } else {
5016     jio_fprintf(stderr,
5017                 "Could not open pause file '%s', continuing immediately.\n", filename);
5018   }
5019 }
5020 
5021 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5022 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5023 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
5024 
5025 os::ThreadCrashProtection::ThreadCrashProtection() {
5026 }
5027 
5028 // See the caveats for this class in os_windows.hpp
5029 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5030 // into this method and returns false. If no OS EXCEPTION was raised, returns
5031 // true.
5032 // The callback is supposed to provide the method that should be protected.
5033 //
5034 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5035 
5036   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5037 
5038   _protected_thread = Thread::current_or_null();
5039   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5040 
5041   bool success = true;
5042   __try {
5043     _crash_protection = this;
5044     cb.call();
5045   } __except(EXCEPTION_EXECUTE_HANDLER) {
5046     // only for protection, nothing to do
5047     success = false;
5048   }
5049   _crash_protection = NULL;
5050   _protected_thread = NULL;
5051   Thread::muxRelease(&_crash_mux);
5052   return success;
5053 }
5054 
5055 
5056 class HighResolutionInterval : public CHeapObj<mtThread> {
5057   // The default timer resolution seems to be 10 milliseconds.
5058   // (Where is this written down?)
5059   // If someone wants to sleep for only a fraction of the default,
5060   // then we set the timer resolution down to 1 millisecond for
5061   // the duration of their interval.
5062   // We carefully set the resolution back, since otherwise we
5063   // seem to incur an overhead (3%?) that we don't need.
5064   // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
5065   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5066   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5067   // timeBeginPeriod() if the relative error exceeded some threshold.
5068   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5069   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5070   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5071   // resolution timers running.
5072  private:
5073   jlong resolution;
5074  public:
5075   HighResolutionInterval(jlong ms) {
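         // Only intervals that are not a multiple of the ~10 ms default resolution
         // warrant dropping the timer period to 1 ms.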
5076     resolution = ms % 10L;
5077     if (resolution != 0) {
5078       MMRESULT result = timeBeginPeriod(1L);
5079     }
5080   }
5081   ~HighResolutionInterval() {
5082     if (resolution != 0) {
5083       MMRESULT result = timeEndPeriod(1L);
5084     }
5085     resolution = 0L;
5086   }
5087 };
5088 
5089 // An Event wraps a win32 "CreateEvent" kernel handle.
5090 //
5091 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5092 //
5093 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5094 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5095 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5096 //     In addition, an unpark() operation might fetch the handle field, but the
5097 //     event could recycle between the fetch and the SetEvent() operation.
5098 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5099 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5100 //     on a stale but recycled handle would be harmless, but in practice this might
5101 //     confuse other non-Sun code, so it's not a viable approach.
5102 //
5103 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5104 //     with the Event.  The event handle is never closed.  This could be construed
5105 //     as handle leakage, but only up to the maximum # of threads that have been extant
5106 //     at any one time.  This shouldn't be an issue, as windows platforms typically
5107 //     permit a process to have hundreds of thousands of open handles.
5108 //
5109 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5110 //     and release unused handles.
5111 //
5112 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5113 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5114 //
5115 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5116 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5117 //
5118 // We use (2).
5119 //
5120 // TODO-FIXME:
5121 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5122 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5123 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5124 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5125 //     into a single win32 CreateEvent() handle.
5126 //
5127 // Assumption:
5128 //    Only one parker can exist on an event, which is why we allocate
5129 //    them per-thread. Multiple unparkers can coexist.
5130 //
5131 // _Event transitions in park()
5132 //   -1 => -1 : illegal
5133 //    1 =>  0 : pass - return immediately
5134 //    0 => -1 : block; then set _Event to 0 before returning
5135 //
5136 // _Event transitions in unpark()
5137 //    0 => 1 : just return
5138 //    1 => 1 : just return
5139 //   -1 => either 0 or 1; must signal target thread
5140 //         That is, we can safely transition _Event from -1 to either
5141 //         0 or 1.
5142 //
5143 // _Event serves as a restricted-range semaphore.
5144 //   -1 : thread is blocked, i.e. there is a waiter
5145 //    0 : neutral: thread is running or ready,
5146 //        could have been signaled after a wait started
5147 //    1 : signaled - thread is running or ready
5148 //
5149 // Another possible encoding of _Event would be with
5150 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5151 //
5152 
5153 int os::PlatformEvent::park(jlong Millis) {
5154   // Transitions for _Event:
5155   //   -1 => -1 : illegal
5156   //    1 =>  0 : pass - return immediately
5157   //    0 => -1 : block; then set _Event to 0 before returning
5158 
5159   guarantee(_ParkHandle != NULL , "Invariant");
5160   guarantee(Millis > 0          , "Invariant");
5161 
5162   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5163   // the initial park() operation.
5164   // Consider: use atomic decrement instead of CAS-loop
5165 
5166   int v;
5167   for (;;) {
5168     v = _Event;
5169     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5170   }
5171   guarantee((v == 0) || (v == 1), "invariant");
5172   if (v != 0) return OS_OK;
5173 
5174   // Do this the hard way by blocking ...
5175   // TODO: consider a brief spin here, gated on the success of recent
5176   // spin attempts by this thread.
5177   //
5178   // We decompose long timeouts into series of shorter timed waits.
5179   // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5180   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5181   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5182   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5183   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5184   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5185   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5186   // for the already waited time.  This policy does not admit any new outcomes.
5187   // In the future, however, we might want to track the accumulated wait time and
5188   // adjust Millis accordingly if we encounter a spurious wakeup.
5189 
5190   const int MAXTIMEOUT = 0x10000000;
5191   DWORD rv = WAIT_TIMEOUT;
5192   while (_Event < 0 && Millis > 0) {
5193     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5194     if (Millis > MAXTIMEOUT) {
5195       prd = MAXTIMEOUT;
5196     }
5197     HighResolutionInterval *phri = NULL;
5198     if (!ForceTimeHighResolution) {
5199       phri = new HighResolutionInterval(prd);
5200     }
5201     rv = ::WaitForSingleObject(_ParkHandle, prd);
5202     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5203     if (rv == WAIT_TIMEOUT) {
5204       Millis -= prd;
5205     }
5206     delete phri; // if it is NULL, harmless
5207   }
5208   v = _Event;
5209   _Event = 0;
5210   // see comment at end of os::PlatformEvent::park() below:
5211   OrderAccess::fence();
5212   // If we encounter a nearly simultaneous timeout expiry and unpark()
5213   // we return OS_OK indicating we awoke via unpark().
5214   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5215   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5216 }
5217 
5218 void os::PlatformEvent::park() {
5219   // Transitions for _Event:
5220   //   -1 => -1 : illegal
5221   //    1 =>  0 : pass - return immediately
5222   //    0 => -1 : block; then set _Event to 0 before returning
5223 
5224   guarantee(_ParkHandle != NULL, "Invariant");
5225   // Invariant: Only the thread associated with the Event/PlatformEvent
5226   // may call park().
5227   // Consider: use atomic decrement instead of CAS-loop
5228   int v;
5229   for (;;) {
5230     v = _Event;
5231     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5232   }
5233   guarantee((v == 0) || (v == 1), "invariant");
5234   if (v != 0) return;
5235 
5236   // Do this the hard way by blocking ...
5237   // TODO: consider a brief spin here, gated on the success of recent
5238   // spin attempts by this thread.
5239   while (_Event < 0) {
5240     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5241     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5242   }
5243 
5244   // Usually we'll find _Event == 0 at this point, but as
5245   // an optional optimization we clear it, just in case
5246   // multiple unpark() operations drove _Event up to 1.
5247   _Event = 0;
5248   OrderAccess::fence();
5249   guarantee(_Event >= 0, "invariant");
5250 }
5251 
5252 void os::PlatformEvent::unpark() {
5253   guarantee(_ParkHandle != NULL, "Invariant");
5254 
5255   // Transitions for _Event:
5256   //    0 => 1 : just return
5257   //    1 => 1 : just return
5258   //   -1 => either 0 or 1; must signal target thread
5259   //         That is, we can safely transition _Event from -1 to either
5260   //         0 or 1.
5261   // See also: "Semaphores in Plan 9" by Mullender & Cox
5262   //
5263   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5264   // that it will take two back-to-back park() calls for the owning
5265   // thread to block. This has the benefit of forcing a spurious return
5266   // from the first park() call after an unpark() call which will help
5267   // shake out uses of park() and unpark() without condition variables.
5268 
5269   if (Atomic::xchg(1, &_Event) >= 0) return;
5270 
5271   ::SetEvent(_ParkHandle);
5272 }
5273 
5274 
5275 // JSR166
5276 // -------------------------------------------------------
5277 
5278 // The Windows implementation of Park is very straightforward: Basic
5279 // operations on Win32 Events turn out to have the right semantics to
5280 // use them directly. We opportunistically reuse the event inherited
5281 // from Monitor.
5282 
5283 void Parker::park(bool isAbsolute, jlong time) {
5284   guarantee(_ParkEvent != NULL, "invariant");
5285   // First, demultiplex/decode time arguments
5286   if (time < 0) { // don't wait
5287     return;
5288   } else if (time == 0 && !isAbsolute) {
5289     time = INFINITE;
5290   } else if (isAbsolute) {
5291     time -= os::javaTimeMillis(); // convert to relative time
5292     if (time <= 0) {  // already elapsed
5293       return;
5294     }
5295   } else { // relative
5296     time /= 1000000;  // Must coarsen from nanos to millis
5297     if (time == 0) {  // Wait for the minimal time unit if zero
5298       time = 1;
5299     }
5300   }
5301 
5302   JavaThread* thread = JavaThread::current();
5303 
5304   // Don't wait if interrupted or already triggered
5305   if (thread->is_interrupted(false) ||
5306       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5307     ResetEvent(_ParkEvent);
5308     return;
5309   } else {
5310     ThreadBlockInVM tbivm(thread);
5311     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5312     thread->set_suspend_equivalent();
5313 
5314     WaitForSingleObject(_ParkEvent, time);
5315     ResetEvent(_ParkEvent);
5316 
5317     // If externally suspended while waiting, re-suspend
5318     if (thread->handle_special_suspend_equivalent_condition()) {
5319       thread->java_suspend_self();
5320     }
5321   }
5322 }
5323 
5324 void Parker::unpark() {
5325   guarantee(_ParkEvent != NULL, "invariant");
5326   SetEvent(_ParkEvent);
5327 }
5328 
5329 // Platform Monitor implementation
5330 
5331 // Must already be locked
5332 int os::PlatformMonitor::wait(jlong millis) {
5333   assert(millis >= 0, "negative timeout");
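       // A timeout of zero is treated as "wait indefinitely".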
5334   int ret = OS_TIMEOUT;
5335   int status = SleepConditionVariableCS(&_cond, &_mutex,
5336                                         millis == 0 ? INFINITE : millis);
5337   if (status != 0) {
5338     ret = OS_OK;
5339   }
5340   #ifndef PRODUCT
5341   else {
5342     DWORD err = GetLastError();
5343     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5344   }
5345   #endif
5346   return ret;
5347 }
5348 
5349 // Run the specified command in a separate process. Return its exit value,
5350 // or -1 on failure (e.g. can't create a new process).
5351 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5352   STARTUPINFO si;
5353   PROCESS_INFORMATION pi;
5354   DWORD exit_code;
5355 
5356   char * cmd_string;
5357   const char * cmd_prefix = "cmd /C ";
5358   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5359   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5360   if (cmd_string == NULL) {
5361     return -1;
5362   }
5363   cmd_string[0] = '\0';
5364   strcat(cmd_string, cmd_prefix);
5365   strcat(cmd_string, cmd);
5366 
5367   // now replace all '\n' with '&'
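       // ('&' is the cmd.exe command separator, so each line runs as a separate command)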
5368   char * substring = cmd_string;
5369   while ((substring = strchr(substring, '\n')) != NULL) {
5370     substring[0] = '&';
5371     substring++;
5372   }
5373   memset(&si, 0, sizeof(si));
5374   si.cb = sizeof(si);
5375   memset(&pi, 0, sizeof(pi));
5376   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5377                             cmd_string,    // command line
5378                             NULL,   // process security attribute
5379                             NULL,   // thread security attribute
5380                             TRUE,   // inherits system handles
5381                             0,      // no creation flags
5382                             NULL,   // use parent's environment block
5383                             NULL,   // use parent's starting directory
5384                             &si,    // (in) startup information
5385                             &pi);   // (out) process information
5386 
5387   if (rslt) {
5388     // Wait until child process exits.
5389     WaitForSingleObject(pi.hProcess, INFINITE);
5390 
5391     GetExitCodeProcess(pi.hProcess, &exit_code);
5392 
5393     // Close process and thread handles.
5394     CloseHandle(pi.hProcess);
5395     CloseHandle(pi.hThread);
5396   } else {
5397     exit_code = -1;
5398   }
5399 
5400   FREE_C_HEAP_ARRAY(char, cmd_string);
5401   return (int)exit_code;
5402 }
5403 
5404 bool os::find(address addr, outputStream* st) {
5405   int offset = -1;
5406   bool result = false;
5407   char buf[256];
5408   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5409     st->print(PTR_FORMAT " ", addr);
5410     if (strlen(buf) < sizeof(buf) - 1) {
5411       char* p = strrchr(buf, '\\');
5412       if (p) {
5413         st->print("%s", p + 1);
5414       } else {
5415         st->print("%s", buf);
5416       }
5417     } else {
5418       // The library name is probably truncated. Let's omit the library name.
5419       // See also JDK-8147512.
5420     }
5421     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5422       st->print("::%s + 0x%x", buf, offset);
5423     }
5424     st->cr();
5425     result = true;
5426   }
5427   return result;
5428 }
5429 
5430 static jint initSock() {
5431   WSADATA wsadata;
5432 
5433   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5434     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5435                 ::GetLastError());
5436     return JNI_ERR;
5437   }
5438   return JNI_OK;
5439 }
5440 
5441 struct hostent* os::get_host_by_name(char* name) {
5442   return (struct hostent*)gethostbyname(name);
5443 }
5444 
5445 int os::socket_close(int fd) {
5446   return ::closesocket(fd);
5447 }
5448 
5449 int os::socket(int domain, int type, int protocol) {
5450   return ::socket(domain, type, protocol);
5451 }
5452 
5453 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5454   return ::connect(fd, him, len);
5455 }
5456 
5457 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5458   return ::recv(fd, buf, (int)nBytes, flags);
5459 }
5460 
5461 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5462   return ::send(fd, buf, (int)nBytes, flags);
5463 }
5464 
5465 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5466   return ::send(fd, buf, (int)nBytes, flags);
5467 }
5468 
5469 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5470 #if defined(IA32)
5471   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5472 #elif defined (AMD64)
5473   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5474 #endif
5475 
5476 // returns true if thread could be suspended,
5477 // false otherwise
5478 static bool do_suspend(HANDLE* h) {
5479   if (h != NULL) {
5480     if (SuspendThread(*h) != ~0) {
5481       return true;
5482     }
5483   }
5484   return false;
5485 }
5486 
5487 // resume the thread
5488 // calling resume on an active thread is a no-op
5489 static void do_resume(HANDLE* h) {
5490   if (h != NULL) {
5491     ResumeThread(*h);
5492   }
5493 }
5494 
5495 // retrieve a suspend/resume context capable handle
5496 // from the tid. Caller validates handle return value.
5497 void get_thread_handle_for_extended_context(HANDLE* h,
5498                                             OSThread::thread_id_t tid) {
5499   if (h != NULL) {
5500     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5501   }
5502 }
5503 
5504 // Thread sampling implementation
5505 //
5506 void os::SuspendedThreadTask::internal_do_task() {
5507   CONTEXT    ctxt;
5508   HANDLE     h = NULL;
5509 
5510   // get context capable handle for thread
5511   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5512 
5513   // sanity
5514   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5515     return;
5516   }
5517 
5518   // suspend the thread
5519   if (do_suspend(&h)) {
5520     ctxt.ContextFlags = sampling_context_flags;
5521     // get thread context
5522     GetThreadContext(h, &ctxt);
5523     SuspendedThreadTaskContext context(_thread, &ctxt);
5524     // pass context to Thread Sampling impl
5525     do_task(context);
5526     // resume thread
5527     do_resume(&h);
5528   }
5529 
5530   // close handle
5531   CloseHandle(h);
5532 }
5533 
5534 bool os::start_debugging(char *buf, int buflen) {
5535   int len = (int)strlen(buf);
5536   char *p = &buf[len];
5537 
5538   jio_snprintf(p, buflen-len,
5539              "\n\n"
5540              "Do you want to debug the problem?\n\n"
5541              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5542              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5543              "Otherwise, select 'No' to abort...",
5544              os::current_process_id(), os::current_thread_id());
5545 
5546   bool yes = os::message_box("Unexpected Error", buf);
5547 
5548   if (yes) {
5549     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5550     // exception. If VM is running inside a debugger, the debugger will
5551     // catch the exception. Otherwise, the breakpoint exception will reach
5552     // the default windows exception handler, which can spawn a debugger and
5553     // automatically attach to the dying VM.
5554     os::breakpoint();
5555     yes = false;
5556   }
5557   return yes;
5558 }
5559 
5560 void* os::get_default_process_handle() {
5561   return (void*)GetModuleHandle(NULL);
5562 }
5563 
5564 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5565 // which is used to find statically linked in agents.
5566 // Additionally for windows, takes into account __stdcall names.
5567 // Parameters:
5568 //            sym_name: Symbol in library we are looking for
5569 //            lib_name: Name of library to look in, NULL for shared libs.
5570 //            is_absolute_path == true if lib_name is absolute path to agent
5571 //                                     such as "C:/a/b/L.dll"
5572 //            == false if only the base name of the library is passed in
5573 //               such as "L"
5574 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5575                                     bool is_absolute_path) {
5576   char *agent_entry_name;
5577   size_t len;
5578   size_t name_len;
5579   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5580   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5581   const char *start;
5582 
5583   if (lib_name != NULL) {
5584     len = name_len = strlen(lib_name);
5585     if (is_absolute_path) {
5586       // Need to strip path, prefix and suffix
5587       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5588         lib_name = ++start;
5589       } else {
5590         // Need to check for drive prefix
5591         if ((start = strchr(lib_name, ':')) != NULL) {
5592           lib_name = ++start;
5593         }
5594       }
5595       if (len <= (prefix_len + suffix_len)) {
5596         return NULL;
5597       }
5598       lib_name += prefix_len;
5599       name_len = strlen(lib_name) - suffix_len;
5600     }
5601   }
5602   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5603   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5604   if (agent_entry_name == NULL) {
5605     return NULL;
5606   }
5607   if (lib_name != NULL) {
5608     const char *p = strrchr(sym_name, '@');
5609     if (p != NULL && p != sym_name) {
5610       // sym_name == _Agent_OnLoad@XX
5611       strncpy(agent_entry_name, sym_name, (p - sym_name));
5612       agent_entry_name[(p-sym_name)] = '\0';
5613       // agent_entry_name == _Agent_OnLoad
5614       strcat(agent_entry_name, "_");
5615       strncat(agent_entry_name, lib_name, name_len);
5616       strcat(agent_entry_name, p);
5617       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5618     } else {
5619       strcpy(agent_entry_name, sym_name);
5620       strcat(agent_entry_name, "_");
5621       strncat(agent_entry_name, lib_name, name_len);
5622     }
5623   } else {
5624     strcpy(agent_entry_name, sym_name);
5625   }
5626   return agent_entry_name;
5627 }
5628 
5629 #ifndef PRODUCT
5630 
5631 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5632 // contiguous memory block at a particular address.
5633 // The test first tries to find a good approximate address to allocate at by using the same
5634 // method to allocate some memory at any address. The test then tries to allocate memory in
5635 // the vicinity (not directly after it to avoid possible by-chance use of that location)
5636 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of
5637 // the previously allocated memory is available for allocation. The only actual failure
5638 // that is reported is when the test tries to allocate at a particular location but gets a
5639 // different valid one. A NULL return value at this point is not considered an error but may
5640 // be legitimate.
5641 void TestReserveMemorySpecial_test() {
5642   if (!UseLargePages) {
5643     return;
5644   }
5645   // save current value of globals
5646   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5647   bool old_use_numa_interleaving = UseNUMAInterleaving;
5648 
5649   // set globals to make sure we hit the correct code path
5650   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5651 
5652   // do an allocation at an address selected by the OS to get a good one.
5653   const size_t large_allocation_size = os::large_page_size() * 4;
5654   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5655   if (result == NULL) {
5656   } else {
5657     os::release_memory_special(result, large_allocation_size);
5658 
5659     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5660     // we managed to get it once.
5661     const size_t expected_allocation_size = os::large_page_size();
5662     char* expected_location = result + os::large_page_size();
5663     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5664     if (actual_location == NULL) {
5665     } else {
5666       // release memory
5667       os::release_memory_special(actual_location, expected_allocation_size);
5668       // only now check, after releasing any memory to avoid any leaks.
5669       assert(actual_location == expected_location,
5670              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5671              expected_location, expected_allocation_size, actual_location);
5672     }
5673   }
5674 
5675   // restore globals
5676   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5677   UseNUMAInterleaving = old_use_numa_interleaving;
5678 }
5679 #endif // PRODUCT
5680 
5681 /*
5682   All the defined signal names for Windows.
5683 
5684   NOTE that not all of these names are accepted by FindSignal!
5685 
5686   For various reasons some of these may be rejected at runtime.
5687 
5688   Here are the names currently accepted by a user of sun.misc.Signal with
5689   1.4.1 (ignoring potential interaction with use of chaining, etc):
5690 
5691      (LIST TBD)
5692 
5693 */
5694 int os::get_signal_number(const char* name) {
5695   static const struct {
5696     const char* name;
5697     int         number;
5698   } siglabels [] =
5699     // derived from version 6.0 VC98/include/signal.h
5700   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5701   "FPE",        SIGFPE,         // floating point exception
5702   "SEGV",       SIGSEGV,        // segment violation
5703   "INT",        SIGINT,         // interrupt
5704   "TERM",       SIGTERM,        // software term signal from kill
5705   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5706   "ILL",        SIGILL};        // illegal instruction
5707   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5708     if (strcmp(name, siglabels[i].name) == 0) {
5709       return siglabels[i].number;
5710     }
5711   }
5712   return -1;
5713 }
5714 
5715 // Fast current thread access
5716 
5717 int os::win32::_thread_ptr_offset = 0;
5718 
5719 static void call_wrapper_dummy() {}
5720 
5721 // We need to call the os_exception_wrapper once so that it sets
5722 // up the offset from FS of the thread pointer.
5723 void os::win32::initialize_thread_ptr_offset() {
5724   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5725                            NULL, NULL, NULL, NULL);
5726 }
5727 
5728 bool os::supports_map_sync() {
5729   return false;
5730 }