1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "logging/log.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "memory/allocation.inline.hpp"
  41 #include "memory/filemap.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "services/attachListener.hpp"
  67 #include "services/memTracker.hpp"
  68 #include "services/runtimeService.hpp"
  69 #include "utilities/align.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/growableArray.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 #include "symbolengine.hpp"
  77 #include "windbghelp.hpp"
  78 
  79 
  80 #ifdef _DEBUG
  81 #include <crtdbg.h>
  82 #endif
  83 
  84 
  85 #include <windows.h>
  86 #include <sys/types.h>
  87 #include <sys/stat.h>
  88 #include <sys/timeb.h>
  89 #include <objidl.h>
  90 #include <shlobj.h>
  91 
  92 #include <malloc.h>
  93 #include <signal.h>
  94 #include <direct.h>
  95 #include <errno.h>
  96 #include <fcntl.h>
  97 #include <io.h>
  98 #include <process.h>              // For _beginthreadex(), _endthreadex()
  99 #include <imagehlp.h>             // For os::dll_address_to_function_name
 100 // for enumerating dll libraries
 101 #include <vdmdbg.h>
 102 #include <psapi.h>
 103 #include <mmsystem.h>
 104 #include <winsock2.h>
 105 
 106 // for timer info max values which include all bits
 107 #define ALL_64_BITS CONST64(-1)
 108 
 109 // For DLL loading/load error detection
// Values from the PE/COFF file format
 111 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 112 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 113 
 114 static HANDLE main_process;
 115 static HANDLE main_thread;
 116 static int    main_thread_id;
 117 
 118 static FILETIME process_creation_time;
 119 static FILETIME process_exit_time;
 120 static FILETIME process_user_time;
 121 static FILETIME process_kernel_time;
 122 
 123 #ifdef _M_AMD64
 124   #define __CPU__ amd64
 125 #else
 126   #define __CPU__ i486
 127 #endif
 128 
 129 #if INCLUDE_AOT
 130 PVOID  topLevelVectoredExceptionHandler = NULL;
 131 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 132 #endif
 133 
 134 // save DLL module handle, used by GetModuleFileName
 135 
 136 HINSTANCE vm_lib_handle;
 137 
 138 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 139   switch (reason) {
 140   case DLL_PROCESS_ATTACH:
 141     vm_lib_handle = hinst;
 142     if (ForceTimeHighResolution) {
 143       timeBeginPeriod(1L);
 144     }
 145     WindowsDbgHelp::pre_initialize();
 146     SymbolEngine::pre_initialize();
 147     break;
 148   case DLL_PROCESS_DETACH:
 149     if (ForceTimeHighResolution) {
 150       timeEndPeriod(1L);
 151     }
 152 #if INCLUDE_AOT
 153     if (topLevelVectoredExceptionHandler != NULL) {
 154       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 155       topLevelVectoredExceptionHandler = NULL;
 156     }
 157 #endif
 158     break;
 159   default:
 160     break;
 161   }
 162   return true;
 163 }
 164 
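// Convert a FILETIME value (a count of 100-nanosecond intervals) into an
// approximate number of seconds, expressed as a double.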
 165 static inline double fileTimeAsDouble(FILETIME* time) {
 166   const double high  = (double) ((unsigned int) ~0);
 167   const double split = 10000000.0;
 168   double result = (time->dwLowDateTime / split) +
 169                    time->dwHighDateTime * (high/split);
 170   return result;
 171 }
 172 
 173 // Implementation of os
 174 
 175 bool os::unsetenv(const char* name) {
 176   assert(name != NULL, "Null pointer");
 177   return (SetEnvironmentVariable(name, NULL) == TRUE);
 178 }
 179 
 180 // No setuid programs under Windows.
 181 bool os::have_special_privileges() {
 182   return false;
 183 }
 184 
 185 
// This method is a periodic task used to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.
// On Windows it currently does nothing.
 189 void os::run_periodic_checks() {
 190   return;
 191 }
 192 
 193 // previous UnhandledExceptionFilter, if there is one
 194 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 195 
 196 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 197 
 198 void os::init_system_properties_values() {
 199   // sysclasspath, java_home, dll_dir
 200   {
 201     char *home_path;
 202     char *dll_path;
 203     char *pslash;
 204     const char *bin = "\\bin";
 205     char home_dir[MAX_PATH + 1];
 206     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 207 
 208     if (alt_home_dir != NULL)  {
 209       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 210       home_dir[MAX_PATH] = '\0';
 211     } else {
 212       os::jvm_path(home_dir, sizeof(home_dir));
 213       // Found the full path to jvm.dll.
 214       // Now cut the path to <java_home>/jre if we can.
 215       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 216       pslash = strrchr(home_dir, '\\');
 217       if (pslash != NULL) {
 218         *pslash = '\0';                   // get rid of \{client|server}
 219         pslash = strrchr(home_dir, '\\');
 220         if (pslash != NULL) {
 221           *pslash = '\0';                 // get rid of \bin
 222         }
 223       }
 224     }
 225 
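    // For example, a jvm.dll located at C:\jdk\bin\server\jvm.dll leaves
    // home_dir == C:\jdk at this point, so java_home becomes C:\jdk and
    // dll_dir becomes C:\jdk\bin below.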
 226     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 227     strcpy(home_path, home_dir);
 228     Arguments::set_java_home(home_path);
 229     FREE_C_HEAP_ARRAY(char, home_path);
 230 
 231     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 232                                 mtInternal);
 233     strcpy(dll_path, home_dir);
 234     strcat(dll_path, bin);
 235     Arguments::set_dll_dir(dll_path);
 236     FREE_C_HEAP_ARRAY(char, dll_path);
 237 
 238     if (!set_boot_path('\\', ';')) {
 239       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 240     }
 241   }
 242 
 243 // library_path
 244 #define EXT_DIR "\\lib\\ext"
 245 #define BIN_DIR "\\bin"
 246 #define PACKAGE_DIR "\\Sun\\Java"
 247   {
 248     // Win32 library search order (See the documentation for LoadLibrary):
 249     //
    // 1. The directory from which the application is loaded.
 251     // 2. The system wide Java Extensions directory (Java only)
 252     // 3. System directory (GetSystemDirectory)
 253     // 4. Windows directory (GetWindowsDirectory)
 254     // 5. The PATH environment variable
 255     // 6. The current directory
 256 
 257     char *library_path;
 258     char tmp[MAX_PATH];
 259     char *path_str = ::getenv("PATH");
 260 
 261     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 262                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 263 
 264     library_path[0] = '\0';
 265 
 266     GetModuleFileName(NULL, tmp, sizeof(tmp));
 267     *(strrchr(tmp, '\\')) = '\0';
 268     strcat(library_path, tmp);
 269 
 270     GetWindowsDirectory(tmp, sizeof(tmp));
 271     strcat(library_path, ";");
 272     strcat(library_path, tmp);
 273     strcat(library_path, PACKAGE_DIR BIN_DIR);
 274 
 275     GetSystemDirectory(tmp, sizeof(tmp));
 276     strcat(library_path, ";");
 277     strcat(library_path, tmp);
 278 
 279     GetWindowsDirectory(tmp, sizeof(tmp));
 280     strcat(library_path, ";");
 281     strcat(library_path, tmp);
 282 
 283     if (path_str) {
 284       strcat(library_path, ";");
 285       strcat(library_path, path_str);
 286     }
 287 
 288     strcat(library_path, ";.");
 289 
 290     Arguments::set_library_path(library_path);
 291     FREE_C_HEAP_ARRAY(char, library_path);
 292   }
 293 
 294   // Default extensions directory
 295   {
 296     char path[MAX_PATH];
 297     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 298     GetWindowsDirectory(path, MAX_PATH);
 299     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 300             path, PACKAGE_DIR, EXT_DIR);
 301     Arguments::set_ext_dirs(buf);
 302   }
 303   #undef EXT_DIR
 304   #undef BIN_DIR
 305   #undef PACKAGE_DIR
 306 
 307 #ifndef _WIN64
 308   // set our UnhandledExceptionFilter and save any previous one
 309   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 310 #endif
 311 
 312   // Done
 313   return;
 314 }
 315 
 316 void os::breakpoint() {
 317   DebugBreak();
 318 }
 319 
 320 // Invoked from the BREAKPOINT Macro
 321 extern "C" void breakpoint() {
 322   os::breakpoint();
 323 }
 324 
// The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 326 // So far, this method is only used by Native Memory Tracking, which is
 327 // only supported on Windows XP or later.
 328 //
 329 int os::get_native_stack(address* stack, int frames, int toSkip) {
 330   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 331   for (int index = captured; index < frames; index ++) {
 332     stack[index] = NULL;
 333   }
 334   return captured;
 335 }
 336 
 337 
 338 // os::current_stack_base()
 339 //
 340 //   Returns the base of the stack, which is the stack's
 341 //   starting address.  This function must be called
 342 //   while running on the stack of the thread being queried.
 343 
 344 address os::current_stack_base() {
 345   MEMORY_BASIC_INFORMATION minfo;
 346   address stack_bottom;
 347   size_t stack_size;
 348 
 349   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 350   stack_bottom =  (address)minfo.AllocationBase;
 351   stack_size = minfo.RegionSize;
 352 
 353   // Add up the sizes of all the regions with the same
 354   // AllocationBase.
 355   while (1) {
 356     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 357     if (stack_bottom == (address)minfo.AllocationBase) {
 358       stack_size += minfo.RegionSize;
 359     } else {
 360       break;
 361     }
 362   }
 363   return stack_bottom + stack_size;
 364 }
 365 
 366 size_t os::current_stack_size() {
 367   size_t sz;
 368   MEMORY_BASIC_INFORMATION minfo;
 369   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 370   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 371   return sz;
 372 }
 373 
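// A note on the scan below (illustrative): if [start, start + size) begins with
// reserved-but-uncommitted pages followed by a run of committed pages, then
// committed_start reports the first committed page and committed_size covers
// that first contiguous committed run, clamped so it never extends past 'size'.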
 374 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 375   MEMORY_BASIC_INFORMATION minfo;
 376   committed_start = NULL;
 377   committed_size = 0;
 378   address top = start + size;
 379   const address start_addr = start;
 380   while (start < top) {
 381     VirtualQuery(start, &minfo, sizeof(minfo));
 382     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 383       if (committed_start != NULL) {
 384         break;
 385       }
 386     } else {  // committed
 387       if (committed_start == NULL) {
 388         committed_start = start;
 389       }
 390       size_t offset = start - (address)minfo.BaseAddress;
 391       committed_size += minfo.RegionSize - offset;
 392     }
 393     start = (address)minfo.BaseAddress + minfo.RegionSize;
 394   }
 395 
 396   if (committed_start == NULL) {
 397     assert(committed_size == 0, "Sanity");
 398     return false;
 399   } else {
 400     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 401     // current region may go beyond the limit, trim to the limit
 402     committed_size = MIN2(committed_size, size_t(top - committed_start));
 403     return true;
 404   }
 405 }
 406 
 407 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 408   const struct tm* time_struct_ptr = localtime(clock);
 409   if (time_struct_ptr != NULL) {
 410     *res = *time_struct_ptr;
 411     return res;
 412   }
 413   return NULL;
 414 }
 415 
 416 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 417   const struct tm* time_struct_ptr = gmtime(clock);
 418   if (time_struct_ptr != NULL) {
 419     *res = *time_struct_ptr;
 420     return res;
 421   }
 422   return NULL;
 423 }
 424 
 425 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 426 
 427 // Thread start routine for all newly created threads
 428 static unsigned __stdcall thread_native_entry(Thread* thread) {
 429 
 430   thread->record_stack_base_and_size();
 431 
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance or
  // from different JVM instances. The benefit is especially noticeable on
  // processors with hyperthreading technology.
 437   static int counter = 0;
 438   int pid = os::current_process_id();
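  // (pid ^ counter) & 7 yields a value in [0, 7], so each thread's frames are
  // shifted by 0 to 7 * 128 = 896 bytes, in 128-byte steps.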
 439   _alloca(((pid ^ counter++) & 7) * 128);
 440 
 441   thread->initialize_thread_current();
 442 
 443   OSThread* osthr = thread->osthread();
 444   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 445 
 446   if (UseNUMA) {
 447     int lgrp_id = os::numa_get_group_id();
 448     if (lgrp_id != -1) {
 449       thread->set_lgrp_id(lgrp_id);
 450     }
 451   }
 452 
 453   // Diagnostic code to investigate JDK-6573254
 454   int res = 30115;  // non-java thread
 455   if (thread->is_Java_thread()) {
 456     res = 20115;    // java thread
 457   }
 458 
 459   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 460 
  // Install a win32 structured exception handler around every thread created
  // by the VM, so the VM can generate an error dump when an exception occurs
  // in a non-Java thread (e.g. the VM thread).
 464   __try {
 465     thread->call_run();
 466   } __except(topLevelExceptionFilter(
 467                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 468     // Nothing to do.
 469   }
 470 
 471   // Note: at this point the thread object may already have deleted itself.
 472   // Do not dereference it from here on out.
 473 
 474   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 475 
  // One less thread is executing.
  // When the VMThread gets here, the main thread may have already exited,
  // which frees the CodeHeap containing the Atomic::add code.
 479   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 480     Atomic::dec(&os::win32::_os_thread_count);
 481   }
 482 
 483   // Thread must not return from exit_process_or_thread(), but if it does,
 484   // let it proceed to exit normally
 485   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 486 }
 487 
 488 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 489                                   int thread_id) {
 490   // Allocate the OSThread object
 491   OSThread* osthread = new OSThread(NULL, NULL);
 492   if (osthread == NULL) return NULL;
 493 
 494   // Initialize the JDK library's interrupt event.
 495   // This should really be done when OSThread is constructed,
 496   // but there is no way for a constructor to report failure to
 497   // allocate the event.
 498   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 499   if (interrupt_event == NULL) {
 500     delete osthread;
 501     return NULL;
 502   }
 503   osthread->set_interrupt_event(interrupt_event);
 504 
 505   // Store info on the Win32 thread into the OSThread
 506   osthread->set_thread_handle(thread_handle);
 507   osthread->set_thread_id(thread_id);
 508 
 509   if (UseNUMA) {
 510     int lgrp_id = os::numa_get_group_id();
 511     if (lgrp_id != -1) {
 512       thread->set_lgrp_id(lgrp_id);
 513     }
 514   }
 515 
 516   // Initial thread state is INITIALIZED, not SUSPENDED
 517   osthread->set_state(INITIALIZED);
 518 
 519   return osthread;
 520 }
 521 
 522 
 523 bool os::create_attached_thread(JavaThread* thread) {
 524 #ifdef ASSERT
 525   thread->verify_not_published();
 526 #endif
 527   HANDLE thread_h;
 528   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 529                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 530     fatal("DuplicateHandle failed\n");
 531   }
 532   OSThread* osthread = create_os_thread(thread, thread_h,
 533                                         (int)current_thread_id());
 534   if (osthread == NULL) {
 535     return false;
 536   }
 537 
 538   // Initial thread state is RUNNABLE
 539   osthread->set_state(RUNNABLE);
 540 
 541   thread->set_osthread(osthread);
 542 
 543   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 544     os::current_thread_id());
 545 
 546   return true;
 547 }
 548 
 549 bool os::create_main_thread(JavaThread* thread) {
 550 #ifdef ASSERT
 551   thread->verify_not_published();
 552 #endif
 553   if (_starting_thread == NULL) {
 554     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 555     if (_starting_thread == NULL) {
 556       return false;
 557     }
 558   }
 559 
  // The primordial thread is runnable from the start.
 561   _starting_thread->set_state(RUNNABLE);
 562 
 563   thread->set_osthread(_starting_thread);
 564   return true;
 565 }
 566 
 567 // Helper function to trace _beginthreadex attributes,
 568 //  similar to os::Posix::describe_pthread_attr()
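//  e.g. "stacksize: 1024k, flags: CREATE_SUSPENDED STACK_SIZE_PARAM_IS_A_RESERVATION"
//  (long output may be truncated by the caller's buffer).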
 569 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 570                                                size_t stacksize, unsigned initflag) {
 571   stringStream ss(buf, buflen);
 572   if (stacksize == 0) {
 573     ss.print("stacksize: default, ");
 574   } else {
 575     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 576   }
 577   ss.print("flags: ");
 578   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 579   #define ALL(X) \
 580     X(CREATE_SUSPENDED) \
 581     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 582   ALL(PRINT_FLAG)
 583   #undef ALL
 584   #undef PRINT_FLAG
 585   return buf;
 586 }
 587 
 588 // Allocate and initialize a new OSThread
 589 bool os::create_thread(Thread* thread, ThreadType thr_type,
 590                        size_t stack_size) {
 591   unsigned thread_id;
 592 
 593   // Allocate the OSThread object
 594   OSThread* osthread = new OSThread(NULL, NULL);
 595   if (osthread == NULL) {
 596     return false;
 597   }
 598 
 599   // Initialize the JDK library's interrupt event.
 600   // This should really be done when OSThread is constructed,
 601   // but there is no way for a constructor to report failure to
 602   // allocate the event.
 603   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 604   if (interrupt_event == NULL) {
 605     delete osthread;
 606     return false;
 607   }
 608   osthread->set_interrupt_event(interrupt_event);
 609   // We don't call set_interrupted(false) as it will trip the assert in there
 610   // as we are not operating on the current thread. We don't need to call it
 611   // because the initial state is already correct.
 612 
 613   thread->set_osthread(osthread);
 614 
 615   if (stack_size == 0) {
 616     switch (thr_type) {
 617     case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag.
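      // (e.g. running with -Xss512k makes stack_size 512k here)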
 619       if (JavaThread::stack_size_at_create() > 0) {
 620         stack_size = JavaThread::stack_size_at_create();
 621       }
 622       break;
 623     case os::compiler_thread:
 624       if (CompilerThreadStackSize > 0) {
 625         stack_size = (size_t)(CompilerThreadStackSize * K);
 626         break;
 627       } // else fall through:
 628         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 629     case os::vm_thread:
 630     case os::pgc_thread:
 631     case os::cgc_thread:
 632     case os::watcher_thread:
 633       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 634       break;
 635     }
 636   }
 637 
 638   // Create the Win32 thread
 639   //
  // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
  // does not specify the stack size. Instead, it specifies the size of the
  // initially committed space. The stack size is determined by the
  // PE header in the executable. If the committed "stack_size" is larger
  // than the default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example, if the launcher has a default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all; it only affects the initial
  // commitment. On the other hand, specifying a 'stack_size' larger than the
  // default value may cause a significant increase in memory usage, because
  // not only will the stack space be rounded up to a multiple of 1MB, but the
  // entire space is also committed upfront.
  //
  // Finally, Windows XP added a new flag, 'STACK_SIZE_PARAM_IS_A_RESERVATION',
  // for CreateThread() that makes it treat 'stack_size' as the stack size.
  // However, according to the MSDN documentation we are not supposed to call
  // CreateThread() directly because the JVM uses the C runtime library. The
  // good news is that the flag appears to work with _beginthreadex() as well.
 658 
 659   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 660   HANDLE thread_handle =
 661     (HANDLE)_beginthreadex(NULL,
 662                            (unsigned)stack_size,
 663                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 664                            thread,
 665                            initflag,
 666                            &thread_id);
 667 
 668   char buf[64];
 669   if (thread_handle != NULL) {
 670     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 671       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 672   } else {
 673     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 674       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 675     // Log some OS information which might explain why creating the thread failed.
 676     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 677     LogStream st(Log(os, thread)::info());
 678     os::print_memory_info(&st);
 679   }
 680 
 681   if (thread_handle == NULL) {
 682     // Need to clean up stuff we've allocated so far
 683     thread->set_osthread(NULL);
 684     delete osthread;
 685     return false;
 686   }
 687 
 688   Atomic::inc(&os::win32::_os_thread_count);
 689 
 690   // Store info on the Win32 thread into the OSThread
 691   osthread->set_thread_handle(thread_handle);
 692   osthread->set_thread_id(thread_id);
 693 
 694   // Initial thread state is INITIALIZED, not SUSPENDED
 695   osthread->set_state(INITIALIZED);
 696 
 697   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 698   return true;
 699 }
 700 
 701 
 702 // Free Win32 resources related to the OSThread
 703 void os::free_thread(OSThread* osthread) {
 704   assert(osthread != NULL, "osthread not set");
 705 
 706   // We are told to free resources of the argument thread,
 707   // but we can only really operate on the current thread.
 708   assert(Thread::current()->osthread() == osthread,
 709          "os::free_thread but not current thread");
 710 
 711   CloseHandle(osthread->thread_handle());
 712   delete osthread;
 713 }
 714 
 715 static jlong first_filetime;
 716 static jlong initial_performance_count;
 717 static jlong performance_frequency;
 718 
 719 
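// Combine the two 32-bit halves of a LARGE_INTEGER into a single jlong value.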
 720 jlong as_long(LARGE_INTEGER x) {
 721   jlong result = 0; // initialization to avoid warning
 722   set_high(&result, x.HighPart);
 723   set_low(&result, x.LowPart);
 724   return result;
 725 }
 726 
 727 
 728 jlong os::elapsed_counter() {
 729   LARGE_INTEGER count;
 730   QueryPerformanceCounter(&count);
 731   return as_long(count) - initial_performance_count;
 732 }
 733 
 734 
 735 jlong os::elapsed_frequency() {
 736   return performance_frequency;
 737 }
 738 
 739 
 740 julong os::available_memory() {
 741   return win32::available_memory();
 742 }
 743 
 744 julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
  // value if total memory is larger than 4GB.
 747   MEMORYSTATUSEX ms;
 748   ms.dwLength = sizeof(ms);
 749   GlobalMemoryStatusEx(&ms);
 750 
 751   return (julong)ms.ullAvailPhys;
 752 }
 753 
 754 julong os::physical_memory() {
 755   return win32::physical_memory();
 756 }
 757 
 758 bool os::has_allocatable_memory_limit(julong* limit) {
 759   MEMORYSTATUSEX ms;
 760   ms.dwLength = sizeof(ms);
 761   GlobalMemoryStatusEx(&ms);
 762 #ifdef _LP64
 763   *limit = (julong)ms.ullAvailVirtual;
 764   return true;
 765 #else
 766   // Limit to 1400m because of the 2gb address space wall
 767   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 768   return true;
 769 #endif
 770 }
 771 
 772 int os::active_processor_count() {
 773   // User has overridden the number of active processors
 774   if (ActiveProcessorCount > 0) {
 775     log_trace(os)("active_processor_count: "
 776                   "active processor count set by user : %d",
 777                   ActiveProcessorCount);
 778     return ActiveProcessorCount;
 779   }
 780 
 781   DWORD_PTR lpProcessAffinityMask = 0;
 782   DWORD_PTR lpSystemAffinityMask = 0;
 783   int proc_count = processor_count();
 784   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 785       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // The number of active processors is the number of set bits in the process affinity mask
 787     int bitcount = 0;
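    // Kernighan's bit-count: each iteration clears the lowest set bit of the mask.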
 788     while (lpProcessAffinityMask != 0) {
 789       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 790       bitcount++;
 791     }
 792     return bitcount;
 793   } else {
 794     return proc_count;
 795   }
 796 }
 797 
 798 uint os::processor_id() {
 799   return (uint)GetCurrentProcessorNumber();
 800 }
 801 
 802 void os::set_native_thread_name(const char *name) {
 803 
 804   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 805   //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; the debugger must observe
  // the exception below to show the correct name.

  // If there is no debugger attached, skip raising the exception.
 811   if (!IsDebuggerPresent()) {
 812     return;
 813   }
 814 
 815   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 816   struct {
 817     DWORD dwType;     // must be 0x1000
 818     LPCSTR szName;    // pointer to name (in user addr space)
 819     DWORD dwThreadID; // thread ID (-1=caller thread)
 820     DWORD dwFlags;    // reserved for future use, must be zero
 821   } info;
 822 
 823   info.dwType = 0x1000;
 824   info.szName = name;
 825   info.dwThreadID = -1;
 826   info.dwFlags = 0;
 827 
 828   __try {
 829     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 830   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 831 }
 832 
 833 bool os::distribute_processes(uint length, uint* distribution) {
 834   // Not yet implemented.
 835   return false;
 836 }
 837 
 838 bool os::bind_to_processor(uint processor_id) {
 839   // Not yet implemented.
 840   return false;
 841 }
 842 
 843 void os::win32::initialize_performance_counter() {
 844   LARGE_INTEGER count;
 845   QueryPerformanceFrequency(&count);
 846   performance_frequency = as_long(count);
 847   QueryPerformanceCounter(&count);
 848   initial_performance_count = as_long(count);
 849 }
 850 
 851 
 852 double os::elapsedTime() {
 853   return (double) elapsed_counter() / (double) elapsed_frequency();
 854 }
 855 
 856 
 857 // Windows format:
 858 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 859 // Java format:
 860 //   Java standards require the number of milliseconds since 1/1/1970
 861 
 862 // Constant offset - calculated using offset()
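// (11,644,473,600 seconds between 1601-01-01 and 1970-01-01, in 100-nanosecond units)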
 863 static jlong  _offset   = 116444736000000000;
 864 // Fake time counter for reproducible results when debugging
 865 static jlong  fake_time = 0;
 866 
 867 #ifdef ASSERT
 868 // Just to be safe, recalculate the offset in debug mode
 869 static jlong _calculated_offset = 0;
 870 static int   _has_calculated_offset = 0;
 871 
 872 jlong offset() {
 873   if (_has_calculated_offset) return _calculated_offset;
 874   SYSTEMTIME java_origin;
 875   java_origin.wYear          = 1970;
 876   java_origin.wMonth         = 1;
 877   java_origin.wDayOfWeek     = 0; // ignored
 878   java_origin.wDay           = 1;
 879   java_origin.wHour          = 0;
 880   java_origin.wMinute        = 0;
 881   java_origin.wSecond        = 0;
 882   java_origin.wMilliseconds  = 0;
 883   FILETIME jot;
 884   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 885     fatal("Error = %d\nWindows error", GetLastError());
 886   }
 887   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 888   _has_calculated_offset = 1;
 889   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 890   return _calculated_offset;
 891 }
 892 #else
 893 jlong offset() {
 894   return _offset;
 895 }
 896 #endif
 897 
 898 jlong windows_to_java_time(FILETIME wt) {
 899   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
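  // 'a' counts 100ns units since the Windows epoch; subtracting the offset and
  // dividing by 10,000 yields milliseconds since the Java epoch.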
 900   return (a - offset()) / 10000;
 901 }
 902 
// Returns time ticks in 100-nanosecond units (tenths of a microsecond)
 904 jlong windows_to_time_ticks(FILETIME wt) {
 905   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 906   return (a - offset());
 907 }
 908 
 909 FILETIME java_to_windows_time(jlong l) {
 910   jlong a = (l * 10000) + offset();
 911   FILETIME result;
 912   result.dwHighDateTime = high(a);
 913   result.dwLowDateTime  = low(a);
 914   return result;
 915 }
 916 
 917 bool os::supports_vtime() { return true; }
 918 bool os::enable_vtime() { return false; }
 919 bool os::vtime_enabled() { return false; }
 920 
 921 double os::elapsedVTime() {
 922   FILETIME created;
 923   FILETIME exited;
 924   FILETIME kernel;
 925   FILETIME user;
 926   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 927     // the resolution of windows_to_java_time() should be sufficient (ms)
 928     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 929   } else {
 930     return elapsedTime();
 931   }
 932 }
 933 
 934 jlong os::javaTimeMillis() {
 935   FILETIME wt;
 936   GetSystemTimeAsFileTime(&wt);
 937   return windows_to_java_time(wt);
 938 }
 939 
 940 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 941   FILETIME wt;
 942   GetSystemTimeAsFileTime(&wt);
 943   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 944   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 945   seconds = secs;
 946   nanos = jlong(ticks - (secs*10000000)) * 100;
 947 }
 948 
 949 jlong os::javaTimeNanos() {
 950     LARGE_INTEGER current_count;
 951     QueryPerformanceCounter(&current_count);
 952     double current = as_long(current_count);
 953     double freq = performance_frequency;
 954     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 955     return time;
 956 }
 957 
 958 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 959   jlong freq = performance_frequency;
 960   if (freq < NANOSECS_PER_SEC) {
 961     // the performance counter is 64 bits and we will
 962     // be multiplying it -- so no wrap in 64 bits
 963     info_ptr->max_value = ALL_64_BITS;
 964   } else if (freq > NANOSECS_PER_SEC) {
 965     // use the max value the counter can reach to
 966     // determine the max value which could be returned
 967     julong max_counter = (julong)ALL_64_BITS;
 968     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 969   } else {
 970     // the performance counter is 64 bits and we will
 971     // be using it directly -- so no wrap in 64 bits
 972     info_ptr->max_value = ALL_64_BITS;
 973   }
 974 
 975   // using a counter, so no skipping
 976   info_ptr->may_skip_backward = false;
 977   info_ptr->may_skip_forward = false;
 978 
 979   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 980 }
 981 
 982 char* os::local_time_string(char *buf, size_t buflen) {
 983   SYSTEMTIME st;
 984   GetLocalTime(&st);
 985   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 986                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 987   return buf;
 988 }
 989 
 990 bool os::getTimesSecs(double* process_real_time,
 991                       double* process_user_time,
 992                       double* process_system_time) {
 993   HANDLE h_process = GetCurrentProcess();
 994   FILETIME create_time, exit_time, kernel_time, user_time;
 995   BOOL result = GetProcessTimes(h_process,
 996                                 &create_time,
 997                                 &exit_time,
 998                                 &kernel_time,
 999                                 &user_time);
1000   if (result != 0) {
1001     FILETIME wt;
1002     GetSystemTimeAsFileTime(&wt);
1003     jlong rtc_millis = windows_to_java_time(wt);
1004     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
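    // GetProcessTimes() reports 100-nanosecond ticks; dividing by
    // 10 * MICROUNITS (i.e. 1e7) converts them to seconds.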
1005     *process_user_time =
1006       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
1007     *process_system_time =
1008       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1009     return true;
1010   } else {
1011     return false;
1012   }
1013 }
1014 
1015 void os::shutdown() {
1016   // allow PerfMemory to attempt cleanup of any persistent resources
1017   perfMemory_exit();
1018 
1019   // flush buffered output, finish log files
1020   ostream_abort();
1021 
1022   // Check for abort hook
1023   abort_hook_t abort_hook = Arguments::abort_hook();
1024   if (abort_hook != NULL) {
1025     abort_hook();
1026   }
1027 }
1028 
1029 
1030 static HANDLE dumpFile = NULL;
1031 
1032 // Check if dump file can be created.
1033 void os::check_dump_limit(char* buffer, size_t buffsz) {
1034   bool status = true;
1035   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1036     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1037     status = false;
1038   }
1039 
1040 #ifndef ASSERT
1041   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1042     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1043     status = false;
1044   }
1045 #endif
1046 
1047   if (status) {
1048     const char* cwd = get_current_directory(NULL, 0);
1049     int pid = current_process_id();
1050     if (cwd != NULL) {
1051       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1052     } else {
1053       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1054     }
1055 
1056     if (dumpFile == NULL &&
1057        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1058                  == INVALID_HANDLE_VALUE) {
1059       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1060       status = false;
1061     }
1062   }
1063   VMError::record_coredump_status(buffer, status);
1064 }
1065 
1066 void os::abort(bool dump_core, void* siginfo, const void* context) {
1067   EXCEPTION_POINTERS ep;
1068   MINIDUMP_EXCEPTION_INFORMATION mei;
1069   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1070 
1071   HANDLE hProcess = GetCurrentProcess();
1072   DWORD processId = GetCurrentProcessId();
1073   MINIDUMP_TYPE dumpType;
1074 
1075   shutdown();
1076   if (!dump_core || dumpFile == NULL) {
1077     if (dumpFile != NULL) {
1078       CloseHandle(dumpFile);
1079     }
1080     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1081   }
1082 
1083   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1084     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1085 
1086   if (siginfo != NULL && context != NULL) {
1087     ep.ContextRecord = (PCONTEXT) context;
1088     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1089 
1090     mei.ThreadId = GetCurrentThreadId();
1091     mei.ExceptionPointers = &ep;
1092     pmei = &mei;
1093   } else {
1094     pmei = NULL;
1095   }
1096 
  // Older versions of dbghelp.dll (the one shipped with Win2003, for example) may not support all
  // the dump types we really want. If the first call fails, fall back to using just MiniDumpWithFullMemory.
1099   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1100       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1101     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1102   }
1103   CloseHandle(dumpFile);
1104   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1105 }
1106 
1107 // Die immediately, no exit hook, no abort hook, no cleanup.
1108 void os::die() {
1109   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1110 }
1111 
1112 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1113 //  * dirent_md.c       1.15 00/02/02
1114 //
1115 // The declarations for DIR and struct dirent are in jvm_win32.h.
1116 
1117 // Caller must have already run dirname through JVM_NativePath, which removes
1118 // duplicate slashes and converts all instances of '/' into '\\'.
1119 
1120 DIR * os::opendir(const char *dirname) {
1121   assert(dirname != NULL, "just checking");   // hotspot change
1122   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1123   DWORD fattr;                                // hotspot change
1124   char alt_dirname[4] = { 0, 0, 0, 0 };
1125 
1126   if (dirp == 0) {
1127     errno = ENOMEM;
1128     return 0;
1129   }
1130 
1131   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1132   // as a directory in FindFirstFile().  We detect this case here and
1133   // prepend the current drive name.
1134   //
1135   if (dirname[1] == '\0' && dirname[0] == '\\') {
1136     alt_dirname[0] = _getdrive() + 'A' - 1;
1137     alt_dirname[1] = ':';
1138     alt_dirname[2] = '\\';
1139     alt_dirname[3] = '\0';
1140     dirname = alt_dirname;
1141   }
1142 
1143   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1144   if (dirp->path == 0) {
1145     free(dirp);
1146     errno = ENOMEM;
1147     return 0;
1148   }
1149   strcpy(dirp->path, dirname);
1150 
1151   fattr = GetFileAttributes(dirp->path);
1152   if (fattr == 0xffffffff) {
1153     free(dirp->path);
1154     free(dirp);
1155     errno = ENOENT;
1156     return 0;
1157   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1158     free(dirp->path);
1159     free(dirp);
1160     errno = ENOTDIR;
1161     return 0;
1162   }
1163 
1164   // Append "*.*", or possibly "\\*.*", to path
1165   if (dirp->path[1] == ':' &&
1166       (dirp->path[2] == '\0' ||
1167       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1168     // No '\\' needed for cases like "Z:" or "Z:\"
1169     strcat(dirp->path, "*.*");
1170   } else {
1171     strcat(dirp->path, "\\*.*");
1172   }
1173 
1174   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1175   if (dirp->handle == INVALID_HANDLE_VALUE) {
1176     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1177       free(dirp->path);
1178       free(dirp);
1179       errno = EACCES;
1180       return 0;
1181     }
1182   }
1183   return dirp;
1184 }
1185 
1186 struct dirent * os::readdir(DIR *dirp) {
1187   assert(dirp != NULL, "just checking");      // hotspot change
1188   if (dirp->handle == INVALID_HANDLE_VALUE) {
1189     return NULL;
1190   }
1191 
1192   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1193 
1194   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1195     if (GetLastError() == ERROR_INVALID_HANDLE) {
1196       errno = EBADF;
1197       return NULL;
1198     }
1199     FindClose(dirp->handle);
1200     dirp->handle = INVALID_HANDLE_VALUE;
1201   }
1202 
1203   return &dirp->dirent;
1204 }
1205 
1206 int os::closedir(DIR *dirp) {
1207   assert(dirp != NULL, "just checking");      // hotspot change
1208   if (dirp->handle != INVALID_HANDLE_VALUE) {
1209     if (!FindClose(dirp->handle)) {
1210       errno = EBADF;
1211       return -1;
1212     }
1213     dirp->handle = INVALID_HANDLE_VALUE;
1214   }
1215   free(dirp->path);
1216   free(dirp);
1217   return 0;
1218 }
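// Typical usage of the emulated dirent API above (illustrative sketch only;
// the directory name is hypothetical):
//
//   DIR* dir = os::opendir("C:\\some\\dir");
//   if (dir != NULL) {
//     for (struct dirent* e = os::readdir(dir); e != NULL; e = os::readdir(dir)) {
//       // e->d_name holds the entry name
//     }
//     os::closedir(dir);
//   }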
1219 
// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
1222 const char* os::get_temp_directory() {
1223   static char path_buf[MAX_PATH];
1224   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1225     return path_buf;
1226   } else {
1227     path_buf[0] = '\0';
1228     return path_buf;
1229   }
1230 }
1231 
// Needs to be in an OS-specific directory because Windows requires another
// header file, <direct.h>.
1234 const char* os::get_current_directory(char *buf, size_t buflen) {
1235   int n = static_cast<int>(buflen);
1236   if (buflen > INT_MAX)  n = INT_MAX;
1237   return _getcwd(buf, n);
1238 }
1239 
1240 //-----------------------------------------------------------
1241 // Helper functions for fatal error handler
1242 #ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
1245 //
1246 static bool _addr_in_ntdll(address addr) {
1247   HMODULE hmod;
1248   MODULEINFO minfo;
1249 
1250   hmod = GetModuleHandle("NTDLL.DLL");
1251   if (hmod == NULL) return false;
1252   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1253                                           &minfo, sizeof(MODULEINFO))) {
1254     return false;
1255   }
1256 
1257   if ((addr >= minfo.lpBaseOfDll) &&
1258       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1259     return true;
1260   } else {
1261     return false;
1262   }
1263 }
1264 #endif
1265 
1266 struct _modinfo {
1267   address addr;
1268   char*   full_path;   // point to a char buffer
1269   int     buflen;      // size of the buffer
1270   address base_addr;
1271 };
1272 
1273 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1274                                   address top_address, void * param) {
1275   struct _modinfo *pmod = (struct _modinfo *)param;
1276   if (!pmod) return -1;
1277 
1278   if (base_addr   <= pmod->addr &&
1279       top_address > pmod->addr) {
1280     // if a buffer is provided, copy path name to the buffer
1281     if (pmod->full_path) {
1282       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1283     }
1284     pmod->base_addr = base_addr;
1285     return 1;
1286   }
1287   return 0;
1288 }
1289 
1290 bool os::dll_address_to_library_name(address addr, char* buf,
1291                                      int buflen, int* offset) {
1292   // buf is not optional, but offset is optional
1293   assert(buf != NULL, "sanity check");
1294 
// NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
//       return the full path to the DLL file; sometimes it returns the path
//       to the corresponding PDB file (debug info); sometimes it only
//       returns a partial path, which makes life painful.
1299 
1300   struct _modinfo mi;
1301   mi.addr      = addr;
1302   mi.full_path = buf;
1303   mi.buflen    = buflen;
1304   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1305     // buf already contains path name
1306     if (offset) *offset = addr - mi.base_addr;
1307     return true;
1308   }
1309 
1310   buf[0] = '\0';
1311   if (offset) *offset = -1;
1312   return false;
1313 }
1314 
1315 bool os::dll_address_to_function_name(address addr, char *buf,
1316                                       int buflen, int *offset,
1317                                       bool demangle) {
1318   // buf is not optional, but offset is optional
1319   assert(buf != NULL, "sanity check");
1320 
1321   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1322     return true;
1323   }
1324   if (offset != NULL)  *offset  = -1;
1325   buf[0] = '\0';
1326   return false;
1327 }
1328 
1329 // save the start and end address of jvm.dll into param[0] and param[1]
1330 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1331                            address top_address, void * param) {
1332   if (!param) return -1;
1333 
1334   if (base_addr   <= (address)_locate_jvm_dll &&
1335       top_address > (address)_locate_jvm_dll) {
1336     ((address*)param)[0] = base_addr;
1337     ((address*)param)[1] = top_address;
1338     return 1;
1339   }
1340   return 0;
1341 }
1342 
1343 address vm_lib_location[2];    // start and end address of jvm.dll
1344 
1345 // check if addr is inside jvm.dll
1346 bool os::address_is_in_vm(address addr) {
1347   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1348     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1349       assert(false, "Can't find jvm module.");
1350       return false;
1351     }
1352   }
1353 
1354   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1355 }
1356 
1357 // print module info; param is outputStream*
1358 static int _print_module(const char* fname, address base_address,
1359                          address top_address, void* param) {
1360   if (!param) return -1;
1361 
1362   outputStream* st = (outputStream*)param;
1363 
1364   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1365   return 0;
1366 }
1367 
// Loads a .dll/.so and,
// in case of error, checks whether the .dll/.so was built for the
// same architecture that Hotspot is running on.
1371 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1372   log_info(os)("attempting shared library load of %s", name);
1373 
1374   void * result = LoadLibrary(name);
1375   if (result != NULL) {
1376     Events::log(NULL, "Loaded shared library %s", name);
1377     // Recalculate pdb search path if a DLL was loaded successfully.
1378     SymbolEngine::recalc_search_path();
1379     log_info(os)("shared library load of %s was successful", name);
1380     return result;
1381   }
1382   DWORD errcode = GetLastError();
1383   // Read system error message into ebuf
1384   // It may or may not be overwritten below (in the for loop and just above)
1385   lasterror(ebuf, (size_t) ebuflen);
1386   ebuf[ebuflen - 1] = '\0';
1387   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1388   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1389 
1390   if (errcode == ERROR_MOD_NOT_FOUND) {
1391     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1392     ebuf[ebuflen - 1] = '\0';
1393     return NULL;
1394   }
1395 
  // Parsing the dll below.
  // If we can read the dll-info and find that the dll was built
  // for an architecture other than the one Hotspot is running on,
  // then print "DLL was built for a different architecture" to the buffer;
  // otherwise call os::lasterror to obtain the system error message.
1401   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1402   if (fd < 0) {
1403     return NULL;
1404   }
1405 
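  // PE layout relied on below: the 32-bit value at file offset 0x3c points to
  // the "PE\0\0" signature; the COFF file header follows immediately, and its
  // first 16-bit field is the Machine code (e.g. 0x8664 for AMD64, 0x014c for I386).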
1406   uint32_t signature_offset;
1407   uint16_t lib_arch = 0;
1408   bool failed_to_get_lib_arch =
1409     ( // Go to position 3c in the dll
1410      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1411      ||
1412      // Read location of signature
1413      (sizeof(signature_offset) !=
1414      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1415      ||
1416      // Go to COFF File Header in dll
1417      // that is located after "signature" (4 bytes long)
1418      (os::seek_to_file_offset(fd,
1419      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1420      ||
1421      // Read field that contains code of architecture
1422      // that dll was built for
1423      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1424     );
1425 
1426   ::close(fd);
1427   if (failed_to_get_lib_arch) {
1428     // file i/o error - report os::lasterror(...) msg
1429     return NULL;
1430   }
1431 
1432   typedef struct {
1433     uint16_t arch_code;
1434     char* arch_name;
1435   } arch_t;
1436 
1437   static const arch_t arch_array[] = {
1438     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1439     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
1440   };
1441 #if (defined _M_AMD64)
1442   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1443 #elif (defined _M_IX86)
1444   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1445 #else
1446   #error Method os::dll_load requires that one of following \
1447          is defined :_M_AMD64 or _M_IX86
1448 #endif
1449 
1450 
  // Obtain strings for the printf operation:
  // lib_arch_str shall contain the name of the platform this .dll was built for,
  // running_arch_str shall contain the name of the platform Hotspot was built for.
1454   char *running_arch_str = NULL, *lib_arch_str = NULL;
1455   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1456     if (lib_arch == arch_array[i].arch_code) {
1457       lib_arch_str = arch_array[i].arch_name;
1458     }
1459     if (running_arch == arch_array[i].arch_code) {
1460       running_arch_str = arch_array[i].arch_name;
1461     }
1462   }
1463 
1464   assert(running_arch_str,
1465          "Didn't find running architecture code in arch_array");
1466 
  // If the architecture is right
  // but some other error took place, report the os::lasterror(...) msg.
1469   if (lib_arch == running_arch) {
1470     return NULL;
1471   }
1472 
1473   if (lib_arch_str != NULL) {
1474     ::_snprintf(ebuf, ebuflen - 1,
1475                 "Can't load %s-bit .dll on a %s-bit platform",
1476                 lib_arch_str, running_arch_str);
1477   } else {
    // don't know what architecture this dll was built for
1479     ::_snprintf(ebuf, ebuflen - 1,
1480                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1481                 lib_arch, running_arch_str);
1482   }
1483 
1484   return NULL;
1485 }
1486 
1487 void os::print_dll_info(outputStream *st) {
1488   st->print_cr("Dynamic libraries:");
1489   get_loaded_modules_info(_print_module, (void *)st);
1490 }
1491 
1492 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1493   HANDLE   hProcess;
1494 
1495 # define MAX_NUM_MODULES 128
1496   HMODULE     modules[MAX_NUM_MODULES];
1497   static char filename[MAX_PATH];
1498   int         result = 0;
1499 
1500   int pid = os::current_process_id();
1501   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1502                          FALSE, pid);
1503   if (hProcess == NULL) return 0;
1504 
1505   DWORD size_needed;
1506   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1507     CloseHandle(hProcess);
1508     return 0;
1509   }
1510 
1511   // number of modules that are currently loaded
1512   int num_modules = size_needed / sizeof(HMODULE);
1513 
1514   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1515     // Get Full pathname:
1516     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1517       filename[0] = '\0';
1518     }
1519 
1520     MODULEINFO modinfo;
1521     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1522       modinfo.lpBaseOfDll = NULL;
1523       modinfo.SizeOfImage = 0;
1524     }
1525 
1526     // Invoke callback function
1527     result = callback(filename, (address)modinfo.lpBaseOfDll,
1528                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1529     if (result) break;
1530   }
1531 
1532   CloseHandle(hProcess);
1533   return result;
1534 }
1535 
1536 bool os::get_host_name(char* buf, size_t buflen) {
1537   DWORD size = (DWORD)buflen;
1538   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1539 }
1540 
1541 void os::get_summary_os_info(char* buf, size_t buflen) {
1542   stringStream sst(buf, buflen);
1543   os::win32::print_windows_version(&sst);
1544   // chop off newline character
1545   char* nl = strchr(buf, '\n');
1546   if (nl != NULL) *nl = '\0';
1547 }
1548 
1549 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1550 #if _MSC_VER >= 1900
1551   // Starting with Visual Studio 2015, vsnprint is C99 compliant.
1552   int result = ::vsnprintf(buf, len, fmt, args);
1553   // If an encoding error occurred (result < 0) then it's not clear
1554   // whether the buffer is NUL terminated, so ensure it is.
1555   if ((result < 0) && (len > 0)) {
1556     buf[len - 1] = '\0';
1557   }
1558   return result;
1559 #else
1560   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1561   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1562   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1563   // go straight to _vscprintf.  The output is going to be truncated in
1564   // that case, except in the unusual case of empty output.  More
1565   // importantly, the documentation for various versions of Visual Studio
1566   // are inconsistent about the behavior of _vsnprintf when len == 0,
1567   // including it possibly being an error.
1568   int result = -1;
1569   if (len > 0) {
1570     result = _vsnprintf(buf, len, fmt, args);
1571     // If output (including NUL terminator) is truncated, the buffer
1572     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1573     if ((result < 0) || ((size_t)result >= len)) {
1574       buf[len - 1] = '\0';
1575     }
1576   }
1577   if (result < 0) {
1578     result = _vscprintf(fmt, args);
1579   }
1580   return result;
1581 #endif // _MSC_VER dispatch
1582 }
1583 
1584 static inline time_t get_mtime(const char* filename) {
1585   struct stat st;
1586   int ret = os::stat(filename, &st);
1587   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1588   return st.st_mtime;
1589 }
1590 
1591 int os::compare_file_modified_times(const char* file1, const char* file2) {
1592   time_t t1 = get_mtime(file1);
1593   time_t t2 = get_mtime(file2);
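  // > 0 if file1 is newer than file2, < 0 if older, 0 if the modification
  // times are equal (the time_t difference is truncated to int).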
1594   return t1 - t2;
1595 }
1596 
1597 void os::print_os_info_brief(outputStream* st) {
1598   os::print_os_info(st);
1599 }
1600 
1601 void os::print_os_info(outputStream* st) {
1602 #ifdef ASSERT
1603   char buffer[1024];
1604   st->print("HostName: ");
1605   if (get_host_name(buffer, sizeof(buffer))) {
1606     st->print("%s ", buffer);
1607   } else {
1608     st->print("N/A ");
1609   }
1610 #endif
1611   st->print("OS:");
1612   os::win32::print_windows_version(st);
1613 
1614 #ifdef _LP64
1615   VM_Version::print_platform_virtualization_info(st);
1616 #endif
1617 }
1618 
1619 void os::win32::print_windows_version(outputStream* st) {
1620   OSVERSIONINFOEX osvi;
1621   VS_FIXEDFILEINFO *file_info;
1622   TCHAR kernel32_path[MAX_PATH];
1623   UINT len, ret;
1624 
1625   // Use the GetVersionEx information to see if we're on a server or
1626   // workstation edition of Windows. Starting with Windows 8.1 we can't
1627   // trust the OS version information returned by this API.
1628   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1629   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1630   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1631     st->print_cr("Call to GetVersionEx failed");
1632     return;
1633   }
1634   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1635 
1636   // Get the full path to \Windows\System32\kernel32.dll and use that for
1637   // determining what version of Windows we're running on.
1638   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1639   ret = GetSystemDirectory(kernel32_path, len);
1640   if (ret == 0 || ret > len) {
1641     st->print_cr("Call to GetSystemDirectory failed");
1642     return;
1643   }
1644   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1645 
1646   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1647   if (version_size == 0) {
1648     st->print_cr("Call to GetFileVersionInfoSize failed");
1649     return;
1650   }
1651 
1652   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1653   if (version_info == NULL) {
1654     st->print_cr("Failed to allocate version_info");
1655     return;
1656   }
1657 
1658   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1659     os::free(version_info);
1660     st->print_cr("Call to GetFileVersionInfo failed");
1661     return;
1662   }
1663 
1664   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1665     os::free(version_info);
1666     st->print_cr("Call to VerQueryValue failed");
1667     return;
1668   }
1669 
1670   int major_version = HIWORD(file_info->dwProductVersionMS);
1671   int minor_version = LOWORD(file_info->dwProductVersionMS);
1672   int build_number = HIWORD(file_info->dwProductVersionLS);
1673   int build_minor = LOWORD(file_info->dwProductVersionLS);
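  // Encode the version as major * 1000 + minor, e.g. 6.3 -> 6003, 10.0 -> 10000.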
1674   int os_vers = major_version * 1000 + minor_version;
1675   os::free(version_info);
1676 
1677   st->print(" Windows ");
1678   switch (os_vers) {
1679 
1680   case 6000:
1681     if (is_workstation) {
1682       st->print("Vista");
1683     } else {
1684       st->print("Server 2008");
1685     }
1686     break;
1687 
1688   case 6001:
1689     if (is_workstation) {
1690       st->print("7");
1691     } else {
1692       st->print("Server 2008 R2");
1693     }
1694     break;
1695 
1696   case 6002:
1697     if (is_workstation) {
1698       st->print("8");
1699     } else {
1700       st->print("Server 2012");
1701     }
1702     break;
1703 
1704   case 6003:
1705     if (is_workstation) {
1706       st->print("8.1");
1707     } else {
1708       st->print("Server 2012 R2");
1709     }
1710     break;
1711 
1712   case 10000:
1713     if (is_workstation) {
1714       st->print("10");
1715     } else {
1716       // Distinguish Windows Server 2016 from Server 2019 by the build number;
1717       // Windows Server 2019 (GA 10/2018) has build number 17763.
1718       if (build_number > 17762) {
1719         st->print("Server 2019");
1720       } else {
1721         st->print("Server 2016");
1722       }
1723     }
1724     break;
1725 
1726   default:
1727   // Unrecognized Windows version; print its major and minor versions
1728     st->print("%d.%d", major_version, minor_version);
1729     break;
1730   }
1731 
1732   // Retrieve SYSTEM_INFO via GetNativeSystemInfo to determine whether we are
1733   // running on a 64-bit processor.
1734   SYSTEM_INFO si;
1735   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1736   GetNativeSystemInfo(&si);
1737   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1738     st->print(" , 64 bit");
1739   }
1740 
1741   st->print(" Build %d", build_number);
1742   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1743   st->cr();
1744 }
1745 
1746 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1747   // Nothing to do for now.
1748 }
1749 
1750 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1751   HKEY key;
1752   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1753                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1754   if (status == ERROR_SUCCESS) {
1755     DWORD size = (DWORD)buflen;
1756     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1757     if (status != ERROR_SUCCESS) {
1758         strncpy(buf, "## __CPU__", buflen);
1759     }
1760     RegCloseKey(key);
1761   } else {
1762     // Fall back to a generic CPU description.
1763     strncpy(buf, "## __CPU__", buflen);
1764   }
1765 }
1766 
1767 void os::print_memory_info(outputStream* st) {
1768   st->print("Memory:");
1769   st->print(" %dk page", os::vm_page_size()>>10);
1770 
1771   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1772   // values if total memory is larger than 4GB
1773   MEMORYSTATUSEX ms;
1774   ms.dwLength = sizeof(ms);
1775   int r1 = GlobalMemoryStatusEx(&ms);
1776 
1777   if (r1 != 0) {
1778     st->print(", system-wide physical " INT64_FORMAT "M ",
1779              (int64_t) ms.ullTotalPhys >> 20);
1780     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1781 
1782     st->print("TotalPageFile size " INT64_FORMAT "M ",
1783              (int64_t) ms.ullTotalPageFile >> 20);
1784     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1785              (int64_t) ms.ullAvailPageFile >> 20);
1786 
1787     // On 32-bit, Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit)
1788 #if defined(_M_IX86)
1789     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1790              (int64_t) ms.ullTotalVirtual >> 20);
1791     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1792 #endif
1793   } else {
1794     st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
1795   }
1796 
1797   // extended memory statistics for a process
1798   PROCESS_MEMORY_COUNTERS_EX pmex;
1799   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1800   pmex.cb = sizeof(pmex);
1801   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1802 
1803   if (r2 != 0) {
1804     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1805              (int64_t) pmex.WorkingSetSize >> 20);
1806     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1807 
1808     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1809              (int64_t) pmex.PrivateUsage >> 20);
1810     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1811   } else {
1812     st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
1813   }
1814 
1815   st->cr();
1816 }
1817 
1818 bool os::signal_sent_by_kill(const void* siginfo) {
1819   // TODO: Is this possible?
1820   return false;
1821 }
1822 
1823 void os::print_siginfo(outputStream *st, const void* siginfo) {
1824   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1825   st->print("siginfo:");
1826 
1827   char tmp[64];
1828   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1829     strcpy(tmp, "EXCEPTION_??");
1830   }
1831   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1832 
1833   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1834        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1835        er->NumberParameters >= 2) {
1836     switch (er->ExceptionInformation[0]) {
1837     case 0: st->print(", reading address"); break;
1838     case 1: st->print(", writing address"); break;
1839     case 8: st->print(", data execution prevention violation at address"); break;
1840     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1841                        er->ExceptionInformation[0]);
1842     }
1843     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1844   } else {
1845     int num = er->NumberParameters;
1846     if (num > 0) {
1847       st->print(", ExceptionInformation=");
1848       for (int i = 0; i < num; i++) {
1849         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1850       }
1851     }
1852   }
1853   st->cr();
1854 }
1855 
1856 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1857   // TODO: Can we kill thread?
1858   return false;
1859 }
1860 
1861 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1862   // do nothing
1863 }
1864 
1865 static char saved_jvm_path[MAX_PATH] = {0};
1866 
1867 // Find the full path to the current module, jvm.dll
1868 void os::jvm_path(char *buf, jint buflen) {
1869   // Error checking.
1870   if (buflen < MAX_PATH) {
1871     assert(false, "must use a large-enough buffer");
1872     buf[0] = '\0';
1873     return;
1874   }
1875   // Lazily resolve the path to the current module.
1876   if (saved_jvm_path[0] != 0) {
1877     strcpy(buf, saved_jvm_path);
1878     return;
1879   }
1880 
1881   buf[0] = '\0';
1882   if (Arguments::sun_java_launcher_is_altjvm()) {
1883     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1884     // for a JAVA_HOME environment variable and fix up the path so it
1885     // looks like jvm.dll is installed there (append a fake suffix
1886     // hotspot/jvm.dll).
1887     char* java_home_var = ::getenv("JAVA_HOME");
1888     if (java_home_var != NULL && java_home_var[0] != 0 &&
1889         strlen(java_home_var) < (size_t)buflen) {
1890       strncpy(buf, java_home_var, buflen);
1891 
1892       // Determine if this is a legacy image or a modules image;
1893       // a modules image doesn't have a "jre" subdirectory.
1894       size_t len = strlen(buf);
1895       char* jrebin_p = buf + len;
1896       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1897       if (0 != _access(buf, 0)) {
1898         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1899       }
1900       len = strlen(buf);
1901       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1902     }
1903   }
1904 
1905   if (buf[0] == '\0') {
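    // Otherwise use the path of the already-loaded jvm.dll, obtained via its module handle.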
1906     GetModuleFileName(vm_lib_handle, buf, buflen);
1907   }
1908   strncpy(saved_jvm_path, buf, MAX_PATH);
1909   saved_jvm_path[MAX_PATH - 1] = '\0';
1910 }
1911 
1912 
1913 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1914 #ifndef _WIN64
1915   st->print("_");
1916 #endif
1917 }
1918 
1919 
1920 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1921 #ifndef _WIN64
1922   st->print("@%d", args_size  * sizeof(int));
1923 #endif
1924 }
1925 
1926 // This method is a copy of JDK's sysGetLastErrorString
1927 // from src/windows/hpi/src/system_md.c
1928 
1929 size_t os::lasterror(char* buf, size_t len) {
1930   DWORD errval;
1931 
1932   if ((errval = GetLastError()) != 0) {
1933     // DOS error
1934     size_t n = (size_t)FormatMessage(
1935                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1936                                      NULL,
1937                                      errval,
1938                                      0,
1939                                      buf,
1940                                      (DWORD)len,
1941                                      NULL);
1942     if (n > 3) {
1943       // Drop final '.', CR, LF
1944       if (buf[n - 1] == '\n') n--;
1945       if (buf[n - 1] == '\r') n--;
1946       if (buf[n - 1] == '.') n--;
1947       buf[n] = '\0';
1948     }
1949     return n;
1950   }
1951 
1952   if (errno != 0) {
1953     // C runtime error that has no corresponding DOS error code
1954     const char* s = os::strerror(errno);
1955     size_t n = strlen(s);
1956     if (n >= len) n = len - 1;
1957     strncpy(buf, s, n);
1958     buf[n] = '\0';
1959     return n;
1960   }
1961 
1962   return 0;
1963 }
1964 
1965 int os::get_last_error() {
1966   DWORD error = GetLastError();
1967   if (error == 0) {
1968     error = errno;
1969   }
1970   return (int)error;
1971 }
1972 
1973 // sun.misc.Signal
1974 // NOTE that this is a workaround for an apparent kernel bug where if
1975 // a signal handler for SIGBREAK is installed then that signal handler
1976 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1977 // See bug 4416763.
1978 static void (*sigbreakHandler)(int) = NULL;
1979 
1980 static void UserHandler(int sig, void *siginfo, void *context) {
1981   os::signal_notify(sig);
1982   // We need to reinstate the signal handler each time...
1983   os::signal(sig, (void*)UserHandler);
1984 }
1985 
1986 void* os::user_handler() {
1987   return (void*) UserHandler;
1988 }
1989 
1990 void* os::signal(int signal_number, void* handler) {
1991   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1992     void (*oldHandler)(int) = sigbreakHandler;
1993     sigbreakHandler = (void (*)(int)) handler;
1994     return (void*) oldHandler;
1995   } else {
1996     return (void*)::signal(signal_number, (void (*)(int))handler);
1997   }
1998 }
1999 
2000 void os::signal_raise(int signal_number) {
2001   raise(signal_number);
2002 }
2003 
2004 // The Win32 C runtime library maps all console control events other than ^C
2005 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2006 // logoff, and shutdown events.  We therefore install our own console handler
2007 // that raises SIGTERM for the latter cases.
2008 //
2009 static BOOL WINAPI consoleHandler(DWORD event) {
2010   switch (event) {
2011   case CTRL_C_EVENT:
2012     if (VMError::is_error_reported()) {
2013       // Ctrl-C is pressed during error reporting, likely because the error
2014       // handler fails to abort. Let the VM die immediately.
2015       os::die();
2016     }
2017 
2018     os::signal_raise(SIGINT);
2019     return TRUE;
2020     break;
2021   case CTRL_BREAK_EVENT:
2022     if (sigbreakHandler != NULL) {
2023       (*sigbreakHandler)(SIGBREAK);
2024     }
2025     return TRUE;
2026     break;
2027   case CTRL_LOGOFF_EVENT: {
2028     // Don't terminate JVM if it is running in a non-interactive session,
2029     // such as a service process.
2030     USEROBJECTFLAGS flags;
2031     HANDLE handle = GetProcessWindowStation();
2032     if (handle != NULL &&
2033         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2034         sizeof(USEROBJECTFLAGS), NULL)) {
2035       // If it is a non-interactive session, let the next handler deal
2036       // with it.
2037       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2038         return FALSE;
2039       }
2040     }
2041   }
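  // Interactive session: fall through and treat logoff like close/shutdown.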
2042   case CTRL_CLOSE_EVENT:
2043   case CTRL_SHUTDOWN_EVENT:
2044     os::signal_raise(SIGTERM);
2045     return TRUE;
2046     break;
2047   default:
2048     break;
2049   }
2050   return FALSE;
2051 }
2052 
2053 // The following code was moved from os.cpp to make it platform specific,
2054 // which it is by its very nature.
2055 
2056 // Return maximum OS signal used + 1 for internal use only
2057 // Used as exit signal for signal_thread
2058 int os::sigexitnum_pd() {
2059   return NSIG;
2060 }
2061 
2062 // a counter for each possible signal value, including signal_thread exit signal
2063 static volatile jint pending_signals[NSIG+1] = { 0 };
2064 static Semaphore* sig_sem = NULL;
2065 
2066 static void jdk_misc_signal_init() {
2067   // Initialize signal structures
2068   memset((void*)pending_signals, 0, sizeof(pending_signals));
2069 
2070   // Initialize signal semaphore
2071   sig_sem = new Semaphore();
2072 
2073   // Programs embedding the VM do not want it to attempt to receive
2074   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2075   // shutdown hooks mechanism introduced in 1.3.  For example, when
2076   // the VM is run as part of a Windows NT service (i.e., a servlet
2077   // engine in a web server), the correct behavior is for any console
2078   // control handler to return FALSE, not TRUE, because the OS's
2079   // "final" handler for such events allows the process to continue if
2080   // it is a service (while terminating it if it is not a service).
2081   // To make this behavior uniform and the mechanism simpler, we
2082   // completely disable the VM's usage of these console events if -Xrs
2083   // (=ReduceSignalUsage) is specified.  This means, for example, that
2084   // the CTRL-BREAK thread dump mechanism is also disabled in this
2085   // case.  See bugs 4323062, 4345157, and related bugs.
2086 
2087   // Add a CTRL-C handler
2088   SetConsoleCtrlHandler(consoleHandler, TRUE);
2089 }
2090 
2091 void os::signal_notify(int sig) {
2092   if (sig_sem != NULL) {
2093     Atomic::inc(&pending_signals[sig]);
2094     sig_sem->signal();
2095   } else {
2096     // With ReduceSignalUsage the signal thread is not created and
2097     // jdk_misc_signal_init() is not called.
2098     assert(ReduceSignalUsage, "signal semaphore should be created");
2099   }
2100 }
2101 
2102 static int check_pending_signals() {
2103   while (true) {
2104     for (int i = 0; i < NSIG + 1; i++) {
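      // Atomically decrement the pending count for signal i; if the CAS returns
      // the old value we read, this thread has claimed one pending occurrence.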
2105       jint n = pending_signals[i];
2106       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2107         return i;
2108       }
2109     }
2110     JavaThread *thread = JavaThread::current();
2111 
2112     ThreadBlockInVM tbivm(thread);
2113 
2114     bool threadIsSuspended;
2115     do {
2116       thread->set_suspend_equivalent();
2117       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2118       sig_sem->wait();
2119 
2120       // were we externally suspended while we were waiting?
2121       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2122       if (threadIsSuspended) {
2123         // The semaphore has been incremented, but while we were waiting
2124         // another thread suspended us. We don't want to continue running
2125         // while suspended because that would surprise the thread that
2126         // suspended us.
2127         sig_sem->signal();
2128 
2129         thread->java_suspend_self();
2130       }
2131     } while (threadIsSuspended);
2132   }
2133 }
2134 
2135 int os::signal_wait() {
2136   return check_pending_signals();
2137 }
2138 
2139 // Implicit OS exception handling
2140 
2141 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2142                       address handler) {
2143   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2144   // Save pc in thread
2145 #ifdef _M_AMD64
2146   // Do not blow up if no thread info available.
2147   if (thread) {
2148     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2149   }
2150   // Set pc to handler
2151   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2152 #else
2153   // Do not blow up if no thread info available.
2154   if (thread) {
2155     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2156   }
2157   // Set pc to handler
2158   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2159 #endif
2160 
2161   // Continue the execution
2162   return EXCEPTION_CONTINUE_EXECUTION;
2163 }
2164 
2165 
2166 // Used for PostMortemDump
2167 extern "C" void safepoints();
2168 extern "C" void find(int x);
2169 extern "C" void events();
2170 
2171 // According to Windows API documentation, an illegal instruction sequence should generate
2172 // the 0xC000001D exception code. However, real-world experience shows that occasionally
2173 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2174 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2175 
2176 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2177 
2178 // From "Execution Protection in the Windows Operating System" draft 0.35
2179 // Once a system header becomes available, the "real" define should be
2180 // included or copied here.
2181 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2182 
2183 // Windows Vista/2008 heap corruption check
2184 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2185 
2186 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2187 // C++ compiler contain this error code. Because this is a compiler-generated
2188 // error, the code is not listed in the Win32 API header files.
2189 // The code is actually a cryptic mnemonic device, with the initial "E"
2190 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2191 // ASCII values of "msc".
2192 
2193 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2194 
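// def_excpt(val) expands to the initializer { "val", val }, pairing each
// exception name with its numeric code via the preprocessor's stringizing operator.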
2195 #define def_excpt(val) { #val, (val) }
2196 
2197 static const struct { const char* name; uint number; } exceptlabels[] = {
2198     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2199     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2200     def_excpt(EXCEPTION_BREAKPOINT),
2201     def_excpt(EXCEPTION_SINGLE_STEP),
2202     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2203     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2204     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2205     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2206     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2207     def_excpt(EXCEPTION_FLT_OVERFLOW),
2208     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2209     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2210     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2211     def_excpt(EXCEPTION_INT_OVERFLOW),
2212     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2213     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2214     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2215     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2216     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2217     def_excpt(EXCEPTION_STACK_OVERFLOW),
2218     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2219     def_excpt(EXCEPTION_GUARD_PAGE),
2220     def_excpt(EXCEPTION_INVALID_HANDLE),
2221     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2222     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2223 };
2224 
2225 #undef def_excpt
2226 
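// Example (illustrative only, not part of the VM logic):
//   char buf[64];
//   const char* name = os::exception_name((int)EXCEPTION_ACCESS_VIOLATION, buf, sizeof(buf));
//   // name now points to buf and contains "EXCEPTION_ACCESS_VIOLATION";
//   // NULL is returned for codes not listed in the table above.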
2227 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2228   uint code = static_cast<uint>(exception_code);
2229   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2230     if (exceptlabels[i].number == code) {
2231       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2232       return buf;
2233     }
2234   }
2235 
2236   return NULL;
2237 }
2238 
2239 //-----------------------------------------------------------------------------
2240 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2241   // handle exception caused by idiv; should only happen for -MinInt/-1
2242   // (division by zero is handled explicitly)
2243 #ifdef  _M_AMD64
2244   PCONTEXT ctx = exceptionInfo->ContextRecord;
2245   address pc = (address)ctx->Rip;
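  // idiv with a register operand is encoded as 0xF7 with a ModRM byte of
  // 0xF8..0xFF (/7, mod=11); on x86_64 it may be preceded by a REX prefix.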
2246   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2247   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2248   if (pc[0] == 0xF7) {
2249     // set correct result values and continue after idiv instruction
2250     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2251   } else {
2252     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2253   }
2254   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2255   // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2256   // idiv opcode (0xF7).
2257   ctx->Rdx = (DWORD)0;             // remainder
2258   // Continue the execution
2259 #else
2260   PCONTEXT ctx = exceptionInfo->ContextRecord;
2261   address pc = (address)ctx->Eip;
2262   assert(pc[0] == 0xF7, "not an idiv opcode");
2263   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2264   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2265   // set correct result values and continue after idiv instruction
2266   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2267   ctx->Eax = (DWORD)min_jint;      // result
2268   ctx->Edx = (DWORD)0;             // remainder
2269   // Continue the execution
2270 #endif
2271   return EXCEPTION_CONTINUE_EXECUTION;
2272 }
2273 
2274 //-----------------------------------------------------------------------------
2275 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2276   PCONTEXT ctx = exceptionInfo->ContextRecord;
2277 #ifndef  _WIN64
2278   // handle exception caused by native method modifying control word
2279   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2280 
2281   switch (exception_code) {
2282   case EXCEPTION_FLT_DENORMAL_OPERAND:
2283   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2284   case EXCEPTION_FLT_INEXACT_RESULT:
2285   case EXCEPTION_FLT_INVALID_OPERATION:
2286   case EXCEPTION_FLT_OVERFLOW:
2287   case EXCEPTION_FLT_STACK_CHECK:
2288   case EXCEPTION_FLT_UNDERFLOW:
2289     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2290     if (fp_control_word != ctx->FloatSave.ControlWord) {
2291       // Restore FPCW and mask out FLT exceptions
2292       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2293       // Mask out pending FLT exceptions
2294       ctx->FloatSave.StatusWord &=  0xffffff00;
2295       return EXCEPTION_CONTINUE_EXECUTION;
2296     }
2297   }
2298 
2299   if (prev_uef_handler != NULL) {
2300     // We didn't handle this exception so pass it to the previous
2301     // UnhandledExceptionFilter.
2302     return (prev_uef_handler)(exceptionInfo);
2303   }
2304 #else // !_WIN64
2305   // On Windows, the mxcsr control bits are non-volatile across calls
2306   // See also CR 6192333
2307   //
2308   jint MxCsr = INITIAL_MXCSR;
2309   // we can't use StubRoutines::addr_mxcsr_std()
2310   // because in Win64 mxcsr is not saved there
2311   if (MxCsr != ctx->MxCsr) {
2312     ctx->MxCsr = MxCsr;
2313     return EXCEPTION_CONTINUE_EXECUTION;
2314   }
2315 #endif // !_WIN64
2316 
2317   return EXCEPTION_CONTINUE_SEARCH;
2318 }
2319 
2320 static inline void report_error(Thread* t, DWORD exception_code,
2321                                 address addr, void* siginfo, void* context) {
2322   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2323 
2324   // If UseOsErrorReporting, this will return here and save the error file
2325   // somewhere where we can find it in the minidump.
2326 }
2327 
2328 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2329         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2330   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2331   address addr = (address) exceptionRecord->ExceptionInformation[1];
2332   if (Interpreter::contains(pc)) {
2333     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2334     if (!fr->is_first_java_frame()) {
2335       // get_frame_at_stack_banging_point() is only called when we
2336       // have well defined stacks so java_sender() calls do not need
2337       // to assert safe_for_sender() first.
2338       *fr = fr->java_sender();
2339     }
2340   } else {
2341     // More complex handling for compiled code
2342     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2343     CodeBlob* cb = CodeCache::find_blob(pc);
2344     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2345       // Not sure where the pc points to, fallback to default
2346       // stack overflow handling
2347       return false;
2348     } else {
2349       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2350       // in compiled code, the stack banging is performed just after the return pc
2351       // has been pushed on the stack
2352       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2353       if (!fr->is_java_frame()) {
2354         // See java_sender() comment above.
2355         *fr = fr->java_sender();
2356       }
2357     }
2358   }
2359   assert(fr->is_java_frame(), "Safety check");
2360   return true;
2361 }
2362 
2363 #if INCLUDE_AOT
2364 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2365   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2366   address addr = (address) exceptionRecord->ExceptionInformation[1];
2367   address pc = (address) exceptionInfo->ContextRecord->Rip;
2368 
2369   // Handle the case where we get an implicit exception in AOT generated
2370   // code.  AOT DLL's loaded are not registered for structured exceptions.
2371   // If the exception occurred in the codeCache or AOT code, pass control
2372   // to our normal exception handler.
2373   CodeBlob* cb = CodeCache::find_blob(pc);
2374   if (cb != NULL) {
2375     return topLevelExceptionFilter(exceptionInfo);
2376   }
2377 
2378   return EXCEPTION_CONTINUE_SEARCH;
2379 }
2380 #endif
2381 
2382 //-----------------------------------------------------------------------------
2383 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2384   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2385   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2386 #ifdef _M_AMD64
2387   address pc = (address) exceptionInfo->ContextRecord->Rip;
2388 #else
2389   address pc = (address) exceptionInfo->ContextRecord->Eip;
2390 #endif
2391   Thread* t = Thread::current_or_null_safe();
2392 
2393   // Handle SafeFetch32 and SafeFetchN exceptions.
2394   if (StubRoutines::is_safefetch_fault(pc)) {
2395     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2396   }
2397 
2398 #ifndef _WIN64
2399   // Execution protection violation - win32 running on AMD64 only
2400   // Handled first to avoid misdiagnosis as a "normal" access violation;
2401   // This is safe to do because we have a new/unique ExceptionInformation
2402   // code for this condition.
2403   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2404     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2405     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2406     address addr = (address) exceptionRecord->ExceptionInformation[1];
2407 
2408     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2409       int page_size = os::vm_page_size();
2410 
2411       // Make sure the pc and the faulting address are sane.
2412       //
2413       // If an instruction spans a page boundary, and the page containing
2414       // the beginning of the instruction is executable but the following
2415       // page is not, the pc and the faulting address might be slightly
2416       // different - we still want to unguard the 2nd page in this case.
2417       //
2418       // 15 bytes seems to be a (very) safe value for max instruction size.
2419       bool pc_is_near_addr =
2420         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2421       bool instr_spans_page_boundary =
2422         (align_down((intptr_t) pc ^ (intptr_t) addr,
2423                          (intptr_t) page_size) > 0);
2424 
2425       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2426         static volatile address last_addr =
2427           (address) os::non_memory_address_word();
2428 
2429         // In conservative mode, don't unguard unless the address is in the VM
2430         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2431             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2432 
2433           // Set memory to RWX and retry
2434           address page_start = align_down(addr, page_size);
2435           bool res = os::protect_memory((char*) page_start, page_size,
2436                                         os::MEM_PROT_RWX);
2437 
2438           log_debug(os)("Execution protection violation "
2439                         "at " INTPTR_FORMAT
2440                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2441                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2442 
2443           // Set last_addr so if we fault again at the same address, we don't
2444           // end up in an endless loop.
2445           //
2446           // There are two potential complications here.  Two threads trapping
2447           // at the same address at the same time could cause one of the
2448           // threads to think it already unguarded, and abort the VM.  Likely
2449           // very rare.
2450           //
2451           // The other race involves two threads alternately trapping at
2452           // different addresses and failing to unguard the page, resulting in
2453           // an endless loop.  This condition is probably even more unlikely
2454           // than the first.
2455           //
2456           // Although both cases could be avoided by using locks or thread
2457           // local last_addr, these solutions are unnecessary complication:
2458           // this handler is a best-effort safety net, not a complete solution.
2459           // It is disabled by default and should only be used as a workaround
2460           // in case we missed any no-execute-unsafe VM code.
2461 
2462           last_addr = addr;
2463 
2464           return EXCEPTION_CONTINUE_EXECUTION;
2465         }
2466       }
2467 
2468       // Last unguard failed or not unguarding
2469       tty->print_raw_cr("Execution protection violation");
2470       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2471                    exceptionInfo->ContextRecord);
2472       return EXCEPTION_CONTINUE_SEARCH;
2473     }
2474   }
2475 #endif // _WIN64
2476 
2477   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2478       VM_Version::is_cpuinfo_segv_addr(pc)) {
2479     // Verify that the OS saves/restores AVX registers.
2480     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2481   }
2482 
2483   if (t != NULL && t->is_Java_thread()) {
2484     JavaThread* thread = (JavaThread*) t;
2485     bool in_java = thread->thread_state() == _thread_in_Java;
2486 
2487     // Handle potential stack overflows up front.
2488     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2489       if (thread->stack_guards_enabled()) {
2490         if (in_java) {
2491           frame fr;
2492           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2493           address addr = (address) exceptionRecord->ExceptionInformation[1];
2494           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2495             assert(fr.is_java_frame(), "Must be a Java frame");
2496             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2497           }
2498         }
2499         // Yellow zone violation.  The o/s has unprotected the first yellow
2500         // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
2501         // update the enabled status, even if the zone contains only one page.
2502         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2503         thread->disable_stack_yellow_reserved_zone();
2504         // If not in java code, return and hope for the best.
2505         return in_java
2506             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2507             :  EXCEPTION_CONTINUE_EXECUTION;
2508       } else {
2509         // Fatal red zone violation.
2510         thread->disable_stack_red_zone();
2511         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2512         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2513                       exceptionInfo->ContextRecord);
2514         return EXCEPTION_CONTINUE_SEARCH;
2515       }
2516     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2517       // Either stack overflow or null pointer exception.
2518       if (in_java) {
2519         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2520         address addr = (address) exceptionRecord->ExceptionInformation[1];
2521         address stack_end = thread->stack_end();
2522         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2523           // Stack overflow.
2524           assert(!os::uses_stack_guard_pages(),
2525                  "should be caught by red zone code above.");
2526           return Handle_Exception(exceptionInfo,
2527                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2528         }
2529         // Check for safepoint polling and implicit null pointers.
2530         // We only expect null pointers in the stubs (vtable);
2531         // the rest are checked explicitly now.
2532         CodeBlob* cb = CodeCache::find_blob(pc);
2533         if (cb != NULL) {
2534           if (os::is_poll_address(addr)) {
2535             address stub = SharedRuntime::get_poll_stub(pc);
2536             return Handle_Exception(exceptionInfo, stub);
2537           }
2538         }
2539         {
2540 #ifdef _WIN64
2541           // If it's a legal stack address, map the entire region in
2542           //
2543           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2544           address addr = (address) exceptionRecord->ExceptionInformation[1];
2545           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2546             addr = (address)((uintptr_t)addr &
2547                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2548             os::commit_memory((char *)addr, thread->stack_base() - addr,
2549                               !ExecMem);
2550             return EXCEPTION_CONTINUE_EXECUTION;
2551           } else
2552 #endif
2553           {
2554             // Null pointer exception.
2555             if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2556               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2557               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2558             }
2559             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2560                          exceptionInfo->ContextRecord);
2561             return EXCEPTION_CONTINUE_SEARCH;
2562           }
2563         }
2564       }
2565 
2566 #ifdef _WIN64
2567       // Special care for fast JNI field accessors.
2568       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2569       // in and the heap gets shrunk before the field access.
2570       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2571         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2572         if (addr != (address)-1) {
2573           return Handle_Exception(exceptionInfo, addr);
2574         }
2575       }
2576 #endif
2577 
2578       // Stack overflow or null pointer exception in native code.
2579       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2580                    exceptionInfo->ContextRecord);
2581       return EXCEPTION_CONTINUE_SEARCH;
2582     } // /EXCEPTION_ACCESS_VIOLATION
2583     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2584 
2585     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2586       CompiledMethod* nm = NULL;
2587       JavaThread* thread = (JavaThread*)t;
2588       if (in_java) {
2589         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2590         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2591       }
2592 
2593       bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2594       if (((thread->thread_state() == _thread_in_vm ||
2595            thread->thread_state() == _thread_in_native ||
2596            is_unsafe_arraycopy) &&
2597           thread->doing_unsafe_access()) ||
2598           (nm != NULL && nm->has_unsafe_access())) {
2599         address next_pc =  Assembler::locate_next_instruction(pc);
2600         if (is_unsafe_arraycopy) {
2601           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2602         }
2603         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2604       }
2605     }
2606 
2607     if (in_java) {
2608       switch (exception_code) {
2609       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2610         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2611 
2612       case EXCEPTION_INT_OVERFLOW:
2613         return Handle_IDiv_Exception(exceptionInfo);
2614 
2615       } // switch
2616     }
2617     if (((thread->thread_state() == _thread_in_Java) ||
2618          (thread->thread_state() == _thread_in_native)) &&
2619          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2620       LONG result=Handle_FLT_Exception(exceptionInfo);
2621       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2622     }
2623   }
2624 
2625   if (exception_code != EXCEPTION_BREAKPOINT) {
2626     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2627                  exceptionInfo->ContextRecord);
2628   }
2629   return EXCEPTION_CONTINUE_SEARCH;
2630 }
2631 
2632 #ifndef _WIN64
2633 // Special care for fast JNI accessors.
2634 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2635 // the heap gets shrunk before the field access.
2636 // Need to install our own structured exception handler since native code may
2637 // install its own.
2638 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2639   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2640   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2641     address pc = (address) exceptionInfo->ContextRecord->Eip;
2642     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2643     if (addr != (address)-1) {
2644       return Handle_Exception(exceptionInfo, addr);
2645     }
2646   }
2647   return EXCEPTION_CONTINUE_SEARCH;
2648 }
2649 
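// Each wrapper below runs the fast accessor inside an SEH __try block so that an
// access violation caused by a concurrent GC is caught by the filter above and
// execution is redirected to the accessor's slow-case path.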
2650 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2651   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2652                                                      jobject obj,           \
2653                                                      jfieldID fieldID) {    \
2654     __try {                                                                 \
2655       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2656                                                                  obj,       \
2657                                                                  fieldID);  \
2658     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2659                                               _exception_info())) {         \
2660     }                                                                       \
2661     return 0;                                                               \
2662   }
2663 
2664 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2665 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2666 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2667 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2668 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2669 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2670 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2671 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2672 
2673 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2674   switch (type) {
2675   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2676   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2677   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2678   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2679   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2680   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2681   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2682   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2683   default:        ShouldNotReachHere();
2684   }
2685   return (address)-1;
2686 }
2687 #endif
2688 
2689 // Virtual Memory
2690 
2691 int os::vm_page_size() { return os::win32::vm_page_size(); }
2692 int os::vm_allocation_granularity() {
2693   return os::win32::vm_allocation_granularity();
2694 }
2695 
2696 // Windows large page support is available on Windows 2003. In order to use
2697 // large page memory, the administrator must first assign additional privilege
2698 // to the user:
2699 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2700 //   + select Local Policies -> User Rights Assignment
2701 //   + double click "Lock pages in memory", add users and/or groups
2702 //   + reboot
2703 // Note the above steps are needed for administrator as well, as administrators
2704 // by default do not have the privilege to lock pages in memory.
2705 //
2706 // Note about Windows 2003: although the API supports committing large page
2707 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2708 // scenario, experimentation shows that it only uses large pages if the entire
2709 // memory region is reserved and committed in a single VirtualAlloc() call.
2710 // This makes Windows large page support more or less like Solaris ISM, in
2711 // that the entire heap must be committed upfront. This probably will change
2712 // in the future; if so, the code below needs to be revisited.
2713 
2714 #ifndef MEM_LARGE_PAGES
2715   #define MEM_LARGE_PAGES 0x20000000
2716 #endif
2717 
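// Process and token handles used while enabling the SeLockMemoryPrivilege
// required for large pages; released in cleanup_after_large_page_init().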
2718 static HANDLE    _hProcess;
2719 static HANDLE    _hToken;
2720 
2721 // Container for NUMA node list info
2722 class NUMANodeListHolder {
2723  private:
2724   int *_numa_used_node_list;  // allocated below
2725   int _numa_used_node_count;
2726 
2727   void free_node_list() {
2728     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2729   }
2730 
2731  public:
2732   NUMANodeListHolder() {
2733     _numa_used_node_count = 0;
2734     _numa_used_node_list = NULL;
2735     // do rest of initialization in build routine (after function pointers are set up)
2736   }
2737 
2738   ~NUMANodeListHolder() {
2739     free_node_list();
2740   }
2741 
2742   bool build() {
2743     DWORD_PTR proc_aff_mask;
2744     DWORD_PTR sys_aff_mask;
2745     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2746     ULONG highest_node_number;
2747     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2748     free_node_list();
2749     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2750     for (unsigned int i = 0; i <= highest_node_number; i++) {
2751       ULONGLONG proc_mask_numa_node;
2752       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2753       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2754         _numa_used_node_list[_numa_used_node_count++] = i;
2755       }
2756     }
2757     return (_numa_used_node_count > 1);
2758   }
2759 
2760   int get_count() { return _numa_used_node_count; }
2761   int get_node_list_entry(int n) {
2762     // for indexes out of range, returns -1
2763     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2764   }
2765 
2766 } numa_node_list_holder;
2767 
2768 
2769 
2770 static size_t _large_page_size = 0;
2771 
2772 static bool request_lock_memory_privilege() {
2773   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2774                           os::current_process_id());
2775 
2776   LUID luid;
2777   if (_hProcess != NULL &&
2778       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2779       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2780 
2781     TOKEN_PRIVILEGES tp;
2782     tp.PrivilegeCount = 1;
2783     tp.Privileges[0].Luid = luid;
2784     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2785 
2786     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2787     // privilege. Check GetLastError() too. See the MSDN documentation.
2788     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2789         (GetLastError() == ERROR_SUCCESS)) {
2790       return true;
2791     }
2792   }
2793 
2794   return false;
2795 }
2796 
2797 static void cleanup_after_large_page_init() {
2798   if (_hProcess) CloseHandle(_hProcess);
2799   _hProcess = NULL;
2800   if (_hToken) CloseHandle(_hToken);
2801   _hToken = NULL;
2802 }
2803 
2804 static bool numa_interleaving_init() {
2805   bool success = false;
2806   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2807 
2808   // Print a warning if the UseNUMAInterleaving flag is specified on the command line.
2809   bool warn_on_failure = use_numa_interleaving_specified;
2810 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2811 
2812   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2813   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2814   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2815 
2816   if (numa_node_list_holder.build()) {
2817     if (log_is_enabled(Debug, os, cpu)) {
2818       Log(os, cpu) log;
2819       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2820       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2821         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2822       }
2823     }
2824     success = true;
2825   } else {
2826     WARN("Process does not cover multiple NUMA nodes.");
2827   }
2828   if (!success) {
2829     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2830   }
2831   return success;
2832 #undef WARN
2833 }
2834 
2835 // This routine is used whenever we need to reserve a contiguous VA range
2836 // but must make separate VirtualAlloc calls for each piece of the range.
2837 // Reasons for doing this:
2838 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it can be set otherwise)
2839 //  * UseNUMAInterleaving requires a separate node for each piece
2840 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2841                                          DWORD prot,
2842                                          bool should_inject_error = false) {
2843   char * p_buf;
2844   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2845   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2846   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2847 
2848   // First reserve enough address space in advance, since we want to be
2849   // able to break a single contiguous virtual address range into multiple
2850   // large page commits, but WS2003 does not allow reserving large page space.
2851   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2852   // address space. Then we deallocate that reservation and re-allocate
2853   // using large pages.
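  // Reserving chunk_size extra bytes leaves room for aligning p_buf up below;
  // the check that follows also catches size_t overflow of bytes + chunk_size.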
2854   const size_t size_of_reserve = bytes + chunk_size;
2855   if (bytes > size_of_reserve) {
2856     // Overflowed.
2857     return NULL;
2858   }
2859   p_buf = (char *) VirtualAlloc(addr,
2860                                 size_of_reserve,  // size of Reserve
2861                                 MEM_RESERVE,
2862                                 PAGE_READWRITE);
2863   // If reservation failed, return NULL
2864   if (p_buf == NULL) return NULL;
2865   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2866   os::release_memory(p_buf, bytes + chunk_size);
2867 
2868   // We still need to round up to a page boundary (in case we are using large pages),
2869   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with the page size);
2870   // instead we handle this in the bytes_to_rq computation below.
2871   p_buf = align_up(p_buf, page_size);
2872 
2873   // now go through and allocate one chunk at a time until all bytes are
2874   // allocated
2875   size_t  bytes_remaining = bytes;
2876   // An overflow of align_up() would have been caught above
2877   // in the calculation of size_of_reserve.
2878   char * next_alloc_addr = p_buf;
2879   HANDLE hProc = GetCurrentProcess();
2880 
2881 #ifdef ASSERT
2882   // Variable for the failure injection
2883   int ran_num = os::random();
2884   size_t fail_after = ran_num % bytes;
2885 #endif
2886 
2887   int count=0;
2888   while (bytes_remaining) {
2889     // select bytes_to_rq to get to the next chunk_size boundary
2890 
2891     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2892     // Allocate (and, if MEM_COMMIT is in flags, commit) the next chunk.
2893     char * p_new;
2894 
2895 #ifdef ASSERT
2896     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2897 #else
2898     const bool inject_error_now = false;
2899 #endif
2900 
2901     if (inject_error_now) {
2902       p_new = NULL;
2903     } else {
2904       if (!UseNUMAInterleaving) {
2905         p_new = (char *) VirtualAlloc(next_alloc_addr,
2906                                       bytes_to_rq,
2907                                       flags,
2908                                       prot);
2909       } else {
2910         // get the next node to use from the used_node_list
2911         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2912         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2913         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
2914       }
2915     }
2916 
2917     if (p_new == NULL) {
2918       // Free any allocated pages
2919       if (next_alloc_addr > p_buf) {
2920         // Some memory was committed so release it.
2921         size_t bytes_to_release = bytes - bytes_remaining;
2922         // NMT has yet to record any individual blocks, so we
2923         // need to create a dummy 'reserve' record to match
2924         // the release.
2925         MemTracker::record_virtual_memory_reserve((address)p_buf,
2926                                                   bytes_to_release, CALLER_PC);
2927         os::release_memory(p_buf, bytes_to_release);
2928       }
2929 #ifdef ASSERT
2930       if (should_inject_error) {
2931         log_develop_debug(pagesize)("Reserving pages individually failed.");
2932       }
2933 #endif
2934       return NULL;
2935     }
2936 
2937     bytes_remaining -= bytes_to_rq;
2938     next_alloc_addr += bytes_to_rq;
2939     count++;
2940   }
2941   // Although the memory is allocated individually, it is returned as one.
2942   // NMT records it as one block.
2943   if ((flags & MEM_COMMIT) != 0) {
2944     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
2945   } else {
2946     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
2947   }
2948 
2949   // made it this far, success
2950   return p_buf;
2951 }
2952 
2953 
2954 
2955 void os::large_page_init() {
2956   if (!UseLargePages) return;
2957 
2958   // print a warning if any large page related flag is specified on command line
2959   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
2960                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2961   bool success = false;
2962 
2963 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2964   if (request_lock_memory_privilege()) {
2965     size_t s = GetLargePageMinimum();
2966     if (s) {
2967 #if defined(IA32) || defined(AMD64)
2968       if (s > 4*M || LargePageSizeInBytes > 4*M) {
2969         WARN("JVM cannot use large pages bigger than 4MB.");
2970       } else {
2971 #endif
2972         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
2973           _large_page_size = LargePageSizeInBytes;
2974         } else {
2975           _large_page_size = s;
2976         }
2977         success = true;
2978 #if defined(IA32) || defined(AMD64)
2979       }
2980 #endif
2981     } else {
2982       WARN("Large page is not supported by the processor.");
2983     }
2984   } else {
2985     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
2986   }
2987 #undef WARN
2988 
2989   const size_t default_page_size = (size_t) vm_page_size();
2990   if (success && _large_page_size > default_page_size) {
2991     _page_sizes[0] = _large_page_size;
2992     _page_sizes[1] = default_page_size;
2993     _page_sizes[2] = 0;
2994   }
2995 
2996   cleanup_after_large_page_init();
2997   UseLargePages = success;
2998 }
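     // Illustrative example (assumed command line, not taken from this file): running with
     // -XX:+UseLargePages -XX:LargePageSizeInBytes=2m on a machine where GetLargePageMinimum()
     // returns 2M selects a 2M _large_page_size. If the "lock pages in memory" privilege is
     // missing, the warning above is printed (because a large-page flag was set explicitly)
     // and UseLargePages is turned back off at the end of os::large_page_init().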
2999 
3000 int os::create_file_for_heap(const char* dir) {
3001 
3002   const char name_template[] = "/jvmheap.XXXXXX";
3003 
3004   size_t fullname_len = strlen(dir) + strlen(name_template);
3005   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3006   if (fullname == NULL) {
3007     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3008     return -1;
3009   }
3010   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3011   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3012 
3013   os::native_path(fullname);
3014 
3015   char *path = _mktemp(fullname);
3016   if (path == NULL) {
3017     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3018     os::free(fullname);
3019     return -1;
3020   }
3021 
3022   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3023 
3024   os::free(fullname);
3025   if (fd < 0) {
3026     warning("Problem opening file for heap (%s)", os::strerror(errno));
3027     return -1;
3028   }
3029   return fd;
3030 }
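     // Illustrative usage (the directory is only an example): create_file_for_heap("D:\tmp")
     // builds the template "D:\tmp\jvmheap.XXXXXX", lets _mktemp() pick a unique name, and opens
     // the file with O_TEMPORARY so it is removed automatically when the last descriptor is closed.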
3031 
3032 // If 'base' is not NULL, the function will return NULL if it cannot get 'base'.
3033 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3034   assert(fd != -1, "File descriptor is not valid");
3035 
3036   HANDLE fh = (HANDLE)_get_osfhandle(fd);
3037 #ifdef _LP64
3038   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3039     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3040 #else
3041   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3042     0, (DWORD)size, NULL);
3043 #endif
3044   if (fileMapping == NULL) {
3045     if (GetLastError() == ERROR_DISK_FULL) {
3046       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3047     }
3048     else {
3049       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3050     }
3051 
3052     return NULL;
3053   }
3054 
3055   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3056 
3057   CloseHandle(fileMapping);
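       // Note (per the Windows documentation): a mapped view keeps the underlying file-mapping
       // object alive until the view is unmapped, so closing 'fileMapping' here does not
       // invalidate 'addr'.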
3058 
3059   return (char*)addr;
3060 }
3061 
3062 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3063   assert(fd != -1, "File descriptor is not valid");
3064   assert(base != NULL, "Base address cannot be NULL");
3065 
3066   release_memory(base, size);
3067   return map_memory_to_file(base, size, fd);
3068 }
3069 
3070 // On win32, one cannot release just a part of reserved memory, it's an
3071 // all or nothing deal.  When we split a reservation, we must break the
3072 // reservation into two reservations.
3073 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3074                                   bool realloc) {
3075   if (size > 0) {
3076     release_memory(base, size);
3077     if (realloc) {
3078       reserve_memory(split, base);
3079     }
3080     if (size != split) {
3081       reserve_memory(size - split, base + split);
3082     }
3083   }
3084 }
3085 
3086 // Multiple threads can race in this code, but unlike on POSIX-like OSes it is not possible
3087 // to unmap small sections of the reserved virtual space to get the requested alignment.
3088 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3089 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3090   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3091          "Alignment must be a multiple of allocation granularity (page size)");
3092   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3093 
3094   size_t extra_size = size + alignment;
3095   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3096 
3097   char* aligned_base = NULL;
3098 
3099   do {
3100     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3101     if (extra_base == NULL) {
3102       return NULL;
3103     }
3104     // Do manual alignment
3105     aligned_base = align_up(extra_base, alignment);
3106 
3107     if (file_desc != -1) {
3108       os::unmap_memory(extra_base, extra_size);
3109     } else {
3110       os::release_memory(extra_base, extra_size);
3111     }
3112 
3113     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3114 
3115   } while (aligned_base == NULL);
3116 
3117   return aligned_base;
3118 }
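     // Illustrative walk-through (hypothetical sizes): for size = 64M and alignment = 16M the loop
     // reserves 80M, aligns the base up to the next 16M boundary, releases (or unmaps) the oversized
     // reservation, and then re-reserves exactly 64M at the aligned address. If another thread grabs
     // that address between the release and the re-reserve, the inner reserve returns NULL and the
     // loop retries.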
3119 
3120 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3121   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3122          "reserve alignment");
3123   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3124   char* res;
3125   // note that if UseLargePages is on, all the areas that require interleaving
3126   // will go thru reserve_memory_special rather than thru here.
3127   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3128   if (!use_individual) {
3129     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3130   } else {
3131     elapsedTimer reserveTimer;
3132     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3133     // in numa interleaving, we have to allocate pages individually
3134     // (well really chunks of NUMAInterleaveGranularity size)
3135     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3136     if (res == NULL) {
3137       warning("NUMA page allocation failed");
3138     }
3139     if (Verbose && PrintMiscellaneous) {
3140       reserveTimer.stop();
3141       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3142                     reserveTimer.milliseconds(), reserveTimer.ticks());
3143     }
3144   }
3145   assert(res == NULL || addr == NULL || addr == res,
3146          "Unexpected address from reserve.");
3147 
3148   return res;
3149 }
3150 
3151 // Reserve memory at an arbitrary address, only if that area is
3152 // available (and not reserved for something else).
3153 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3154   // Windows os::reserve_memory() fails if the requested address range is
3155   // not available.
3156   return reserve_memory(bytes, requested_addr);
3157 }
3158 
3159 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3160   assert(file_desc >= 0, "file_desc is not valid");
3161   return map_memory_to_file(requested_addr, bytes, file_desc);
3162 }
3163 
3164 size_t os::large_page_size() {
3165   return _large_page_size;
3166 }
3167 
3168 bool os::can_commit_large_page_memory() {
3169   // Windows only uses large page memory when the entire region is reserved
3170   // and committed in a single VirtualAlloc() call. This may change in the
3171   // future, but with Windows 2003 it's not possible to commit on demand.
3172   return false;
3173 }
3174 
3175 bool os::can_execute_large_page_memory() {
3176   return true;
3177 }
3178 
3179 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3180                                  bool exec) {
3181   assert(UseLargePages, "only for large pages");
3182 
3183   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3184     return NULL; // Fallback to small pages.
3185   }
3186 
3187   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3188   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3189 
3190   // with large pages, there are two cases where we need to use Individual Allocation
3191   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3192   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3193   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3194     log_debug(pagesize)("Reserving large pages individually.");
3195 
3196     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3197     if (p_buf == NULL) {
3198       // give an appropriate warning message
3199       if (UseNUMAInterleaving) {
3200         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3201       }
3202       if (UseLargePagesIndividualAllocation) {
3203         warning("Individually allocated large pages failed, "
3204                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3205       }
3206       return NULL;
3207     }
3208 
3209     return p_buf;
3210 
3211   } else {
3212     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3213 
3214     // Normal policy: just allocate it all at once.
3215     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3216     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3217     if (res != NULL) {
3218       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3219     }
3220 
3221     return res;
3222   }
3223 }
3224 
3225 bool os::release_memory_special(char* base, size_t bytes) {
3226   assert(base != NULL, "Sanity check");
3227   return release_memory(base, bytes);
3228 }
3229 
3230 void os::print_statistics() {
3231 }
3232 
3233 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3234   int err = os::get_last_error();
3235   char buf[256];
3236   size_t buf_len = os::lasterror(buf, sizeof(buf));
3237   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3238           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3239           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3240 }
3241 
3242 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3243   if (bytes == 0) {
3244     // Don't bother the OS with noops.
3245     return true;
3246   }
3247   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3248   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3249   // Don't attempt to print anything if the OS call fails. We're
3250   // probably low on resources, so the print itself may cause crashes.
3251 
3252   // unless we have NUMAInterleaving enabled, the range of a commit
3253   // is always within a reserve covered by a single VirtualAlloc
3254   // in that case we can just do a single commit for the requested size
3255   if (!UseNUMAInterleaving) {
3256     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3257       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3258       return false;
3259     }
3260     if (exec) {
3261       DWORD oldprot;
3262       // Windows doc says to use VirtualProtect to get execute permissions
3263       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3264         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3265         return false;
3266       }
3267     }
3268     return true;
3269   } else {
3270 
3271     // when NUMAInterleaving is enabled, the commit might cover a range that
3272     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3273     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3274     // returns represents the number of bytes that can be committed in one step.
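         // Illustrative example (hypothetical layout): if 'addr' lies 1M before the end of a 2M
         // interleaving chunk and 'bytes' is 3M, VirtualQuery reports a 1M RegionSize for the first
         // step, so the loop commits 1M and then 2M for the following chunk.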
3275     size_t bytes_remaining = bytes;
3276     char * next_alloc_addr = addr;
3277     while (bytes_remaining > 0) {
3278       MEMORY_BASIC_INFORMATION alloc_info;
3279       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3280       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3281       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3282                        PAGE_READWRITE) == NULL) {
3283         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3284                                             exec);)
3285         return false;
3286       }
3287       if (exec) {
3288         DWORD oldprot;
3289         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3290                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3291           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3292                                               exec);)
3293           return false;
3294         }
3295       }
3296       bytes_remaining -= bytes_to_rq;
3297       next_alloc_addr += bytes_to_rq;
3298     }
3299   }
3300   // if we made it this far, return true
3301   return true;
3302 }
3303 
3304 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3305                           bool exec) {
3306   // alignment_hint is ignored on this OS
3307   return pd_commit_memory(addr, size, exec);
3308 }
3309 
3310 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3311                                   const char* mesg) {
3312   assert(mesg != NULL, "mesg must be specified");
3313   if (!pd_commit_memory(addr, size, exec)) {
3314     warn_fail_commit_memory(addr, size, exec);
3315     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3316   }
3317 }
3318 
3319 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3320                                   size_t alignment_hint, bool exec,
3321                                   const char* mesg) {
3322   // alignment_hint is ignored on this OS
3323   pd_commit_memory_or_exit(addr, size, exec, mesg);
3324 }
3325 
3326 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3327   if (bytes == 0) {
3328     // Don't bother the OS with noops.
3329     return true;
3330   }
3331   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3332   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3333   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3334 }
3335 
3336 bool os::pd_release_memory(char* addr, size_t bytes) {
3337   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3338 }
3339 
3340 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3341   return os::commit_memory(addr, size, !ExecMem);
3342 }
3343 
3344 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3345   return os::uncommit_memory(addr, size);
3346 }
3347 
3348 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3349   uint count = 0;
3350   bool ret = false;
3351   size_t bytes_remaining = bytes;
3352   char * next_protect_addr = addr;
3353 
3354   // Use VirtualQuery() to get the chunk size.
3355   while (bytes_remaining) {
3356     MEMORY_BASIC_INFORMATION alloc_info;
3357     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3358       return false;
3359     }
3360 
3361     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3362     // allocate_pages_individually() used a different allocation API depending on
3363     // UseNUMAInterleaving, but both cases are protected by the same VirtualProtect() call here.
3364     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3365     if (!ret) {
3366       warning("Failed protecting pages individually for chunk #%u", count);
3367       return false;
3368     }
3369 
3370     bytes_remaining -= bytes_to_protect;
3371     next_protect_addr += bytes_to_protect;
3372     count++;
3373   }
3374   return ret;
3375 }
3376 
3377 // Set protections specified
3378 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3379                         bool is_committed) {
3380   unsigned int p = 0;
3381   switch (prot) {
3382   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3383   case MEM_PROT_READ: p = PAGE_READONLY; break;
3384   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3385   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3386   default:
3387     ShouldNotReachHere();
3388   }
3389 
3390   DWORD old_status;
3391 
3392   // Strangely enough, on Win32 one can change protection only for committed
3393   // memory. Not a big deal anyway, as 'bytes' is less than or equal to 64K here.
3394   if (!is_committed) {
3395     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3396                           "cannot commit protection page");
3397   }
3398   // One cannot use os::guard_memory() here, as on Win32 guard pages
3399   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3400   //
3401   // Pages in the region become guard pages. Any attempt to access a guard page
3402   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3403   // the guard page status. Guard pages thus act as a one-time access alarm.
3404   bool ret;
3405   if (UseNUMAInterleaving) {
3406     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3407     // so we must protect the chunks individually.
3408     ret = protect_pages_individually(addr, bytes, p, &old_status);
3409   } else {
3410     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3411   }
3412 #ifdef ASSERT
3413   if (!ret) {
3414     int err = os::get_last_error();
3415     char buf[256];
3416     size_t buf_len = os::lasterror(buf, sizeof(buf));
3417     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3418           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3419           buf_len != 0 ? buf : "<no_error_string>", err);
3420   }
3421 #endif
3422   return ret;
3423 }
3424 
3425 bool os::guard_memory(char* addr, size_t bytes) {
3426   DWORD old_status;
3427   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3428 }
3429 
3430 bool os::unguard_memory(char* addr, size_t bytes) {
3431   DWORD old_status;
3432   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3433 }
3434 
3435 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3436 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3437 void os::numa_make_global(char *addr, size_t bytes)    { }
3438 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3439 bool os::numa_topology_changed()                       { return false; }
3440 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3441 int os::numa_get_group_id()                            { return 0; }
3442 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3443   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3444     // Provide an answer for UMA systems
3445     ids[0] = 0;
3446     return 1;
3447   } else {
3448     // check for size bigger than actual groups_num
3449     size = MIN2(size, numa_get_groups_num());
3450     for (int i = 0; i < (int)size; i++) {
3451       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3452     }
3453     return size;
3454   }
3455 }
3456 
3457 bool os::get_page_info(char *start, page_info* info) {
3458   return false;
3459 }
3460 
3461 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3462                      page_info* page_found) {
3463   return end;
3464 }
3465 
3466 char* os::non_memory_address_word() {
3467   // Must never look like an address returned by reserve_memory,
3468   // even in its subfields (as defined by the CPU immediate fields,
3469   // if the CPU splits constants across multiple instructions).
3470   return (char*)-1;
3471 }
3472 
3473 #define MAX_ERROR_COUNT 100
3474 #define SYS_THREAD_ERROR 0xffffffffUL
3475 
3476 void os::pd_start_thread(Thread* thread) {
3477   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3478   // Returns previous suspend state:
3479   // 0:  Thread was not suspended
3480   // 1:  Thread is running now
3481   // >1: Thread is still suspended.
3482   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3483 }
3484 
3485 
3486 // Short sleep, direct OS call.
3487 //
3488 // ms = 0, means allow others (if any) to run.
3489 //
3490 void os::naked_short_sleep(jlong ms) {
3491   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3492   Sleep(ms);
3493 }
3494 
3495 // Windows does not provide sleep functionality with nanosecond resolution, so we
3496 // try to approximate this with spinning combined with yielding if another thread
3497 // is ready to run on the current processor.
3498 void os::naked_short_nanosleep(jlong ns) {
3499   assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3500 
3501   int64_t start = os::javaTimeNanos();
3502   do {
3503     if (SwitchToThread() == 0) {
3504       // Nothing else is ready to run on this cpu, spin a little
3505       SpinPause();
3506     }
3507   } while (os::javaTimeNanos() - start < ns);
3508 }
3509 
3510 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3511 void os::infinite_sleep() {
3512   while (true) {    // sleep forever ...
3513     Sleep(100000);  // ... 100 seconds at a time
3514   }
3515 }
3516 
3517 typedef BOOL (WINAPI * STTSignature)(void);
3518 
3519 void os::naked_yield() {
3520   // Consider passing back the return value from SwitchToThread().
3521   SwitchToThread();
3522 }
3523 
3524 // Win32 only gives you access to seven real priorities at a time,
3525 // so we compress Java's ten down to seven.  It would be better
3526 // if we dynamically adjusted relative priorities.
3527 
3528 int os::java_to_os_priority[CriticalPriority + 1] = {
3529   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3530   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3531   THREAD_PRIORITY_LOWEST,                       // 2
3532   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3533   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3534   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3535   THREAD_PRIORITY_NORMAL,                       // 6
3536   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3537   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3538   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3539   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3540   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3541 };
3542 
3543 int prio_policy1[CriticalPriority + 1] = {
3544   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3545   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3546   THREAD_PRIORITY_LOWEST,                       // 2
3547   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3548   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3549   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3550   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3551   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3552   THREAD_PRIORITY_HIGHEST,                      // 8
3553   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3554   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3555   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3556 };
3557 
3558 static int prio_init() {
3559   // If ThreadPriorityPolicy is 1, switch tables
3560   if (ThreadPriorityPolicy == 1) {
3561     int i;
3562     for (i = 0; i < CriticalPriority + 1; i++) {
3563       os::java_to_os_priority[i] = prio_policy1[i];
3564     }
3565   }
3566   if (UseCriticalJavaThreadPriority) {
3567     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3568   }
3569   return 0;
3570 }
3571 
3572 OSReturn os::set_native_priority(Thread* thread, int priority) {
3573   if (!UseThreadPriorities) return OS_OK;
3574   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3575   return ret ? OS_OK : OS_ERR;
3576 }
3577 
3578 OSReturn os::get_native_priority(const Thread* const thread,
3579                                  int* priority_ptr) {
3580   if (!UseThreadPriorities) {
3581     *priority_ptr = java_to_os_priority[NormPriority];
3582     return OS_OK;
3583   }
3584   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3585   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3586     assert(false, "GetThreadPriority failed");
3587     return OS_ERR;
3588   }
3589   *priority_ptr = os_prio;
3590   return OS_OK;
3591 }
3592 
3593 // GetCurrentThreadId() returns DWORD
3594 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3595 
3596 static int _initial_pid = 0;
3597 
3598 int os::current_process_id() {
3599   return (_initial_pid ? _initial_pid : _getpid());
3600 }
3601 
3602 int    os::win32::_vm_page_size              = 0;
3603 int    os::win32::_vm_allocation_granularity = 0;
3604 int    os::win32::_processor_type            = 0;
3605 // Processor level is not available on non-NT systems, use vm_version instead
3606 int    os::win32::_processor_level           = 0;
3607 julong os::win32::_physical_memory           = 0;
3608 size_t os::win32::_default_stack_size        = 0;
3609 
3610 intx          os::win32::_os_thread_limit    = 0;
3611 volatile intx os::win32::_os_thread_count    = 0;
3612 
3613 bool   os::win32::_is_windows_server         = false;
3614 
3615 // 6573254
3616 // Currently, the bug is observed across all the supported Windows releases,
3617 // including the latest one (as of this writing - Windows Server 2012 R2)
3618 bool   os::win32::_has_exit_bug              = true;
3619 
3620 void os::win32::initialize_system_info() {
3621   SYSTEM_INFO si;
3622   GetSystemInfo(&si);
3623   _vm_page_size    = si.dwPageSize;
3624   _vm_allocation_granularity = si.dwAllocationGranularity;
3625   _processor_type  = si.dwProcessorType;
3626   _processor_level = si.wProcessorLevel;
3627   set_processor_count(si.dwNumberOfProcessors);
3628 
3629   MEMORYSTATUSEX ms;
3630   ms.dwLength = sizeof(ms);
3631 
3632   // The MEMORYSTATUSEX struct also reports ullAvailPhys (free physical memory bytes),
3633   // ullTotalVirtual, ullAvailVirtual, and dwMemoryLoad (% of memory in use).
3634   GlobalMemoryStatusEx(&ms);
3635   _physical_memory = ms.ullTotalPhys;
3636 
3637   if (FLAG_IS_DEFAULT(MaxRAM)) {
3638     // Adjust MaxRAM according to the maximum virtual address space available.
3639     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3640   }
3641 
3642   OSVERSIONINFOEX oi;
3643   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3644   GetVersionEx((OSVERSIONINFO*)&oi);
3645   switch (oi.dwPlatformId) {
3646   case VER_PLATFORM_WIN32_NT:
3647     {
3648       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3649       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3650           oi.wProductType == VER_NT_SERVER) {
3651         _is_windows_server = true;
3652       }
3653     }
3654     break;
3655   default: fatal("Unknown platform");
3656   }
3657 
3658   _default_stack_size = os::current_stack_size();
3659   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3660   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3661          "stack size not a multiple of page size");
3662 
3663   initialize_performance_counter();
3664 }
3665 
3666 
3667 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3668                                       int ebuflen) {
3669   char path[MAX_PATH];
3670   DWORD size;
3671   DWORD pathLen = (DWORD)sizeof(path);
3672   HINSTANCE result = NULL;
3673 
3674   // only allow library name without path component
3675   assert(strchr(name, '\\') == NULL, "path not allowed");
3676   assert(strchr(name, ':') == NULL, "path not allowed");
3677   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3678     jio_snprintf(ebuf, ebuflen,
3679                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3680     return NULL;
3681   }
3682 
3683   // search system directory
3684   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3685     if (size >= pathLen) {
3686       return NULL; // truncated
3687     }
3688     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3689       return NULL; // truncated
3690     }
3691     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3692       return result;
3693     }
3694   }
3695 
3696   // try Windows directory
3697   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3698     if (size >= pathLen) {
3699       return NULL; // truncated
3700     }
3701     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3702       return NULL; // truncated
3703     }
3704     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3705       return result;
3706     }
3707   }
3708 
3709   jio_snprintf(ebuf, ebuflen,
3710                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3711   return NULL;
3712 }
3713 
3714 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3715 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3716 
3717 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3718   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3719   return TRUE;
3720 }
3721 
3722 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3723   // Basic approach:
3724   //  - Each exiting thread registers its intent to exit and then does so.
3725   //  - A thread trying to terminate the process must wait for all
3726   //    threads currently exiting to complete their exit.
3727 
3728   if (os::win32::has_exit_bug()) {
3729     // The array holds handles of the threads that have started exiting by calling
3730     // _endthreadex().
3731     // Should be large enough to avoid blocking the exiting thread due to lack of
3732     // a free slot.
3733     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3734     static int handle_count = 0;
3735 
3736     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3737     static CRITICAL_SECTION crit_sect;
3738     static volatile DWORD process_exiting = 0;
3739     int i, j;
3740     DWORD res;
3741     HANDLE hproc, hthr;
3742 
3743     // We only attempt to register threads until a process exiting
3744     // thread manages to set the process_exiting flag. Any threads
3745     // that come through here after the process_exiting flag is set
3746     // are unregistered and will be caught in the SuspendThread()
3747     // infinite loop below.
3748     bool registered = false;
3749 
3750     // The first thread that reached this point, initializes the critical section.
3751     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3752       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3753     } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3754       if (what != EPT_THREAD) {
3755         // Atomically set process_exiting before the critical section
3756         // to increase the visibility between racing threads.
3757         Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
3758       }
3759       EnterCriticalSection(&crit_sect);
3760 
3761       if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3762         // Remove from the array those handles of the threads that have completed exiting.
3763         for (i = 0, j = 0; i < handle_count; ++i) {
3764           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3765           if (res == WAIT_TIMEOUT) {
3766             handles[j++] = handles[i];
3767           } else {
3768             if (res == WAIT_FAILED) {
3769               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3770                       GetLastError(), __FILE__, __LINE__);
3771             }
3772             // Don't keep the handle, if we failed waiting for it.
3773             CloseHandle(handles[i]);
3774           }
3775         }
3776 
3777         // If there's no free slot in the array of the kept handles, we'll have to
3778         // wait until at least one thread completes exiting.
3779         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3780           // Raise the priority of the oldest exiting thread to increase its chances
3781           // to complete sooner.
3782           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3783           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3784           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3785             i = (res - WAIT_OBJECT_0);
3786             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3787             for (; i < handle_count; ++i) {
3788               handles[i] = handles[i + 1];
3789             }
3790           } else {
3791             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3792                     (res == WAIT_FAILED ? "failed" : "timed out"),
3793                     GetLastError(), __FILE__, __LINE__);
3794             // Don't keep handles, if we failed waiting for them.
3795             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3796               CloseHandle(handles[i]);
3797             }
3798             handle_count = 0;
3799           }
3800         }
3801 
3802         // Store a duplicate of the current thread handle in the array of handles.
3803         hproc = GetCurrentProcess();
3804         hthr = GetCurrentThread();
3805         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3806                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3807           warning("DuplicateHandle failed (%u) in %s: %d\n",
3808                   GetLastError(), __FILE__, __LINE__);
3809 
3810           // We can't register this thread (no more handles) so this thread
3811           // may be racing with a thread that is calling exit(). If the thread
3812           // that is calling exit() has managed to set the process_exiting
3813           // flag, then this thread will be caught in the SuspendThread()
3814           // infinite loop below which closes that race. A small timing
3815           // window remains before the process_exiting flag is set, but it
3816           // is only exposed when we are out of handles.
3817         } else {
3818           ++handle_count;
3819           registered = true;
3820 
3821           // The current exiting thread has stored its handle in the array, and now
3822           // should leave the critical section before calling _endthreadex().
3823         }
3824 
3825       } else if (what != EPT_THREAD && handle_count > 0) {
3826         jlong start_time, finish_time, timeout_left;
3827         // Before ending the process, make sure all the threads that had called
3828         // _endthreadex() completed.
3829 
3830         // Set the priority level of the current thread to the same value as
3831         // the priority level of exiting threads.
3832         // This is to ensure it will be given a fair chance to execute if
3833         // the timeout expires.
3834         hthr = GetCurrentThread();
3835         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3836         start_time = os::javaTimeNanos();
3837         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3838         for (i = 0; ; ) {
3839           int portion_count = handle_count - i;
3840           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3841             portion_count = MAXIMUM_WAIT_OBJECTS;
3842           }
3843           for (j = 0; j < portion_count; ++j) {
3844             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3845           }
3846           timeout_left = (finish_time - start_time) / 1000000L;
3847           if (timeout_left < 0) {
3848             timeout_left = 0;
3849           }
3850           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
3851           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
3852             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3853                     (res == WAIT_FAILED ? "failed" : "timed out"),
3854                     GetLastError(), __FILE__, __LINE__);
3855             // Reset portion_count so we close the remaining
3856             // handles due to this error.
3857             portion_count = handle_count - i;
3858           }
3859           for (j = 0; j < portion_count; ++j) {
3860             CloseHandle(handles[i + j]);
3861           }
3862           if ((i += portion_count) >= handle_count) {
3863             break;
3864           }
3865           start_time = os::javaTimeNanos();
3866         }
3867         handle_count = 0;
3868       }
3869 
3870       LeaveCriticalSection(&crit_sect);
3871     }
3872 
3873     if (!registered &&
3874         OrderAccess::load_acquire(&process_exiting) != 0 &&
3875         process_exiting != GetCurrentThreadId()) {
3876       // Some other thread is about to call exit(), so we don't let
3877       // the current unregistered thread proceed to exit() or _endthreadex()
3878       while (true) {
3879         SuspendThread(GetCurrentThread());
3880         // Avoid busy-wait loop, if SuspendThread() failed.
3881         Sleep(EXIT_TIMEOUT);
3882       }
3883     }
3884   }
3885 
3886   // We are here if either
3887   // - there's no 'race at exit' bug on this OS release;
3888   // - initialization of the critical section failed (unlikely);
3889   // - the current thread has registered itself and left the critical section;
3890   // - the process-exiting thread has raised the flag and left the critical section.
3891   if (what == EPT_THREAD) {
3892     _endthreadex((unsigned)exit_code);
3893   } else if (what == EPT_PROCESS) {
3894     ::exit(exit_code);
3895   } else {
3896     _exit(exit_code);
3897   }
3898 
3899   // Should not reach here
3900   return exit_code;
3901 }
3902 
3903 #undef EXIT_TIMEOUT
3904 
3905 void os::win32::setmode_streams() {
3906   _setmode(_fileno(stdin), _O_BINARY);
3907   _setmode(_fileno(stdout), _O_BINARY);
3908   _setmode(_fileno(stderr), _O_BINARY);
3909 }
3910 
3911 
3912 bool os::is_debugger_attached() {
3913   return IsDebuggerPresent() ? true : false;
3914 }
3915 
3916 
3917 void os::wait_for_keypress_at_exit(void) {
3918   if (PauseAtExit) {
3919     fprintf(stderr, "Press any key to continue...\n");
3920     fgetc(stdin);
3921   }
3922 }
3923 
3924 
3925 bool os::message_box(const char* title, const char* message) {
3926   int result = MessageBox(NULL, message, title,
3927                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3928   return result == IDYES;
3929 }
3930 
3931 #ifndef PRODUCT
3932 #ifndef _WIN64
3933 // Helpers to check whether NX protection is enabled
3934 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
3935   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
3936       pex->ExceptionRecord->NumberParameters > 0 &&
3937       pex->ExceptionRecord->ExceptionInformation[0] ==
3938       EXCEPTION_INFO_EXEC_VIOLATION) {
3939     return EXCEPTION_EXECUTE_HANDLER;
3940   }
3941   return EXCEPTION_CONTINUE_SEARCH;
3942 }
3943 
3944 void nx_check_protection() {
3945   // If NX is enabled we'll get an exception calling into code on the stack
3946   char code[] = { (char)0xC3 }; // ret
3947   void *code_ptr = (void *)code;
3948   __try {
3949     __asm call code_ptr
3950   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
3951     tty->print_raw_cr("NX protection detected.");
3952   }
3953 }
3954 #endif // _WIN64
3955 #endif // PRODUCT
3956 
3957 // This is called _before_ the global arguments have been parsed
3958 void os::init(void) {
3959   _initial_pid = _getpid();
3960 
3961   init_random(1234567);
3962 
3963   win32::initialize_system_info();
3964   win32::setmode_streams();
3965   init_page_sizes((size_t) win32::vm_page_size());
3966 
3967   // This may be overridden later when argument processing is done.
3968   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
3969 
3970   // Initialize main_process and main_thread
3971   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
3972   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
3973                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
3974     fatal("DuplicateHandle failed\n");
3975   }
3976   main_thread_id = (int) GetCurrentThreadId();
3977 
3978   // initialize fast thread access - only used for 32-bit
3979   win32::initialize_thread_ptr_offset();
3980 }
3981 
3982 // To install functions for atexit processing
3983 extern "C" {
3984   static void perfMemory_exit_helper() {
3985     perfMemory_exit();
3986   }
3987 }
3988 
3989 static jint initSock();
3990 
3991 // this is called _after_ the global arguments have been parsed
3992 jint os::init_2(void) {
3993 
3994   // This could be set at any time, but all platforms
3995   // have to set it the same, so we have to mirror Solaris.
3996   DEBUG_ONLY(os::set_mutex_init_done();)
3997 
3998   // Setup Windows Exceptions
3999 
4000 #if INCLUDE_AOT
4001   // If AOT is enabled we need to install a vectored exception handler
4002   // in order to forward implicit exceptions from code in AOT
4003   // generated DLLs.  This is necessary since these DLLs are not
4004   // registered for structured exceptions like codecache methods are.
4005   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4006     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4007   }
4008 #endif
4009 
4010   // for debugging float code generation bugs
4011   if (ForceFloatExceptions) {
4012 #ifndef  _WIN64
4013     static long fp_control_word = 0;
4014     __asm { fstcw fp_control_word }
4015     // see Intel PPro Manual, Vol. 2, p 7-16
4016     const long precision = 0x20;
4017     const long underflow = 0x10;
4018     const long overflow  = 0x08;
4019     const long zero_div  = 0x04;
4020     const long denorm    = 0x02;
4021     const long invalid   = 0x01;
4022     fp_control_word |= invalid;
4023     __asm { fldcw fp_control_word }
4024 #endif
4025   }
4026 
4027   // If stack_commit_size is 0, windows will reserve the default size,
4028   // but only commit a small portion of it.
4029   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4030   size_t default_reserve_size = os::win32::default_stack_size();
4031   size_t actual_reserve_size = stack_commit_size;
4032   if (stack_commit_size < default_reserve_size) {
4033     // If stack_commit_size == 0, we want this too
4034     actual_reserve_size = default_reserve_size;
4035   }
4036 
4037   // Check minimum allowable stack size for thread creation and to initialize
4038   // the java system classes, including StackOverflowError - depends on page
4039   // size.  Add two 4K pages for compiler2 recursion in main thread.
4040   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4041   // class initialization depending on 32 or 64 bit VM.
4042   size_t min_stack_allowed =
4043             (size_t)(JavaThread::stack_guard_zone_size() +
4044                      JavaThread::stack_shadow_zone_size() +
4045                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4046 
4047   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4048 
4049   if (actual_reserve_size < min_stack_allowed) {
4050     tty->print_cr("\nThe Java thread stack size specified is too small. "
4051                   "Specify at least %dk",
4052                   min_stack_allowed / K);
4053     return JNI_ERR;
4054   }
4055 
4056   JavaThread::set_stack_size_at_create(stack_commit_size);
4057 
4058   // Calculate the theoretical maximum number of threads to guard against artificial
4059   // out-of-memory situations, where all available address space has been
4060   // reserved by thread stacks.
4061   assert(actual_reserve_size != 0, "Must have a stack");
4062 
4063   // Calculate the thread limit when we should start doing Virtual Memory
4064   // banging. Currently this is when the threads would have used all but 200MB of space.
4065   //
4066   // TODO: consider performing a similar calculation for commit size instead
4067   // as reserve size, since on a 64-bit platform we'll run into that more
4068   // often than running out of virtual memory space.  We can use the
4069   // lower value of the two calculations as the os_thread_limit.
4070   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4071   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
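       // Illustrative arithmetic (hypothetical 32-bit numbers): with BitsPerWord == 32 the usable
       // address space above is 2G - 200M ~= 1848M, so with a 1M per-thread reserve size the
       // limit works out to roughly 1848 threads before the virtual-memory banging kicks in.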
4072 
4073   // at exit methods are called in the reverse order of their registration.
4074   // there is no limit to the number of functions registered. atexit does
4075   // not set errno.
4076 
4077   if (PerfAllowAtExitRegistration) {
4078     // only register atexit functions if PerfAllowAtExitRegistration is set.
4079     // atexit functions can be delayed until process exit time, which
4080     // can be problematic for embedded VM situations. Embedded VMs should
4081     // call DestroyJavaVM() to assure that VM resources are released.
4082 
4083     // note: perfMemory_exit_helper atexit function may be removed in
4084     // the future if the appropriate cleanup code can be added to the
4085     // VM_Exit VMOperation's doit method.
4086     if (atexit(perfMemory_exit_helper) != 0) {
4087       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4088     }
4089   }
4090 
4091 #ifndef _WIN64
4092   // Print something if NX is enabled (win32 on AMD64)
4093   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4094 #endif
4095 
4096   // initialize thread priority policy
4097   prio_init();
4098 
4099   if (UseNUMA && !ForceNUMA) {
4100     UseNUMA = false; // We don't fully support this yet
4101   }
4102 
4103   if (UseNUMAInterleaving) {
4104     // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4105     bool success = numa_interleaving_init();
4106     if (!success) UseNUMAInterleaving = false;
4107   }
4108 
4109   if (initSock() != JNI_OK) {
4110     return JNI_ERR;
4111   }
4112 
4113   SymbolEngine::recalc_search_path();
4114 
4115   // Initialize data for jdk.internal.misc.Signal
4116   if (!ReduceSignalUsage) {
4117     jdk_misc_signal_init();
4118   }
4119 
4120   return JNI_OK;
4121 }
4122 
4123 // Mark the polling page as unreadable
4124 void os::make_polling_page_unreadable(void) {
4125   DWORD old_status;
4126   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4127                       PAGE_NOACCESS, &old_status)) {
4128     fatal("Could not disable polling page");
4129   }
4130 }
4131 
4132 // Mark the polling page as readable
4133 void os::make_polling_page_readable(void) {
4134   DWORD old_status;
4135   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4136                       PAGE_READONLY, &old_status)) {
4137     fatal("Could not enable polling page");
4138   }
4139 }
4140 
4141 // combine the high and low DWORD into a ULONGLONG
4142 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4143   ULONGLONG value = high_word;
4144   value <<= sizeof(high_word) * 8;
4145   value |= low_word;
4146   return value;
4147 }
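     // For example, make_double_word(0x1, 0x2) yields 0x0000000100000002.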
4148 
4149 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4150 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4151   ::memset((void*)sbuf, 0, sizeof(struct stat));
4152   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4153   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4154                                   file_data.ftLastWriteTime.dwLowDateTime);
4155   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4156                                   file_data.ftCreationTime.dwLowDateTime);
4157   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4158                                   file_data.ftLastAccessTime.dwLowDateTime);
4159   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4160     sbuf->st_mode |= S_IFDIR;
4161   } else {
4162     sbuf->st_mode |= S_IFREG;
4163   }
4164 }
4165 
4166 // Returns the given path as an absolute wide path in UNC format. The returned path is NULL
4167 // on error (with err being set accordingly) and should be freed via os::free() otherwise.
4168 // additional_space is the number of additionally allocated wchars after the terminating L'\0'.
4169 // This is based on pathToNTPath() in io_util_md.cpp, but omits the optimizations for
4170 // short paths.
4171 static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
4172   if ((path == NULL) || (path[0] == '\0')) {
4173     err = ENOENT;
4174     return NULL;
4175   }
4176 
4177   size_t path_len = strlen(path);
4178   // Need to allocate at least room for 3 characters, since os::native_path transforms C: to C:.
4179   char* buf = (char*) os::malloc(1 + MAX2((size_t) 3, path_len), mtInternal);
4180   wchar_t* result = NULL;
4181 
4182   if (buf == NULL) {
4183     err = ENOMEM;
4184   } else {
4185     memcpy(buf, path, path_len + 1);
4186     os::native_path(buf);
4187 
4188     wchar_t* prefix;
4189     int prefix_off = 0;
4190     bool is_abs = true;
4191     bool needs_fullpath = true;
4192 
4193     if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
4194       prefix = L"\\\\?\\";
4195     } else if (buf[0] == '\\' && buf[1] == '\\') {
4196       if (buf[2] == '?' && buf[3] == '\\') {
4197         prefix = L"";
4198         needs_fullpath = false;
4199       } else {
4200         prefix = L"\\\\?\\UNC";
4201         prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4202       }
4203     } else {
4204       is_abs = false;
4205       prefix = L"\\\\?\\";
4206     }
4207 
4208     size_t buf_len = strlen(buf);
4209     size_t prefix_len = wcslen(prefix);
4210     size_t full_path_size = is_abs ? 1 + buf_len : JVM_MAXPATHLEN;
4211     size_t result_size = prefix_len + full_path_size - prefix_off;
4212     result = (wchar_t*) os::malloc(sizeof(wchar_t) * (additional_space + result_size), mtInternal);
4213 
4214     if (result == NULL) {
4215       err = ENOMEM;
4216     } else {
4217       size_t converted_chars;
4218       wchar_t* path_start = result + prefix_len - prefix_off;
4219       err = ::mbstowcs_s(&converted_chars, path_start, buf_len + 1, buf, buf_len);
4220 
4221       if ((err == ERROR_SUCCESS) && needs_fullpath) {
4222         wchar_t* tmp = (wchar_t*) os::malloc(sizeof(wchar_t) * full_path_size, mtInternal);
4223 
4224         if (tmp == NULL) {
4225           err = ENOMEM;
4226         } else {
4227           if (!_wfullpath(tmp, path_start, full_path_size)) {
4228             err = ENOENT;
4229           } else {
4230             ::memcpy(path_start, tmp, (1 + wcslen(tmp)) * sizeof(wchar_t));
4231           }
4232 
4233           os::free(tmp);
4234         }
4235       }
4236 
4237       memcpy(result, prefix, sizeof(wchar_t) * prefix_len);
4238 
4239       // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
4240       size_t result_len = wcslen(result);
4241 
4242       if (result[result_len - 1] == L'\\') {
4243         if (!(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
4244           result[result_len - 1] = L'\0';
4245         }
4246       }
4247     }
4248   }
4249 
4250   os::free(buf);
4251 
4252   if (err != ERROR_SUCCESS) {
4253     os::free(result);
4254     result = NULL;
4255   }
4256 
4257   return result;
4258 }
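     // Illustrative conversions (example inputs only): "C:\foo\bar" becomes L"\\?\C:\foo\bar",
     // "\\server\share\x" becomes L"\\?\UNC\server\share\x", and a relative path such as
     // "foo\bar" is expanded via _wfullpath() before the L"\\?\" prefix is applied.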
4259 
4260 int os::stat(const char *path, struct stat *sbuf) {
4261   errno_t err;
4262   wchar_t* wide_path = wide_abs_unc_path(path, err);
4263 
4264   if (wide_path == NULL) {
4265     errno = err;
4266     return -1;
4267   }
4268 
4269   WIN32_FILE_ATTRIBUTE_DATA file_data;
4270   BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4271   os::free(wide_path);
4272 
4273   if (!bret) {
4274     errno = ::GetLastError();
4275     return -1;
4276   }
4277 
4278   file_attribute_data_to_stat(sbuf, file_data);
4279   return 0;
4280 }
4281 
4282 static HANDLE create_read_only_file_handle(const char* file) {
4283   errno_t err;
4284   wchar_t* wide_path = wide_abs_unc_path(file, err);
4285 
4286   if (wide_path == NULL) {
4287     errno = err;
4288     return INVALID_HANDLE_VALUE;
4289   }
4290 
4291   HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4292                                 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4293   os::free(wide_path);
4294 
4295   return handle;
4296 }
4297 
4298 bool os::same_files(const char* file1, const char* file2) {
4299 
4300   if (file1 == NULL && file2 == NULL) {
4301     return true;
4302   }
4303 
4304   if (file1 == NULL || file2 == NULL) {
4305     return false;
4306   }
4307 
4308   if (strcmp(file1, file2) == 0) {
4309     return true;
4310   }
4311 
4312   HANDLE handle1 = create_read_only_file_handle(file1);
4313   HANDLE handle2 = create_read_only_file_handle(file2);
4314   bool result = false;
4315 
4316   // if we could open both paths...
4317   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4318     BY_HANDLE_FILE_INFORMATION fileInfo1;
4319     BY_HANDLE_FILE_INFORMATION fileInfo2;
4320     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4321       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4322       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4323       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4324         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4325         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4326         result = true;
4327       }
4328     }
4329   }
4330 
4331   //free the handles
4332   if (handle1 != INVALID_HANDLE_VALUE) {
4333     ::CloseHandle(handle1);
4334   }
4335 
4336   if (handle2 != INVALID_HANDLE_VALUE) {
4337     ::CloseHandle(handle2);
4338   }
4339 
4340   return result;
4341 }
4342 
4343 #define FT2INT64(ft) \
4344   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
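     // GetThreadTimes() reports FILETIME values in 100-nanosecond units, which is why
     // thread_cpu_time() below multiplies the combined 64-bit value by 100 to yield nanoseconds.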
4345 
4346 
4347 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4348 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4349 // of a thread.
4350 //
4351 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4352 // the fast estimate available on the platform.
4353 
4354 // current_thread_cpu_time() is not optimized for Windows yet
4355 jlong os::current_thread_cpu_time() {
4356   // return user + sys since the cost is the same
4357   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4358 }
4359 
4360 jlong os::thread_cpu_time(Thread* thread) {
4361   // consistent with what current_thread_cpu_time() returns.
4362   return os::thread_cpu_time(thread, true /* user+sys */);
4363 }
4364 
4365 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4366   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4367 }
4368 
4369 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4370   // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4371   // If this function changes, os::is_thread_cpu_time_supported() should too
4372   FILETIME CreationTime;
4373   FILETIME ExitTime;
4374   FILETIME KernelTime;
4375   FILETIME UserTime;
4376 
4377   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4378                       &ExitTime, &KernelTime, &UserTime) == 0) {
4379     return -1;
4380   } else if (user_sys_cpu_time) {
4381     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4382   } else {
4383     return FT2INT64(UserTime) * 100;
4384   }
4385 }
4386 
4387 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4388   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4389   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4390   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4391   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4392 }
4393 
4394 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4395   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4396   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4397   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4398   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4399 }
4400 
4401 bool os::is_thread_cpu_time_supported() {
4402   // see os::thread_cpu_time
4403   FILETIME CreationTime;
4404   FILETIME ExitTime;
4405   FILETIME KernelTime;
4406   FILETIME UserTime;
4407 
4408   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4409                       &KernelTime, &UserTime) == 0) {
4410     return false;
4411   } else {
4412     return true;
4413   }
4414 }
4415 
4416 // Windows doesn't provide a loadavg primitive, so this is stubbed out for now.
4417 // It does have primitives (PDH API) to get CPU usage and run queue length.
4418 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4419 // If we wanted to implement loadavg on Windows, we have a few options:
4420 //
4421 // a) Query CPU usage and run queue length and "fake" an answer by
4422 //    returning the CPU usage if it's under 100%, and the run queue
4423 //    length otherwise.  It turns out that querying is pretty slow
4424 //    on Windows, on the order of 200 microseconds on a fast machine.
4425 //    Note that on Windows the CPU usage value is the % usage
4426 //    since the last time the API was called (and the first call
4427 //    returns 100%), so we'd have to deal with that as well.
4428 //
4429 // b) Sample the "fake" answer using a sampling thread and store
4430 //    the answer in a global variable.  The call to loadavg would
4431 //    just return the value of the global, avoiding the slow query.
4432 //
4433 // c) Sample a better answer using exponential decay to smooth the
4434 //    value.  This is basically the algorithm used by UNIX kernels.
4435 //
4436 // Note that sampling thread starvation could affect both (b) and (c).
4437 int os::loadavg(double loadavg[], int nelem) {
4438   return -1;
4439 }
4440 
4441 
4442 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4443 bool os::dont_yield() {
4444   return DontYieldALot;
4445 }
4446 
4447 int os::open(const char *path, int oflag, int mode) {
4448   errno_t err;
4449   wchar_t* wide_path = wide_abs_unc_path(path, err);
4450 
4451   if (wide_path == NULL) {
4452     errno = err;
4453     return -1;
4454   }
4455   int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
4456   os::free(wide_path);
4457 
4458   if (fd == -1) {
4459     errno = ::GetLastError();
4460   }
4461 
4462   return fd;
4463 }
4464 
4465 FILE* os::open(int fd, const char* mode) {
4466   return ::_fdopen(fd, mode);
4467 }
4468 
4469 // Is a (classpath) directory empty?
4470 bool os::dir_is_empty(const char* path) {
4471   errno_t err;
4472   wchar_t* wide_path = wide_abs_unc_path(path, err, 2);
4473 
4474   if (wide_path == NULL) {
4475     errno = err;
4476     return false;
4477   }
4478 
4479   // Make sure we end with "\\*"
4480   if (wide_path[wcslen(wide_path) - 1] == L'\\') {
4481     wcscat(wide_path, L"*");
4482   } else {
4483     wcscat(wide_path, L"\\*");
4484   }
4485 
4486   WIN32_FIND_DATAW fd;
4487   HANDLE f = ::FindFirstFileW(wide_path, &fd);
4488   os::free(wide_path);
4489   bool is_empty = true;
4490 
4491   if (f != INVALID_HANDLE_VALUE) {
4492     while (is_empty && ::FindNextFileW(f, &fd)) {
4493       // An empty directory contains only the "." (current directory) and
4494       // ".." (parent directory) entries.
4495       if ((wcscmp(fd.cFileName, L".") != 0) &&
4496           (wcscmp(fd.cFileName, L"..") != 0)) {
4497         is_empty = false;
4498       }
4499     }
4500     FindClose(f);
4501   } else {
4502     errno = ::GetLastError();
4503   }
4504 
4505   return is_empty;
4506 }
4507 
4508 // create binary file, rewriting existing file if required
4509 int os::create_binary_file(const char* path, bool rewrite_existing) {
4510   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4511   if (!rewrite_existing) {
4512     oflags |= _O_EXCL;
4513   }
4514   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4515 }
4516 
4517 // return current position of file pointer
4518 jlong os::current_file_offset(int fd) {
4519   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4520 }
4521 
4522 // move file pointer to the specified offset
4523 jlong os::seek_to_file_offset(int fd, jlong offset) {
4524   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4525 }
4526 
4527 
4528 jlong os::lseek(int fd, jlong offset, int whence) {
4529   return (jlong) ::_lseeki64(fd, offset, whence);
4530 }
4531 
4532 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4533   OVERLAPPED ov;
4534   DWORD nread;
4535   BOOL result;
4536 
4537   ZeroMemory(&ov, sizeof(ov));
4538   ov.Offset = (DWORD)offset;
4539   ov.OffsetHigh = (DWORD)(offset >> 32);
4540 
4541   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4542 
4543   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4544 
4545   return result ? nread : 0;
4546 }
4547 
4548 
4549 // This method is a slightly reworked copy of JDK's sysNativePath
4550 // from src/windows/hpi/src/path_md.c
4551 
4552 // Convert a pathname to native format.  On win32, this involves forcing all
4553 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4554 // sometimes rejects '/') and removing redundant separators.  The input path is
4555 // assumed to have been converted into the character encoding used by the local
4556 // system.  Because this might be a double-byte encoding, care is taken to
4557 // treat double-byte lead characters correctly.
4558 //
4559 // This procedure modifies the given path in place, as the result is never
4560 // longer than the original.  There is no error return; this operation always
4561 // succeeds.
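//
// Illustrative examples of the transformation:
//   "C://a//b/"        becomes  "C:\a\b"
//   "//server/share/"  becomes  "\\server\share"
//   "/c:/path"         becomes  "c:\path"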
4562 char * os::native_path(char *path) {
4563   char *src = path, *dst = path, *end = path;
4564   char *colon = NULL;  // If a drive specifier is found, this will
4565                        // point to the colon following the drive letter
4566 
4567   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4568   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4569           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4570 
4571   // Check for leading separators
4572 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4573   while (isfilesep(*src)) {
4574     src++;
4575   }
4576 
4577   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4578     // Remove leading separators if followed by drive specifier.  This
4579     // hack is necessary to support file URLs containing drive
4580     // specifiers (e.g., "file://c:/path").  As a side effect,
4581     // "/c:/path" can be used as an alternative to "c:/path".
4582     *dst++ = *src++;
4583     colon = dst;
4584     *dst++ = ':';
4585     src++;
4586   } else {
4587     src = path;
4588     if (isfilesep(src[0]) && isfilesep(src[1])) {
4589       // UNC pathname: Retain first separator; leave src pointed at
4590       // second separator so that further separators will be collapsed
4591       // into the second separator.  The result will be a pathname
4592       // beginning with "\\\\" followed (most likely) by a host name.
4593       src = dst = path + 1;
4594       path[0] = '\\';     // Force first separator to '\\'
4595     }
4596   }
4597 
4598   end = dst;
4599 
4600   // Remove redundant separators from remainder of path, forcing all
4601   // separators to be '\\' rather than '/'. Also, single byte space
4602   // characters are removed from the end of the path because those
4603   // are not legal ending characters on this operating system.
4604   //
4605   while (*src != '\0') {
4606     if (isfilesep(*src)) {
4607       *dst++ = '\\'; src++;
4608       while (isfilesep(*src)) src++;
4609       if (*src == '\0') {
4610         // Check for trailing separator
4611         end = dst;
4612         if (colon == dst - 2) break;  // "z:\\"
4613         if (dst == path + 1) break;   // "\\"
4614         if (dst == path + 2 && isfilesep(path[0])) {
4615           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4616           // beginning of a UNC pathname.  Even though it is not, by
4617           // itself, a valid UNC pathname, we leave it as is in order
4618           // to be consistent with the path canonicalizer as well
4619           // as the win32 APIs, which treat this case as an invalid
4620           // UNC pathname rather than as an alias for the root
4621           // directory of the current drive.
4622           break;
4623         }
4624         end = --dst;  // Path does not denote a root directory, so
4625                       // remove trailing separator
4626         break;
4627       }
4628       end = dst;
4629     } else {
4630       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4631         *dst++ = *src++;
4632         if (*src) *dst++ = *src++;
4633         end = dst;
4634       } else {  // Copy a single-byte character
4635         char c = *src++;
4636         *dst++ = c;
4637         // Space is not a legal ending character
4638         if (c != ' ') end = dst;
4639       }
4640     }
4641   }
4642 
4643   *end = '\0';
4644 
4645   // For "z:", add "." to work around a bug in the C runtime library
4646   if (colon == dst - 1) {
4647     path[2] = '.';
4648     path[3] = '\0';
4649   }
4650 
4651   return path;
4652 }
4653 
4654 // This code is a copy of JDK's sysSetLength
4655 // from src/windows/hpi/src/sys_api_md.c
4656 
4657 int os::ftruncate(int fd, jlong length) {
4658   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4659   long high = (long)(length >> 32);
4660   DWORD ret;
4661 
4662   if (h == (HANDLE)(-1)) {
4663     return -1;
4664   }
4665 
4666   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4667   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4668     return -1;
4669   }
4670 
4671   if (::SetEndOfFile(h) == FALSE) {
4672     return -1;
4673   }
4674 
4675   return 0;
4676 }
4677 
4678 int os::get_fileno(FILE* fp) {
4679   return _fileno(fp);
4680 }
4681 
4682 // This code is a copy of JDK's sysSync
4683 // from src/windows/hpi/src/sys_api_md.c
4684 // except for the legacy workaround for a bug in Win 98
4685 
4686 int os::fsync(int fd) {
4687   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4688 
4689   if ((!::FlushFileBuffers(handle)) &&
4690       (GetLastError() != ERROR_ACCESS_DENIED)) {
4691     // from winerror.h
4692     return -1;
4693   }
4694   return 0;
4695 }
4696 
4697 static int nonSeekAvailable(int, long *);
4698 static int stdinAvailable(int, long *);
4699 
4700 // This code is a copy of JDK's sysAvailable
4701 // from src/windows/hpi/src/sys_api_md.c
4702 
4703 int os::available(int fd, jlong *bytes) {
4704   jlong cur, end;
4705   struct _stati64 stbuf64;
4706 
4707   if (::_fstati64(fd, &stbuf64) >= 0) {
4708     int mode = stbuf64.st_mode;
4709     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4710       int ret;
4711       long lpbytes;
4712       if (fd == 0) {
4713         ret = stdinAvailable(fd, &lpbytes);
4714       } else {
4715         ret = nonSeekAvailable(fd, &lpbytes);
4716       }
4717       (*bytes) = (jlong)(lpbytes);
4718       return ret;
4719     }
4720     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4721       return FALSE;
4722     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4723       return FALSE;
4724     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4725       return FALSE;
4726     }
4727     *bytes = end - cur;
4728     return TRUE;
4729   } else {
4730     return FALSE;
4731   }
4732 }
4733 
4734 void os::flockfile(FILE* fp) {
4735   _lock_file(fp);
4736 }
4737 
4738 void os::funlockfile(FILE* fp) {
4739   _unlock_file(fp);
4740 }
4741 
4742 // This code is a copy of JDK's nonSeekAvailable
4743 // from src/windows/hpi/src/sys_api_md.c
4744 
4745 static int nonSeekAvailable(int fd, long *pbytes) {
4746   // This is used for available on non-seekable devices
4747   // (like both named and anonymous pipes, such as pipes
4748   //  connected to an exec'd process).
4749   // Standard Input is a special case.
4750   HANDLE han;
4751 
4752   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4753     return FALSE;
4754   }
4755 
4756   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4757     // PeekNamedPipe fails when at EOF.  In that case we
4758     // simply make *pbytes = 0 which is consistent with the
4759     // behavior we get on Solaris when an fd is at EOF.
4760     // The only alternative is to raise an Exception,
4761     // which isn't really warranted.
4762     //
4763     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4764       return FALSE;
4765     }
4766     *pbytes = 0;
4767   }
4768   return TRUE;
4769 }
4770 
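// Cap the number of INPUT_RECORDs we peek at so that the buffer passed to
// PeekConsoleInput stays within the 64K limit noted in stdinAvailable() below.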
4771 #define MAX_INPUT_EVENTS 2000
4772 
4773 // This code is a copy of JDK's stdinAvailable
4774 // from src/windows/hpi/src/sys_api_md.c
4775 
4776 static int stdinAvailable(int fd, long *pbytes) {
4777   HANDLE han;
4778   DWORD numEventsRead = 0;  // Number of events read from buffer
4779   DWORD numEvents = 0;      // Number of events in buffer
4780   DWORD i = 0;              // Loop index
4781   DWORD curLength = 0;      // Position marker
4782   DWORD actualLength = 0;   // Number of bytes readable
4783   BOOL error = FALSE;       // Error holder
4784   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4785 
4786   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4787     return FALSE;
4788   }
4789 
4790   // Construct an array of input records in the console buffer
4791   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4792   if (error == 0) {
4793     return nonSeekAvailable(fd, pbytes);
4794   }
4795 
4796   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4797   if (numEvents > MAX_INPUT_EVENTS) {
4798     numEvents = MAX_INPUT_EVENTS;
4799   }
4800 
4801   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4802   if (lpBuffer == NULL) {
4803     return FALSE;
4804   }
4805 
4806   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4807   if (error == 0) {
4808     os::free(lpBuffer);
4809     return FALSE;
4810   }
4811 
4812   // Examine input records for the number of bytes available
4813   for (i=0; i<numEvents; i++) {
4814     if (lpBuffer[i].EventType == KEY_EVENT) {
4815 
4816       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4817                                       &(lpBuffer[i].Event);
4818       if (keyRecord->bKeyDown == TRUE) {
4819         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4820         curLength++;
4821         if (*keyPressed == '\r') {
4822           actualLength = curLength;
4823         }
4824       }
4825     }
4826   }
4827 
4828   if (lpBuffer != NULL) {
4829     os::free(lpBuffer);
4830   }
4831 
4832   *pbytes = (long) actualLength;
4833   return TRUE;
4834 }
4835 
4836 // Map a block of memory.
4837 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4838                         char *addr, size_t bytes, bool read_only,
4839                         bool allow_exec) {
4840   HANDLE hFile;
4841   char* base;
4842 
4843   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4844                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4845   if (hFile == INVALID_HANDLE_VALUE) {
4846     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4847     return NULL;
4848   }
4849 
4850   if (allow_exec) {
4851     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4852     // unless it comes from a PE image (which the shared archive is not).
4853     // Even VirtualProtect refuses to give execute access to mapped memory
4854     // that was not previously executable.
4855     //
4856     // Instead, stick the executable region in anonymous memory.  Yuck.
4857     // Penalty is that ~4 pages will not be shareable - in the future
4858     // we might consider DLLizing the shared archive with a proper PE
4859     // header so that mapping executable + sharing is possible.
4860 
4861     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4862                                 PAGE_READWRITE);
4863     if (base == NULL) {
4864       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4865       CloseHandle(hFile);
4866       return NULL;
4867     }
4868 
4869     // Record virtual memory allocation
4870     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
4871 
4872     DWORD bytes_read;
4873     OVERLAPPED overlapped;
4874     overlapped.Offset = (DWORD)file_offset;
4875     overlapped.OffsetHigh = 0;
4876     overlapped.hEvent = NULL;
4877     // ReadFile guarantees that if the return value is true, the requested
4878     // number of bytes were read before returning.
4879     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4880     if (!res) {
4881       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4882       release_memory(base, bytes);
4883       CloseHandle(hFile);
4884       return NULL;
4885     }
4886   } else {
4887     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4888                                     NULL /* file_name */);
4889     if (hMap == NULL) {
4890       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4891       CloseHandle(hFile);
4892       return NULL;
4893     }
4894 
4895     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4896     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4897                                   (DWORD)bytes, addr);
4898     if (base == NULL) {
4899       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4900       CloseHandle(hMap);
4901       CloseHandle(hFile);
4902       return NULL;
4903     }
4904 
4905     if (CloseHandle(hMap) == 0) {
4906       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4907       CloseHandle(hFile);
4908       return base;
4909     }
4910   }
4911 
4912   if (allow_exec) {
4913     DWORD old_protect;
4914     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4915     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4916 
4917     if (!res) {
4918       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4919       // Don't consider this a hard error: on IA32, even if the
4920       // VirtualProtect fails, we should still be able to execute.
4921       CloseHandle(hFile);
4922       return base;
4923     }
4924   }
4925 
4926   if (CloseHandle(hFile) == 0) {
4927     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4928     return base;
4929   }
4930 
4931   return base;
4932 }
4933 
4934 
4935 // Remap a block of memory.
4936 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4937                           char *addr, size_t bytes, bool read_only,
4938                           bool allow_exec) {
4939   // This OS does not allow existing memory maps to be remapped so we
4940   // would have to unmap the memory before we remap it.
4941 
4942   // Because there is a small window between unmapping memory and mapping
4943   // it in again with different protections, CDS archives are mapped RW
4944   // on Windows, so this function isn't called.
4945   ShouldNotReachHere();
4946   return NULL;
4947 }
4948 
4949 
4950 // Unmap a block of memory.
4951 // Returns true=success, otherwise false.
4952 
4953 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4954   MEMORY_BASIC_INFORMATION mem_info;
4955   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4956     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
4957     return false;
4958   }
4959 
4960   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4961   // Instead, executable region was allocated using VirtualAlloc(). See
4962   // pd_map_memory() above.
4963   //
4964   // The following flags should match the 'exec_access' flags used for
4965   // VirtualProtect() in pd_map_memory().
4966   if (mem_info.Protect == PAGE_EXECUTE_READ ||
4967       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4968     return pd_release_memory(addr, bytes);
4969   }
4970 
4971   BOOL result = UnmapViewOfFile(addr);
4972   if (result == 0) {
4973     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
4974     return false;
4975   }
4976   return true;
4977 }
4978 
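// Block the VM by creating a 'pause file' (PauseAtStartupFile if set, otherwise
// ./vm.paused.<pid> in the current directory) and polling every 100 ms until that
// file is deleted.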
4979 void os::pause() {
4980   char filename[MAX_PATH];
4981   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4982     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4983   } else {
4984     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4985   }
4986 
4987   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4988   if (fd != -1) {
4989     struct stat buf;
4990     ::close(fd);
4991     while (::stat(filename, &buf) == 0) {
4992       Sleep(100);
4993     }
4994   } else {
4995     jio_fprintf(stderr,
4996                 "Could not open pause file '%s', continuing immediately.\n", filename);
4997   }
4998 }
4999 
5000 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5001 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5002 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
5003 
5004 os::ThreadCrashProtection::ThreadCrashProtection() {
5005 }
5006 
5007 // See the caveats for this class in os_windows.hpp
5008 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back
5009 // into this method and returns false. If no OS EXCEPTION was raised, returns
5010 // true.
5011 // The callback is supposed to provide the method that should be protected.
5012 //
5013 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5014 
5015   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5016 
5017   _protected_thread = Thread::current_or_null();
5018   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5019 
5020   bool success = true;
5021   __try {
5022     _crash_protection = this;
5023     cb.call();
5024   } __except(EXCEPTION_EXECUTE_HANDLER) {
5025     // only for protection, nothing to do
5026     success = false;
5027   }
5028   _crash_protection = NULL;
5029   _protected_thread = NULL;
5030   Thread::muxRelease(&_crash_mux);
5031   return success;
5032 }
5033 
5034 
5035 class HighResolutionInterval : public CHeapObj<mtThread> {
5036   // The default timer resolution seems to be 10 milliseconds.
5037   // (Where is this written down?)
5038   // If someone wants to sleep for only a fraction of the default,
5039   // then we set the timer resolution down to 1 millisecond for
5040   // the duration of their interval.
5041   // We carefully set the resolution back, since otherwise we
5042   // seem to incur an overhead (3%?) that we don't need.
5043   // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
5044   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5045   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5046   // timeBeginPeriod() if the relative error exceeded some threshold.
5047   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5048   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5049   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5050   // resolution timers running.
5051  private:
5052   jlong resolution;
5053  public:
5054   HighResolutionInterval(jlong ms) {
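    // A non-zero remainder means the caller asked for sub-10 ms precision;
    // only then do we pay for raising the timer resolution to 1 ms.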
5055     resolution = ms % 10L;
5056     if (resolution != 0) {
5057       MMRESULT result = timeBeginPeriod(1L);
5058     }
5059   }
5060   ~HighResolutionInterval() {
5061     if (resolution != 0) {
5062       MMRESULT result = timeEndPeriod(1L);
5063     }
5064     resolution = 0L;
5065   }
5066 };
5067 
5068 // An Event wraps a win32 "CreateEvent" kernel handle.
5069 //
5070 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5071 //
5072 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5073 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5074 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5075 //     In addition, an unpark() operation might fetch the handle field, but the
5076 //     event could recycle between the fetch and the SetEvent() operation.
5077 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5078 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5079 //     on a stale but recycled handle would be harmless, but in practice this might
5080 //     confuse other non-Sun code, so it's not a viable approach.
5081 //
5082 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5083 //     with the Event.  The event handle is never closed.  This could be construed
5084 //     as handle leakage, but only up to the maximum # of threads that have been extant
5085 //     at any one time.  This shouldn't be an issue, as Windows platforms typically
5086 //     permit a process to have hundreds of thousands of open handles.
5087 //
5088 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5089 //     and release unused handles.
5090 //
5091 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5092 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5093 //
5094 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5095 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5096 //
5097 // We use (2).
5098 //
5099 // TODO-FIXME:
5100 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5101 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5102 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5103 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5104 //     into a single win32 CreateEvent() handle.
5105 //
5106 // Assumption:
5107 //    Only one parker can exist on an event, which is why we allocate
5108 //    them per-thread. Multiple unparkers can coexist.
5109 //
5110 // _Event transitions in park()
5111 //   -1 => -1 : illegal
5112 //    1 =>  0 : pass - return immediately
5113 //    0 => -1 : block; then set _Event to 0 before returning
5114 //
5115 // _Event transitions in unpark()
5116 //    0 => 1 : just return
5117 //    1 => 1 : just return
5118 //   -1 => either 0 or 1; must signal target thread
5119 //         That is, we can safely transition _Event from -1 to either
5120 //         0 or 1.
5121 //
5122 // _Event serves as a restricted-range semaphore.
5123 //   -1 : thread is blocked, i.e. there is a waiter
5124 //    0 : neutral: thread is running or ready,
5125 //        could have been signaled after a wait started
5126 //    1 : signaled - thread is running or ready
5127 //
5128 // Another possible encoding of _Event would be with
5129 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5130 //
5131 
5132 int os::PlatformEvent::park(jlong Millis) {
5133   // Transitions for _Event:
5134   //   -1 => -1 : illegal
5135   //    1 =>  0 : pass - return immediately
5136   //    0 => -1 : block; then set _Event to 0 before returning
5137 
5138   guarantee(_ParkHandle != NULL , "Invariant");
5139   guarantee(Millis > 0          , "Invariant");
5140 
5141   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5142   // the initial park() operation.
5143   // Consider: use atomic decrement instead of CAS-loop
5144 
5145   int v;
5146   for (;;) {
5147     v = _Event;
5148     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5149   }
5150   guarantee((v == 0) || (v == 1), "invariant");
5151   if (v != 0) return OS_OK;
5152 
5153   // Do this the hard way by blocking ...
5154   // TODO: consider a brief spin here, gated on the success of recent
5155   // spin attempts by this thread.
5156   //
5157   // We decompose long timeouts into series of shorter timed waits.
5158   // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5159   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5160   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5161   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5162   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5163   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5164   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5165   // for the already waited time.  This policy does not admit any new outcomes.
5166   // In the future, however, we might want to track the accumulated wait time and
5167   // adjust Millis accordingly if we encounter a spurious wakeup.
5168 
5169   const int MAXTIMEOUT = 0x10000000;
5170   DWORD rv = WAIT_TIMEOUT;
5171   while (_Event < 0 && Millis > 0) {
5172     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5173     if (Millis > MAXTIMEOUT) {
5174       prd = MAXTIMEOUT;
5175     }
5176     HighResolutionInterval *phri = NULL;
5177     if (!ForceTimeHighResolution) {
5178       phri = new HighResolutionInterval(prd);
5179     }
5180     rv = ::WaitForSingleObject(_ParkHandle, prd);
5181     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5182     if (rv == WAIT_TIMEOUT) {
5183       Millis -= prd;
5184     }
5185     delete phri; // if it is NULL, harmless
5186   }
5187   v = _Event;
5188   _Event = 0;
5189   // see comment at end of os::PlatformEvent::park() below:
5190   OrderAccess::fence();
5191   // If we encounter a nearly simultaneous timeout expiry and unpark()
5192   // we return OS_OK indicating we awoke via unpark().
5193   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5194   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5195 }
5196 
5197 void os::PlatformEvent::park() {
5198   // Transitions for _Event:
5199   //   -1 => -1 : illegal
5200   //    1 =>  0 : pass - return immediately
5201   //    0 => -1 : block; then set _Event to 0 before returning
5202 
5203   guarantee(_ParkHandle != NULL, "Invariant");
5204   // Invariant: Only the thread associated with the Event/PlatformEvent
5205   // may call park().
5206   // Consider: use atomic decrement instead of CAS-loop
5207   int v;
5208   for (;;) {
5209     v = _Event;
5210     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5211   }
5212   guarantee((v == 0) || (v == 1), "invariant");
5213   if (v != 0) return;
5214 
5215   // Do this the hard way by blocking ...
5216   // TODO: consider a brief spin here, gated on the success of recent
5217   // spin attempts by this thread.
5218   while (_Event < 0) {
5219     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5220     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5221   }
5222 
5223   // Usually we'll find _Event == 0 at this point, but as
5224   // an optional optimization we clear it, just in case
5225   // multiple unpark() operations drove _Event up to 1.
5226   _Event = 0;
5227   OrderAccess::fence();
5228   guarantee(_Event >= 0, "invariant");
5229 }
5230 
5231 void os::PlatformEvent::unpark() {
5232   guarantee(_ParkHandle != NULL, "Invariant");
5233 
5234   // Transitions for _Event:
5235   //    0 => 1 : just return
5236   //    1 => 1 : just return
5237   //   -1 => either 0 or 1; must signal target thread
5238   //         That is, we can safely transition _Event from -1 to either
5239   //         0 or 1.
5240   // See also: "Semaphores in Plan 9" by Mullender & Cox
5241   //
5242   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5243   // that it will take two back-to-back park() calls for the owning
5244   // thread to block. This has the benefit of forcing a spurious return
5245   // from the first park() call after an unpark() call which will help
5246   // shake out uses of park() and unpark() without condition variables.
5247 
5248   if (Atomic::xchg(1, &_Event) >= 0) return;
5249 
5250   ::SetEvent(_ParkHandle);
5251 }
5252 
5253 
5254 // JSR166
5255 // -------------------------------------------------------
5256 
5257 // The Windows implementation of Park is very straightforward: Basic
5258 // operations on Win32 Events turn out to have the right semantics to
5259 // use them directly. We opportunistically reuse the event inherited
5260 // from Monitor.
5261 
5262 void Parker::park(bool isAbsolute, jlong time) {
5263   guarantee(_ParkEvent != NULL, "invariant");
5264   // First, demultiplex/decode time arguments
5265   if (time < 0) { // don't wait
5266     return;
5267   } else if (time == 0 && !isAbsolute) {
5268     time = INFINITE;
5269   } else if (isAbsolute) {
5270     time -= os::javaTimeMillis(); // convert to relative time
5271     if (time <= 0) {  // already elapsed
5272       return;
5273     }
5274   } else { // relative
5275     time /= 1000000;  // Must coarsen from nanos to millis
5276     if (time == 0) {  // Wait for the minimal time unit if zero
5277       time = 1;
5278     }
5279   }
5280 
5281   JavaThread* thread = JavaThread::current();
5282 
5283   // Don't wait if interrupted or already triggered
5284   if (thread->is_interrupted(false) ||
5285       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5286     ResetEvent(_ParkEvent);
5287     return;
5288   } else {
5289     ThreadBlockInVM tbivm(thread);
5290     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5291     thread->set_suspend_equivalent();
5292 
5293     WaitForSingleObject(_ParkEvent, time);
5294     ResetEvent(_ParkEvent);
5295 
5296     // If externally suspended while waiting, re-suspend
5297     if (thread->handle_special_suspend_equivalent_condition()) {
5298       thread->java_suspend_self();
5299     }
5300   }
5301 }
5302 
5303 void Parker::unpark() {
5304   guarantee(_ParkEvent != NULL, "invariant");
5305   SetEvent(_ParkEvent);
5306 }
5307 
5308 // Platform Monitor implementation
5309 
5310 // Must already be locked
5311 int os::PlatformMonitor::wait(jlong millis) {
5312   assert(millis >= 0, "negative timeout");
5313   int ret = OS_TIMEOUT;
5314   int status = SleepConditionVariableCS(&_cond, &_mutex,
5315                                         millis == 0 ? INFINITE : millis);
5316   if (status != 0) {
5317     ret = OS_OK;
5318   }
5319   #ifndef PRODUCT
5320   else {
5321     DWORD err = GetLastError();
5322     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5323   }
5324   #endif
5325   return ret;
5326 }
5327 
5328 // Run the specified command in a separate process. Return its exit value,
5329 // or -1 on failure (e.g. can't create a new process).
5330 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5331   STARTUPINFO si;
5332   PROCESS_INFORMATION pi;
5333   DWORD exit_code;
5334 
5335   char * cmd_string;
5336   const char * cmd_prefix = "cmd /C ";
5337   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5338   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5339   if (cmd_string == NULL) {
5340     return -1;
5341   }
5342   cmd_string[0] = '\0';
5343   strcat(cmd_string, cmd_prefix);
5344   strcat(cmd_string, cmd);
5345 
5346   // now replace all '\n' with '&'
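  // ('&' is cmd.exe's command separator, so each line of the original command still runs.)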
5347   char * substring = cmd_string;
5348   while ((substring = strchr(substring, '\n')) != NULL) {
5349     substring[0] = '&';
5350     substring++;
5351   }
5352   memset(&si, 0, sizeof(si));
5353   si.cb = sizeof(si);
5354   memset(&pi, 0, sizeof(pi));
5355   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5356                             cmd_string,    // command line
5357                             NULL,   // process security attribute
5358                             NULL,   // thread security attribute
5359                             TRUE,   // inherits system handles
5360                             0,      // no creation flags
5361                             NULL,   // use parent's environment block
5362                             NULL,   // use parent's starting directory
5363                             &si,    // (in) startup information
5364                             &pi);   // (out) process information
5365 
5366   if (rslt) {
5367     // Wait until child process exits.
5368     WaitForSingleObject(pi.hProcess, INFINITE);
5369 
5370     GetExitCodeProcess(pi.hProcess, &exit_code);
5371 
5372     // Close process and thread handles.
5373     CloseHandle(pi.hProcess);
5374     CloseHandle(pi.hThread);
5375   } else {
5376     exit_code = -1;
5377   }
5378 
5379   FREE_C_HEAP_ARRAY(char, cmd_string);
5380   return (int)exit_code;
5381 }
5382 
5383 bool os::find(address addr, outputStream* st) {
5384   int offset = -1;
5385   bool result = false;
5386   char buf[256];
5387   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5388     st->print(PTR_FORMAT " ", addr);
5389     if (strlen(buf) < sizeof(buf) - 1) {
5390       char* p = strrchr(buf, '\\');
5391       if (p) {
5392         st->print("%s", p + 1);
5393       } else {
5394         st->print("%s", buf);
5395       }
5396     } else {
5397         // The library name is probably truncated. Let's omit the library name.
5398         // See also JDK-8147512.
5399     }
5400     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5401       st->print("::%s + 0x%x", buf, offset);
5402     }
5403     st->cr();
5404     result = true;
5405   }
5406   return result;
5407 }
5408 
5409 static jint initSock() {
5410   WSADATA wsadata;
5411 
5412   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5413     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5414                 ::GetLastError());
5415     return JNI_ERR;
5416   }
5417   return JNI_OK;
5418 }
5419 
5420 struct hostent* os::get_host_by_name(char* name) {
5421   return (struct hostent*)gethostbyname(name);
5422 }
5423 
5424 int os::socket_close(int fd) {
5425   return ::closesocket(fd);
5426 }
5427 
5428 int os::socket(int domain, int type, int protocol) {
5429   return ::socket(domain, type, protocol);
5430 }
5431 
5432 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5433   return ::connect(fd, him, len);
5434 }
5435 
5436 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5437   return ::recv(fd, buf, (int)nBytes, flags);
5438 }
5439 
5440 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5441   return ::send(fd, buf, (int)nBytes, flags);
5442 }
5443 
5444 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5445   return ::send(fd, buf, (int)nBytes, flags);
5446 }
5447 
5448 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5449 #if defined(IA32)
5450   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5451 #elif defined (AMD64)
5452   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5453 #endif
5454 
5455 // returns true if thread could be suspended,
5456 // false otherwise
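// (SuspendThread() returns the thread's previous suspend count on success and
// (DWORD)-1 on failure.)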
5457 static bool do_suspend(HANDLE* h) {
5458   if (h != NULL) {
5459     if (SuspendThread(*h) != ~0) {
5460       return true;
5461     }
5462   }
5463   return false;
5464 }
5465 
5466 // resume the thread
5467 // calling resume on an active thread is a no-op
5468 static void do_resume(HANDLE* h) {
5469   if (h != NULL) {
5470     ResumeThread(*h);
5471   }
5472 }
5473 
5474 // retrieve a suspend/resume context capable handle
5475 // from the tid. Caller validates handle return value.
5476 void get_thread_handle_for_extended_context(HANDLE* h,
5477                                             OSThread::thread_id_t tid) {
5478   if (h != NULL) {
5479     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5480   }
5481 }
5482 
5483 // Thread sampling implementation
5484 //
5485 void os::SuspendedThreadTask::internal_do_task() {
5486   CONTEXT    ctxt;
5487   HANDLE     h = NULL;
5488 
5489   // get context capable handle for thread
5490   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5491 
5492   // sanity
5493   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5494     return;
5495   }
5496 
5497   // suspend the thread
5498   if (do_suspend(&h)) {
5499     ctxt.ContextFlags = sampling_context_flags;
5500     // get thread context
5501     GetThreadContext(h, &ctxt);
5502     SuspendedThreadTaskContext context(_thread, &ctxt);
5503     // pass context to Thread Sampling impl
5504     do_task(context);
5505     // resume thread
5506     do_resume(&h);
5507   }
5508 
5509   // close handle
5510   CloseHandle(h);
5511 }
5512 
5513 bool os::start_debugging(char *buf, int buflen) {
5514   int len = (int)strlen(buf);
5515   char *p = &buf[len];
5516 
5517   jio_snprintf(p, buflen-len,
5518              "\n\n"
5519              "Do you want to debug the problem?\n\n"
5520              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5521              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5522              "Otherwise, select 'No' to abort...",
5523              os::current_process_id(), os::current_thread_id());
5524 
5525   bool yes = os::message_box("Unexpected Error", buf);
5526 
5527   if (yes) {
5528     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5529     // exception. If VM is running inside a debugger, the debugger will
5530     // catch the exception. Otherwise, the breakpoint exception will reach
5531     // the default windows exception handler, which can spawn a debugger and
5532     // automatically attach to the dying VM.
5533     os::breakpoint();
5534     yes = false;
5535   }
5536   return yes;
5537 }
5538 
5539 void* os::get_default_process_handle() {
5540   return (void*)GetModuleHandle(NULL);
5541 }
5542 
5543 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5544 // which is used to find statically linked in agents.
5545 // Additionally for windows, takes into account __stdcall names.
5546 // Parameters:
5547 //            sym_name: Symbol in library we are looking for
5548 //            lib_name: Name of library to look in, NULL for shared libs.
5549 //            is_absolute_path == true if lib_name is absolute path to agent
5550 //                                     such as "C:/a/b/L.dll"
5551 //            == false if only the base name of the library is passed in
5552 //               such as "L"
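//
// Illustrative examples (assuming the Windows JNI_LIB_PREFIX "" and JNI_LIB_SUFFIX ".dll"):
//   sym_name "Agent_OnLoad",    lib_name "L"             -> "Agent_OnLoad_L"
//   sym_name "_Agent_OnLoad@8", lib_name "C:\a\b\L.dll"  -> "_Agent_OnLoad_L@8"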
5553 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5554                                     bool is_absolute_path) {
5555   char *agent_entry_name;
5556   size_t len;
5557   size_t name_len;
5558   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5559   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5560   const char *start;
5561 
5562   if (lib_name != NULL) {
5563     len = name_len = strlen(lib_name);
5564     if (is_absolute_path) {
5565       // Need to strip path, prefix and suffix
5566       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5567         lib_name = ++start;
5568       } else {
5569         // Need to check for drive prefix
5570         if ((start = strchr(lib_name, ':')) != NULL) {
5571           lib_name = ++start;
5572         }
5573       }
5574       if (len <= (prefix_len + suffix_len)) {
5575         return NULL;
5576       }
5577       lib_name += prefix_len;
5578       name_len = strlen(lib_name) - suffix_len;
5579     }
5580   }
5581   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5582   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5583   if (agent_entry_name == NULL) {
5584     return NULL;
5585   }
5586   if (lib_name != NULL) {
5587     const char *p = strrchr(sym_name, '@');
5588     if (p != NULL && p != sym_name) {
5589       // sym_name == _Agent_OnLoad@XX
5590       strncpy(agent_entry_name, sym_name, (p - sym_name));
5591       agent_entry_name[(p-sym_name)] = '\0';
5592       // agent_entry_name == _Agent_OnLoad
5593       strcat(agent_entry_name, "_");
5594       strncat(agent_entry_name, lib_name, name_len);
5595       strcat(agent_entry_name, p);
5596       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5597     } else {
5598       strcpy(agent_entry_name, sym_name);
5599       strcat(agent_entry_name, "_");
5600       strncat(agent_entry_name, lib_name, name_len);
5601     }
5602   } else {
5603     strcpy(agent_entry_name, sym_name);
5604   }
5605   return agent_entry_name;
5606 }
5607 
5608 #ifndef PRODUCT
5609 
5610 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5611 // contiguous memory block at a particular address.
5612 // The test first tries to find a good approximate address to allocate at by using the same
5613 // method to allocate some memory at any address. The test then tries to allocate memory in
5614 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5615 // This is of course only a rough assumption; there is no guarantee that the vicinity of
5616 // the previously allocated memory is available for allocation. The only actual failure
5617 // that is reported is when the test tries to allocate at a particular location but gets a
5618 // different valid one. A NULL return value at this point is not considered an error but may
5619 // be legitimate.
5620 void TestReserveMemorySpecial_test() {
5621   if (!UseLargePages) {
5622     return;
5623   }
5624   // save current value of globals
5625   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5626   bool old_use_numa_interleaving = UseNUMAInterleaving;
5627 
5628   // set globals to make sure we hit the correct code path
5629   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5630 
5631   // do an allocation at an address selected by the OS to get a good one.
5632   const size_t large_allocation_size = os::large_page_size() * 4;
5633   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5634   if (result == NULL) {
5635   } else {
5636     os::release_memory_special(result, large_allocation_size);
5637 
5638     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5639     // we managed to get it once.
5640     const size_t expected_allocation_size = os::large_page_size();
5641     char* expected_location = result + os::large_page_size();
5642     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5643     if (actual_location == NULL) {
5644     } else {
5645       // release memory
5646       os::release_memory_special(actual_location, expected_allocation_size);
5647       // only now check, after releasing any memory to avoid any leaks.
5648       assert(actual_location == expected_location,
5649              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5650              expected_location, expected_allocation_size, actual_location);
5651     }
5652   }
5653 
5654   // restore globals
5655   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5656   UseNUMAInterleaving = old_use_numa_interleaving;
5657 }
5658 #endif // PRODUCT
5659 
5660 /*
5661   All the defined signal names for Windows.
5662 
5663   NOTE that not all of these names are accepted by FindSignal!
5664 
5665   For various reasons some of these may be rejected at runtime.
5666 
5667   Here are the names currently accepted by a user of sun.misc.Signal with
5668   1.4.1 (ignoring potential interaction with use of chaining, etc):
5669 
5670      (LIST TBD)
5671 
5672 */
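// Illustrative usage: get_signal_number("TERM") returns SIGTERM, while an
// unknown name such as "HUP" returns -1.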
5673 int os::get_signal_number(const char* name) {
5674   static const struct {
5675     const char* name;
5676     int         number;
5677   } siglabels [] =
5678     // derived from version 6.0 VC98/include/signal.h
5679   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5680   "FPE",        SIGFPE,         // floating point exception
5681   "SEGV",       SIGSEGV,        // segment violation
5682   "INT",        SIGINT,         // interrupt
5683   "TERM",       SIGTERM,        // software term signal from kill
5684   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5685   "ILL",        SIGILL};        // illegal instruction
5686   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5687     if (strcmp(name, siglabels[i].name) == 0) {
5688       return siglabels[i].number;
5689     }
5690   }
5691   return -1;
5692 }
5693 
5694 // Fast current thread access
5695 
5696 int os::win32::_thread_ptr_offset = 0;
5697 
5698 static void call_wrapper_dummy() {}
5699 
5700 // We need to call the os_exception_wrapper once so that it sets
5701 // up the offset from FS of the thread pointer.
5702 void os::win32::initialize_thread_ptr_offset() {
5703   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5704                            NULL, NULL, NULL, NULL);
5705 }
5706 
5707 bool os::supports_map_sync() {
5708   return false;
5709 }