1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "logging/log.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "memory/allocation.inline.hpp"
  41 #include "memory/filemap.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "services/attachListener.hpp"
  67 #include "services/memTracker.hpp"
  68 #include "services/runtimeService.hpp"
  69 #include "utilities/align.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/growableArray.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 #include "symbolengine.hpp"
  77 #include "windbghelp.hpp"
  78 
  79 
  80 #ifdef _DEBUG
  81 #include <crtdbg.h>
  82 #endif
  83 
  84 
  85 #include <windows.h>
  86 #include <sys/types.h>
  87 #include <sys/stat.h>
  88 #include <sys/timeb.h>
  89 #include <objidl.h>
  90 #include <shlobj.h>
  91 
  92 #include <malloc.h>
  93 #include <signal.h>
  94 #include <direct.h>
  95 #include <errno.h>
  96 #include <fcntl.h>
  97 #include <io.h>
  98 #include <process.h>              // For _beginthreadex(), _endthreadex()
  99 #include <imagehlp.h>             // For os::dll_address_to_function_name
 100 // for enumerating dll libraries
 101 #include <vdmdbg.h>
 102 #include <psapi.h>
 103 #include <mmsystem.h>
 104 #include <winsock2.h>
 105 
 106 // for timer info max values which include all bits
 107 #define ALL_64_BITS CONST64(-1)
 108 
 109 // For DLL loading/load error detection
 110 // PE/COFF file format constants
 111 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 112 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 113 
 114 static HANDLE main_process;
 115 static HANDLE main_thread;
 116 static int    main_thread_id;
 117 
 118 static FILETIME process_creation_time;
 119 static FILETIME process_exit_time;
 120 static FILETIME process_user_time;
 121 static FILETIME process_kernel_time;
 122 
 123 #ifdef _M_AMD64
 124   #define __CPU__ amd64
 125 #else
 126   #define __CPU__ i486
 127 #endif
 128 
 129 #if INCLUDE_AOT
 130 PVOID  topLevelVectoredExceptionHandler = NULL;
 131 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 132 #endif
 133 
 134 // save DLL module handle, used by GetModuleFileName
 135 
 136 HINSTANCE vm_lib_handle;
 137 
 138 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 139   switch (reason) {
 140   case DLL_PROCESS_ATTACH:
 141     vm_lib_handle = hinst;
 142     if (ForceTimeHighResolution) {
 143       timeBeginPeriod(1L);
 144     }
 145     WindowsDbgHelp::pre_initialize();
 146     SymbolEngine::pre_initialize();
 147     break;
 148   case DLL_PROCESS_DETACH:
 149     if (ForceTimeHighResolution) {
 150       timeEndPeriod(1L);
 151     }
 152 #if INCLUDE_AOT
 153     if (topLevelVectoredExceptionHandler != NULL) {
 154       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 155       topLevelVectoredExceptionHandler = NULL;
 156     }
 157 #endif
 158     break;
 159   default:
 160     break;
 161   }
 162   return true;
 163 }
 164 
 165 static inline double fileTimeAsDouble(FILETIME* time) {
 166   const double high  = (double) ((unsigned int) ~0);
 167   const double split = 10000000.0;
 168   double result = (time->dwLowDateTime / split) +
 169                    time->dwHighDateTime * (high/split);
 170   return result;
 171 }
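     // For reference, an exact integer conversion of a FILETIME could combine the
     // two DWORDs through ULARGE_INTEGER instead (sketch only, not used here):
     //   ULARGE_INTEGER ul;
     //   ul.LowPart  = time->dwLowDateTime;
     //   ul.HighPart = time->dwHighDateTime;
     //   // ul.QuadPart now holds the full 64-bit count of 100-ns intervals since 1601.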
 172 
 173 // Implementation of os
 174 
 175 bool os::unsetenv(const char* name) {
 176   assert(name != NULL, "Null pointer");
 177   return (SetEnvironmentVariable(name, NULL) == TRUE);
 178 }
 179 
 180 // No setuid programs under Windows.
 181 bool os::have_special_privileges() {
 182   return false;
 183 }
 184 
 185 
 186 // This method is called periodically to check for misbehaving JNI applications
 187 // under CheckJNI; we can add any periodic checks here.
 188 // On Windows it currently does nothing.
 189 void os::run_periodic_checks() {
 190   return;
 191 }
 192 
 193 // previous UnhandledExceptionFilter, if there is one
 194 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 195 
 196 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 197 
 198 void os::init_system_properties_values() {
 199   // sysclasspath, java_home, dll_dir
 200   {
 201     char *home_path;
 202     char *dll_path;
 203     char *pslash;
 204     const char *bin = "\\bin";
 205     char home_dir[MAX_PATH + 1];
 206     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 207 
 208     if (alt_home_dir != NULL)  {
 209       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 210       home_dir[MAX_PATH] = '\0';
 211     } else {
 212       os::jvm_path(home_dir, sizeof(home_dir));
 213       // Found the full path to jvm.dll.
 214       // Now cut the path to <java_home>/jre if we can.
 215       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 216       pslash = strrchr(home_dir, '\\');
 217       if (pslash != NULL) {
 218         *pslash = '\0';                   // get rid of \{client|server}
 219         pslash = strrchr(home_dir, '\\');
 220         if (pslash != NULL) {
 221           *pslash = '\0';                 // get rid of \bin
 222         }
 223       }
 224     }
 225 
 226     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 227     strcpy(home_path, home_dir);
 228     Arguments::set_java_home(home_path);
 229     FREE_C_HEAP_ARRAY(char, home_path);
 230 
 231     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 232                                 mtInternal);
 233     strcpy(dll_path, home_dir);
 234     strcat(dll_path, bin);
 235     Arguments::set_dll_dir(dll_path);
 236     FREE_C_HEAP_ARRAY(char, dll_path);
 237 
 238     if (!set_boot_path('\\', ';')) {
 239       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 240     }
 241   }
 242 
 243 // library_path
 244 #define EXT_DIR "\\lib\\ext"
 245 #define BIN_DIR "\\bin"
 246 #define PACKAGE_DIR "\\Sun\\Java"
 247   {
 248     // Win32 library search order (See the documentation for LoadLibrary):
 249     //
 250     // 1. The directory from which the application is loaded.
 251     // 2. The system wide Java Extensions directory (Java only)
 252     // 3. System directory (GetSystemDirectory)
 253     // 4. Windows directory (GetWindowsDirectory)
 254     // 5. The PATH environment variable
 255     // 6. The current directory
 256 
 257     char *library_path;
 258     char tmp[MAX_PATH];
 259     char *path_str = ::getenv("PATH");
 260 
 261     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 262                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 263 
 264     library_path[0] = '\0';
 265 
 266     GetModuleFileName(NULL, tmp, sizeof(tmp));
 267     *(strrchr(tmp, '\\')) = '\0';
 268     strcat(library_path, tmp);
 269 
 270     GetWindowsDirectory(tmp, sizeof(tmp));
 271     strcat(library_path, ";");
 272     strcat(library_path, tmp);
 273     strcat(library_path, PACKAGE_DIR BIN_DIR);
 274 
 275     GetSystemDirectory(tmp, sizeof(tmp));
 276     strcat(library_path, ";");
 277     strcat(library_path, tmp);
 278 
 279     GetWindowsDirectory(tmp, sizeof(tmp));
 280     strcat(library_path, ";");
 281     strcat(library_path, tmp);
 282 
 283     if (path_str) {
 284       strcat(library_path, ";");
 285       strcat(library_path, path_str);
 286     }
 287 
 288     strcat(library_path, ";.");
 289 
 290     Arguments::set_library_path(library_path);
 291     FREE_C_HEAP_ARRAY(char, library_path);
 292   }
 293 
 294   // Default extensions directory
 295   {
 296     char path[MAX_PATH];
 297     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 298     GetWindowsDirectory(path, MAX_PATH);
 299     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 300             path, PACKAGE_DIR, EXT_DIR);
 301     Arguments::set_ext_dirs(buf);
 302   }
 303   #undef EXT_DIR
 304   #undef BIN_DIR
 305   #undef PACKAGE_DIR
 306 
 307 #ifndef _WIN64
 308   // set our UnhandledExceptionFilter and save any previous one
 309   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 310 #endif
 311 
 312   // Done
 313   return;
 314 }
 315 
 316 void os::breakpoint() {
 317   DebugBreak();
 318 }
 319 
 320 // Invoked from the BREAKPOINT Macro
 321 extern "C" void breakpoint() {
 322   os::breakpoint();
 323 }
 324 
 325 // The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 326 // So far, this method is only used by Native Memory Tracking, which is
 327 // only supported on Windows XP or later.
 328 //
 329 int os::get_native_stack(address* stack, int frames, int toSkip) {
 330   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 331   for (int index = captured; index < frames; index ++) {
 332     stack[index] = NULL;
 333   }
 334   return captured;
 335 }
 336 
 337 
 338 // os::current_stack_base()
 339 //
 340 //   Returns the base of the stack, which is the stack's
 341 //   starting address.  This function must be called
 342 //   while running on the stack of the thread being queried.
 343 
 344 address os::current_stack_base() {
 345   MEMORY_BASIC_INFORMATION minfo;
 346   address stack_bottom;
 347   size_t stack_size;
 348 
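       // Query the region containing the local variable 'minfo' itself: a local
       // lives on the current thread's stack, so AllocationBase of that region is
       // the lowest address of the stack's reserved area.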
 349   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 350   stack_bottom =  (address)minfo.AllocationBase;
 351   stack_size = minfo.RegionSize;
 352 
 353   // Add up the sizes of all the regions with the same
 354   // AllocationBase.
 355   while (1) {
 356     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 357     if (stack_bottom == (address)minfo.AllocationBase) {
 358       stack_size += minfo.RegionSize;
 359     } else {
 360       break;
 361     }
 362   }
 363   return stack_bottom + stack_size;
 364 }
 365 
 366 size_t os::current_stack_size() {
 367   size_t sz;
 368   MEMORY_BASIC_INFORMATION minfo;
 369   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 370   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 371   return sz;
 372 }
 373 
 374 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 375   MEMORY_BASIC_INFORMATION minfo;
 376   committed_start = NULL;
 377   committed_size = 0;
 378   address top = start + size;
 379   const address start_addr = start;
 380   while (start < top) {
 381     VirtualQuery(start, &minfo, sizeof(minfo));
 382     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 383       if (committed_start != NULL) {
 384         break;
 385       }
 386     } else {  // committed
 387       if (committed_start == NULL) {
 388         committed_start = start;
 389       }
 390       size_t offset = start - (address)minfo.BaseAddress;
 391       committed_size += minfo.RegionSize - offset;
 392     }
 393     start = (address)minfo.BaseAddress + minfo.RegionSize;
 394   }
 395 
 396   if (committed_start == NULL) {
 397     assert(committed_size == 0, "Sanity");
 398     return false;
 399   } else {
 400     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 401     // current region may go beyond the limit, trim to the limit
 402     committed_size = MIN2(committed_size, size_t(top - committed_start));
 403     return true;
 404   }
 405 }
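     // Usage sketch (illustration only; 'base' and 'len' are hypothetical caller values):
     //   address cs; size_t csz;
     //   bool any_committed = os::committed_in_range(base, len, cs, csz);
     //   bool all_committed = any_committed && cs == base && csz == len;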
 406 
 407 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 408   const struct tm* time_struct_ptr = localtime(clock);
 409   if (time_struct_ptr != NULL) {
 410     *res = *time_struct_ptr;
 411     return res;
 412   }
 413   return NULL;
 414 }
 415 
 416 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 417   const struct tm* time_struct_ptr = gmtime(clock);
 418   if (time_struct_ptr != NULL) {
 419     *res = *time_struct_ptr;
 420     return res;
 421   }
 422   return NULL;
 423 }
 424 
 425 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 426 
 427 // Thread start routine for all newly created threads
 428 static unsigned __stdcall thread_native_entry(Thread* thread) {
 429 
 430   thread->record_stack_base_and_size();
 431 
 432   // Try to randomize the cache line index of hot stack frames.
 433   // This helps when threads with the same stack traces evict each other's
 434   // cache lines. The threads can be either from the same JVM instance, or
 435   // from different JVM instances. The benefit is especially noticeable on
 436   // processors with hyperthreading technology.
 437   static int counter = 0;
 438   int pid = os::current_process_id();
 439   _alloca(((pid ^ counter++) & 7) * 128);
 440 
 441   thread->initialize_thread_current();
 442 
 443   OSThread* osthr = thread->osthread();
 444   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 445 
 446   if (UseNUMA) {
 447     int lgrp_id = os::numa_get_group_id();
 448     if (lgrp_id != -1) {
 449       thread->set_lgrp_id(lgrp_id);
 450     }
 451   }
 452 
 453   // Diagnostic code to investigate JDK-6573254
 454   int res = 30115;  // non-java thread
 455   if (thread->is_Java_thread()) {
 456     res = 20115;    // java thread
 457   }
 458 
 459   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 460 
 461   // Install a win32 structured exception handler around every thread created
 462   // by the VM, so the VM can generate an error dump when an exception occurs
 463   // in a non-Java thread (e.g. the VM thread).
 464   __try {
 465     thread->call_run();
 466   } __except(topLevelExceptionFilter(
 467                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 468     // Nothing to do.
 469   }
 470 
 471   // Note: at this point the thread object may already have deleted itself.
 472   // Do not dereference it from here on out.
 473 
 474   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 475 
 476   // One less thread is executing.
 477   // When the VMThread gets here, the main thread may have already exited,
 478   // which frees the CodeHeap containing the Atomic::add code.
 479   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 480     Atomic::dec(&os::win32::_os_thread_count);
 481   }
 482 
 483   // Thread must not return from exit_process_or_thread(), but if it does,
 484   // let it proceed to exit normally
 485   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 486 }
 487 
 488 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 489                                   int thread_id) {
 490   // Allocate the OSThread object
 491   OSThread* osthread = new OSThread(NULL, NULL);
 492   if (osthread == NULL) return NULL;
 493 
 494   // Initialize the JDK library's interrupt event.
 495   // This should really be done when OSThread is constructed,
 496   // but there is no way for a constructor to report failure to
 497   // allocate the event.
 498   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 499   if (interrupt_event == NULL) {
 500     delete osthread;
 501     return NULL;
 502   }
 503   osthread->set_interrupt_event(interrupt_event);
 504 
 505   // Store info on the Win32 thread into the OSThread
 506   osthread->set_thread_handle(thread_handle);
 507   osthread->set_thread_id(thread_id);
 508 
 509   if (UseNUMA) {
 510     int lgrp_id = os::numa_get_group_id();
 511     if (lgrp_id != -1) {
 512       thread->set_lgrp_id(lgrp_id);
 513     }
 514   }
 515 
 516   // Initial thread state is INITIALIZED, not SUSPENDED
 517   osthread->set_state(INITIALIZED);
 518 
 519   return osthread;
 520 }
 521 
 522 
 523 bool os::create_attached_thread(JavaThread* thread) {
 524 #ifdef ASSERT
 525   thread->verify_not_published();
 526 #endif
 527   HANDLE thread_h;
 528   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 529                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 530     fatal("DuplicateHandle failed\n");
 531   }
 532   OSThread* osthread = create_os_thread(thread, thread_h,
 533                                         (int)current_thread_id());
 534   if (osthread == NULL) {
 535     return false;
 536   }
 537 
 538   // Initial thread state is RUNNABLE
 539   osthread->set_state(RUNNABLE);
 540 
 541   thread->set_osthread(osthread);
 542 
 543   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 544     os::current_thread_id());
 545 
 546   return true;
 547 }
 548 
 549 bool os::create_main_thread(JavaThread* thread) {
 550 #ifdef ASSERT
 551   thread->verify_not_published();
 552 #endif
 553   if (_starting_thread == NULL) {
 554     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 555     if (_starting_thread == NULL) {
 556       return false;
 557     }
 558   }
 559 
 560   // The primordial thread is runnable from the start
 561   _starting_thread->set_state(RUNNABLE);
 562 
 563   thread->set_osthread(_starting_thread);
 564   return true;
 565 }
 566 
 567 // Helper function to trace _beginthreadex attributes,
 568 //  similar to os::Posix::describe_pthread_attr()
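     // Example output (a sketch; the result may be truncated to buflen):
     //   "stacksize: 1024k, flags: CREATE_SUSPENDED STACK_SIZE_PARAM_IS_A_RESERVATION "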
 569 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 570                                                size_t stacksize, unsigned initflag) {
 571   stringStream ss(buf, buflen);
 572   if (stacksize == 0) {
 573     ss.print("stacksize: default, ");
 574   } else {
 575     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 576   }
 577   ss.print("flags: ");
 578   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 579   #define ALL(X) \
 580     X(CREATE_SUSPENDED) \
 581     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 582   ALL(PRINT_FLAG)
 583   #undef ALL
 584   #undef PRINT_FLAG
 585   return buf;
 586 }
 587 
 588 // Allocate and initialize a new OSThread
 589 bool os::create_thread(Thread* thread, ThreadType thr_type,
 590                        size_t stack_size) {
 591   unsigned thread_id;
 592 
 593   // Allocate the OSThread object
 594   OSThread* osthread = new OSThread(NULL, NULL);
 595   if (osthread == NULL) {
 596     return false;
 597   }
 598 
 599   // Initialize the JDK library's interrupt event.
 600   // This should really be done when OSThread is constructed,
 601   // but there is no way for a constructor to report failure to
 602   // allocate the event.
 603   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 604   if (interrupt_event == NULL) {
 605     delete osthread;
 606     return false;
 607   }
 608   osthread->set_interrupt_event(interrupt_event);
 609   // We don't call set_interrupted(false) as it will trip the assert in there
 610   // as we are not operating on the current thread. We don't need to call it
 611   // because the initial state is already correct.
 612 
 613   thread->set_osthread(osthread);
 614 
 615   if (stack_size == 0) {
 616     switch (thr_type) {
 617     case os::java_thread:
 618       // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
 619       if (JavaThread::stack_size_at_create() > 0) {
 620         stack_size = JavaThread::stack_size_at_create();
 621       }
 622       break;
 623     case os::compiler_thread:
 624       if (CompilerThreadStackSize > 0) {
 625         stack_size = (size_t)(CompilerThreadStackSize * K);
 626         break;
 627       } // else fall through:
 628         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 629     case os::vm_thread:
 630     case os::pgc_thread:
 631     case os::cgc_thread:
 632     case os::watcher_thread:
 633       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 634       break;
 635     }
 636   }
 637 
 638   // Create the Win32 thread
 639   //
 640   // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
 641   // does not specify the stack size. Instead, it specifies the size of the
 642   // initially committed space. The stack size is determined by the
 643   // PE header in the executable. If the committed "stack_size" is larger
 644   // than the default value in the PE header, the stack is rounded up to the
 645   // nearest multiple of 1MB. For example, if the launcher has a default
 646   // stack size of 320k, specifying any size less than 320k does not
 647   // affect the actual stack size at all; it only affects the initial
 648   // commitment. On the other hand, specifying a 'stack_size' larger than the
 649   // default value may cause a significant increase in memory usage, because
 650   // not only is the stack space rounded up to a multiple of 1MB, but the
 651   // entire space is also committed upfront.
 652   //
 653   // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
 654   // for CreateThread() that can treat 'stack_size' as the stack size. However, we
 655   // are not supposed to call CreateThread() directly according to the MSDN
 656   // documentation, because the JVM uses the C runtime library. The good news is
 657   // that the flag appears to work with _beginthreadex() as well.
 658 
 659   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 660   HANDLE thread_handle =
 661     (HANDLE)_beginthreadex(NULL,
 662                            (unsigned)stack_size,
 663                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 664                            thread,
 665                            initflag,
 666                            &thread_id);
 667 
 668   char buf[64];
 669   if (thread_handle != NULL) {
 670     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 671       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 672   } else {
 673     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 674       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 675     // Log some OS information which might explain why creating the thread failed.
 676     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 677     LogStream st(Log(os, thread)::info());
 678     os::print_memory_info(&st);
 679   }
 680 
 681   if (thread_handle == NULL) {
 682     // Need to clean up stuff we've allocated so far
 683     thread->set_osthread(NULL);
 684     delete osthread;
 685     return false;
 686   }
 687 
 688   Atomic::inc(&os::win32::_os_thread_count);
 689 
 690   // Store info on the Win32 thread into the OSThread
 691   osthread->set_thread_handle(thread_handle);
 692   osthread->set_thread_id(thread_id);
 693 
 694   // Initial thread state is INITIALIZED, not SUSPENDED
 695   osthread->set_state(INITIALIZED);
 696 
 697   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 698   return true;
 699 }
 700 
 701 
 702 // Free Win32 resources related to the OSThread
 703 void os::free_thread(OSThread* osthread) {
 704   assert(osthread != NULL, "osthread not set");
 705 
 706   // We are told to free resources of the argument thread,
 707   // but we can only really operate on the current thread.
 708   assert(Thread::current()->osthread() == osthread,
 709          "os::free_thread but not current thread");
 710 
 711   CloseHandle(osthread->thread_handle());
 712   delete osthread;
 713 }
 714 
 715 static jlong first_filetime;
 716 static jlong initial_performance_count;
 717 static jlong performance_frequency;
 718 
 719 
 720 jlong as_long(LARGE_INTEGER x) {
 721   jlong result = 0; // initialization to avoid warning
 722   set_high(&result, x.HighPart);
 723   set_low(&result, x.LowPart);
 724   return result;
 725 }
 726 
 727 
 728 jlong os::elapsed_counter() {
 729   LARGE_INTEGER count;
 730   QueryPerformanceCounter(&count);
 731   return as_long(count) - initial_performance_count;
 732 }
 733 
 734 
 735 jlong os::elapsed_frequency() {
 736   return performance_frequency;
 737 }
 738 
 739 
 740 julong os::available_memory() {
 741   return win32::available_memory();
 742 }
 743 
 744 julong os::win32::available_memory() {
 745   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
 746   // value if total memory is larger than 4GB
 747   MEMORYSTATUSEX ms;
 748   ms.dwLength = sizeof(ms);
 749   GlobalMemoryStatusEx(&ms);
 750 
 751   return (julong)ms.ullAvailPhys;
 752 }
 753 
 754 julong os::physical_memory() {
 755   return win32::physical_memory();
 756 }
 757 
 758 bool os::has_allocatable_memory_limit(julong* limit) {
 759   MEMORYSTATUSEX ms;
 760   ms.dwLength = sizeof(ms);
 761   GlobalMemoryStatusEx(&ms);
 762 #ifdef _LP64
 763   *limit = (julong)ms.ullAvailVirtual;
 764   return true;
 765 #else
 766   // Limit to 1400m because of the 2gb address space wall
 767   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 768   return true;
 769 #endif
 770 }
 771 
 772 int os::active_processor_count() {
 773   // User has overridden the number of active processors
 774   if (ActiveProcessorCount > 0) {
 775     log_trace(os)("active_processor_count: "
 776                   "active processor count set by user : %d",
 777                   ActiveProcessorCount);
 778     return ActiveProcessorCount;
 779   }
 780 
 781   DWORD_PTR lpProcessAffinityMask = 0;
 782   DWORD_PTR lpSystemAffinityMask = 0;
 783   int proc_count = processor_count();
 784   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 785       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 786     // Number of active processors is the number of set bits in the process affinity mask
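         // The loop below uses the classic Kernighan bit-count: each iteration of
         // mask &= mask - 1 clears the lowest set bit, so the number of iterations
         // equals the number of set bits in the affinity mask.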
 787     int bitcount = 0;
 788     while (lpProcessAffinityMask != 0) {
 789       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 790       bitcount++;
 791     }
 792     return bitcount;
 793   } else {
 794     return proc_count;
 795   }
 796 }
 797 
 798 uint os::processor_id() {
 799   return (uint)GetCurrentProcessorNumber();
 800 }
 801 
 802 void os::set_native_thread_name(const char *name) {
 803 
 804   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 805   //
 806   // Note that unfortunately this only works if the process
 807   // is already attached to a debugger; the debugger must observe
 808   // the exception below to show the correct name.
 809 
 810   // If there is no debugger attached skip raising the exception
 811   if (!IsDebuggerPresent()) {
 812     return;
 813   }
 814 
 815   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 816   struct {
 817     DWORD dwType;     // must be 0x1000
 818     LPCSTR szName;    // pointer to name (in user addr space)
 819     DWORD dwThreadID; // thread ID (-1=caller thread)
 820     DWORD dwFlags;    // reserved for future use, must be zero
 821   } info;
 822 
 823   info.dwType = 0x1000;
 824   info.szName = name;
 825   info.dwThreadID = -1;
 826   info.dwFlags = 0;
 827 
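       // Raise the "magic" exception 0x406D1388: an attached debugger recognizes it
       // and reads the thread name from the exception arguments; the __except block
       // simply swallows the exception so execution continues normally.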
 828   __try {
 829     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 830   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 831 }
 832 
 833 bool os::bind_to_processor(uint processor_id) {
 834   // Not yet implemented.
 835   return false;
 836 }
 837 
 838 void os::win32::initialize_performance_counter() {
 839   LARGE_INTEGER count;
 840   QueryPerformanceFrequency(&count);
 841   performance_frequency = as_long(count);
 842   QueryPerformanceCounter(&count);
 843   initial_performance_count = as_long(count);
 844 }
 845 
 846 
 847 double os::elapsedTime() {
 848   return (double) elapsed_counter() / (double) elapsed_frequency();
 849 }
 850 
 851 
 852 // Windows format:
 853 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 854 // Java format:
 855 //   Java standards require the number of milliseconds since 1/1/1970
 856 
 857 // Constant offset - calculated using offset()
 858 static jlong  _offset   = 116444736000000000;
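     // Derivation of the constant above: 134774 days separate 1601-01-01 from
     // 1970-01-01, so 134774 days * 86400 s/day * 10^7 (100-ns units per second)
     // = 116444736000000000.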
 859 // Fake time counter for reproducible results when debugging
 860 static jlong  fake_time = 0;
 861 
 862 #ifdef ASSERT
 863 // Just to be safe, recalculate the offset in debug mode
 864 static jlong _calculated_offset = 0;
 865 static int   _has_calculated_offset = 0;
 866 
 867 jlong offset() {
 868   if (_has_calculated_offset) return _calculated_offset;
 869   SYSTEMTIME java_origin;
 870   java_origin.wYear          = 1970;
 871   java_origin.wMonth         = 1;
 872   java_origin.wDayOfWeek     = 0; // ignored
 873   java_origin.wDay           = 1;
 874   java_origin.wHour          = 0;
 875   java_origin.wMinute        = 0;
 876   java_origin.wSecond        = 0;
 877   java_origin.wMilliseconds  = 0;
 878   FILETIME jot;
 879   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 880     fatal("Error = %d\nWindows error", GetLastError());
 881   }
 882   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 883   _has_calculated_offset = 1;
 884   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 885   return _calculated_offset;
 886 }
 887 #else
 888 jlong offset() {
 889   return _offset;
 890 }
 891 #endif
 892 
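     // Convert a FILETIME (100-ns units since 1601-01-01) to Java time
     // (milliseconds since 1970-01-01): subtract the epoch offset, then divide
     // by 10,000, the number of 100-ns units per millisecond.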
 893 jlong windows_to_java_time(FILETIME wt) {
 894   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 895   return (a - offset()) / 10000;
 896 }
 897 
 898 // Returns time ticks in 100-nanosecond units (tenths of microseconds)
 899 jlong windows_to_time_ticks(FILETIME wt) {
 900   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 901   return (a - offset());
 902 }
 903 
 904 FILETIME java_to_windows_time(jlong l) {
 905   jlong a = (l * 10000) + offset();
 906   FILETIME result;
 907   result.dwHighDateTime = high(a);
 908   result.dwLowDateTime  = low(a);
 909   return result;
 910 }
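     // Note: windows_to_java_time(java_to_windows_time(ms)) == ms for any millisecond
     // value, while the reverse direction drops sub-millisecond precision.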
 911 
 912 bool os::supports_vtime() { return true; }
 913 
 914 double os::elapsedVTime() {
 915   FILETIME created;
 916   FILETIME exited;
 917   FILETIME kernel;
 918   FILETIME user;
 919   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 920     // the resolution of windows_to_java_time() should be sufficient (ms)
 921     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 922   } else {
 923     return elapsedTime();
 924   }
 925 }
 926 
 927 jlong os::javaTimeMillis() {
 928   FILETIME wt;
 929   GetSystemTimeAsFileTime(&wt);
 930   return windows_to_java_time(wt);
 931 }
 932 
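     // Split the 100-ns tick count into whole seconds and the nanosecond remainder.
     // For illustration: ticks = 16094801234567891 gives seconds = 1609480123 and
     // nanos = 4567891 * 100 = 456789100.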
 933 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 934   FILETIME wt;
 935   GetSystemTimeAsFileTime(&wt);
 936   jlong ticks = windows_to_time_ticks(wt); // 100-ns ticks (tenths of micros)
 937   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 938   seconds = secs;
 939   nanos = jlong(ticks - (secs*10000000)) * 100;
 940 }
 941 
 942 jlong os::javaTimeNanos() {
 943     LARGE_INTEGER current_count;
 944     QueryPerformanceCounter(&current_count);
 945     double current = as_long(current_count);
 946     double freq = performance_frequency;
 947     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 948     return time;
 949 }
 950 
 951 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 952   jlong freq = performance_frequency;
 953   if (freq < NANOSECS_PER_SEC) {
 954     // the performance counter is 64 bits and we will
 955     // be multiplying it -- so no wrap in 64 bits
 956     info_ptr->max_value = ALL_64_BITS;
 957   } else if (freq > NANOSECS_PER_SEC) {
 958     // use the max value the counter can reach to
 959     // determine the max value which could be returned
 960     julong max_counter = (julong)ALL_64_BITS;
 961     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 962   } else {
 963     // the performance counter is 64 bits and we will
 964     // be using it directly -- so no wrap in 64 bits
 965     info_ptr->max_value = ALL_64_BITS;
 966   }
 967 
 968   // using a counter, so no skipping
 969   info_ptr->may_skip_backward = false;
 970   info_ptr->may_skip_forward = false;
 971 
 972   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 973 }
 974 
 975 char* os::local_time_string(char *buf, size_t buflen) {
 976   SYSTEMTIME st;
 977   GetLocalTime(&st);
 978   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 979                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 980   return buf;
 981 }
 982 
 983 bool os::getTimesSecs(double* process_real_time,
 984                       double* process_user_time,
 985                       double* process_system_time) {
 986   HANDLE h_process = GetCurrentProcess();
 987   FILETIME create_time, exit_time, kernel_time, user_time;
 988   BOOL result = GetProcessTimes(h_process,
 989                                 &create_time,
 990                                 &exit_time,
 991                                 &kernel_time,
 992                                 &user_time);
 993   if (result != 0) {
 994     FILETIME wt;
 995     GetSystemTimeAsFileTime(&wt);
 996     jlong rtc_millis = windows_to_java_time(wt);
 997     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 998     *process_user_time =
 999       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
1000     *process_system_time =
1001       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1002     return true;
1003   } else {
1004     return false;
1005   }
1006 }
1007 
1008 void os::shutdown() {
1009   // allow PerfMemory to attempt cleanup of any persistent resources
1010   perfMemory_exit();
1011 
1012   // flush buffered output, finish log files
1013   ostream_abort();
1014 
1015   // Check for abort hook
1016   abort_hook_t abort_hook = Arguments::abort_hook();
1017   if (abort_hook != NULL) {
1018     abort_hook();
1019   }
1020 }
1021 
1022 
1023 static HANDLE dumpFile = NULL;
1024 
1025 // Check if dump file can be created.
1026 void os::check_dump_limit(char* buffer, size_t buffsz) {
1027   bool status = true;
1028   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1029     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1030     status = false;
1031   }
1032 
1033 #ifndef ASSERT
1034   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1035     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1036     status = false;
1037   }
1038 #endif
1039 
1040   if (status) {
1041     const char* cwd = get_current_directory(NULL, 0);
1042     int pid = current_process_id();
1043     if (cwd != NULL) {
1044       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1045     } else {
1046       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1047     }
1048 
1049     if (dumpFile == NULL &&
1050        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1051                  == INVALID_HANDLE_VALUE) {
1052       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1053       status = false;
1054     }
1055   }
1056   VMError::record_coredump_status(buffer, status);
1057 }
1058 
1059 void os::abort(bool dump_core, void* siginfo, const void* context) {
1060   EXCEPTION_POINTERS ep;
1061   MINIDUMP_EXCEPTION_INFORMATION mei;
1062   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1063 
1064   HANDLE hProcess = GetCurrentProcess();
1065   DWORD processId = GetCurrentProcessId();
1066   MINIDUMP_TYPE dumpType;
1067 
1068   shutdown();
1069   if (!dump_core || dumpFile == NULL) {
1070     if (dumpFile != NULL) {
1071       CloseHandle(dumpFile);
1072     }
1073     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1074   }
1075 
1076   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1077     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1078 
1079   if (siginfo != NULL && context != NULL) {
1080     ep.ContextRecord = (PCONTEXT) context;
1081     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1082 
1083     mei.ThreadId = GetCurrentThreadId();
1084     mei.ExceptionPointers = &ep;
1085     pmei = &mei;
1086   } else {
1087     pmei = NULL;
1088   }
1089 
1090   // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
1091   // the dump types we really want. If the first call fails, fall back to just using MiniDumpWithFullMemory.
1092   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1093       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1094     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1095   }
1096   CloseHandle(dumpFile);
1097   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1098 }
1099 
1100 // Die immediately, no exit hook, no abort hook, no cleanup.
1101 void os::die() {
1102   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1103 }
1104 
1105 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1106 //  * dirent_md.c       1.15 00/02/02
1107 //
1108 // The declarations for DIR and struct dirent are in jvm_win32.h.
1109 
1110 // Caller must have already run dirname through JVM_NativePath, which removes
1111 // duplicate slashes and converts all instances of '/' into '\\'.
1112 
1113 DIR * os::opendir(const char *dirname) {
1114   assert(dirname != NULL, "just checking");   // hotspot change
1115   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1116   DWORD fattr;                                // hotspot change
1117   char alt_dirname[4] = { 0, 0, 0, 0 };
1118 
1119   if (dirp == 0) {
1120     errno = ENOMEM;
1121     return 0;
1122   }
1123 
1124   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1125   // as a directory in FindFirstFile().  We detect this case here and
1126   // prepend the current drive name.
1127   //
1128   if (dirname[1] == '\0' && dirname[0] == '\\') {
1129     alt_dirname[0] = _getdrive() + 'A' - 1;
1130     alt_dirname[1] = ':';
1131     alt_dirname[2] = '\\';
1132     alt_dirname[3] = '\0';
1133     dirname = alt_dirname;
1134   }
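       // (For example, if the current drive is C:, opendir("\\") is treated as
       //  opendir("C:\\"); "*.*" is appended to the path further below.)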
1135 
1136   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1137   if (dirp->path == 0) {
1138     free(dirp);
1139     errno = ENOMEM;
1140     return 0;
1141   }
1142   strcpy(dirp->path, dirname);
1143 
1144   fattr = GetFileAttributes(dirp->path);
1145   if (fattr == 0xffffffff) {
1146     free(dirp->path);
1147     free(dirp);
1148     errno = ENOENT;
1149     return 0;
1150   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1151     free(dirp->path);
1152     free(dirp);
1153     errno = ENOTDIR;
1154     return 0;
1155   }
1156 
1157   // Append "*.*", or possibly "\\*.*", to path
1158   if (dirp->path[1] == ':' &&
1159       (dirp->path[2] == '\0' ||
1160       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1161     // No '\\' needed for cases like "Z:" or "Z:\"
1162     strcat(dirp->path, "*.*");
1163   } else {
1164     strcat(dirp->path, "\\*.*");
1165   }
1166 
1167   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1168   if (dirp->handle == INVALID_HANDLE_VALUE) {
1169     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1170       free(dirp->path);
1171       free(dirp);
1172       errno = EACCES;
1173       return 0;
1174     }
1175   }
1176   return dirp;
1177 }
1178 
1179 struct dirent * os::readdir(DIR *dirp) {
1180   assert(dirp != NULL, "just checking");      // hotspot change
1181   if (dirp->handle == INVALID_HANDLE_VALUE) {
1182     return NULL;
1183   }
1184 
1185   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1186 
1187   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1188     if (GetLastError() == ERROR_INVALID_HANDLE) {
1189       errno = EBADF;
1190       return NULL;
1191     }
1192     FindClose(dirp->handle);
1193     dirp->handle = INVALID_HANDLE_VALUE;
1194   }
1195 
1196   return &dirp->dirent;
1197 }
1198 
1199 int os::closedir(DIR *dirp) {
1200   assert(dirp != NULL, "just checking");      // hotspot change
1201   if (dirp->handle != INVALID_HANDLE_VALUE) {
1202     if (!FindClose(dirp->handle)) {
1203       errno = EBADF;
1204       return -1;
1205     }
1206     dirp->handle = INVALID_HANDLE_VALUE;
1207   }
1208   free(dirp->path);
1209   free(dirp);
1210   return 0;
1211 }
1212 
1213 // This must be hard coded because it's the system's temporary
1214 // directory, not the java application's temp directory (a la java.io.tmpdir).
1215 const char* os::get_temp_directory() {
1216   static char path_buf[MAX_PATH];
1217   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1218     return path_buf;
1219   } else {
1220     path_buf[0] = '\0';
1221     return path_buf;
1222   }
1223 }
1224 
1225 // Needs to be in an os-specific directory because Windows requires another
1226 // header file, <direct.h>
1227 const char* os::get_current_directory(char *buf, size_t buflen) {
1228   int n = static_cast<int>(buflen);
1229   if (buflen > INT_MAX)  n = INT_MAX;
1230   return _getcwd(buf, n);
1231 }
1232 
1233 //-----------------------------------------------------------
1234 // Helper functions for fatal error handler
1235 #ifdef _WIN64
1236 // Helper routine which returns true if the address is
1237 // within the NTDLL address space.
1238 //
1239 static bool _addr_in_ntdll(address addr) {
1240   HMODULE hmod;
1241   MODULEINFO minfo;
1242 
1243   hmod = GetModuleHandle("NTDLL.DLL");
1244   if (hmod == NULL) return false;
1245   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1246                                           &minfo, sizeof(MODULEINFO))) {
1247     return false;
1248   }
1249 
1250   if ((addr >= minfo.lpBaseOfDll) &&
1251       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1252     return true;
1253   } else {
1254     return false;
1255   }
1256 }
1257 #endif
1258 
1259 struct _modinfo {
1260   address addr;
1261   char*   full_path;   // points to a char buffer
1262   int     buflen;      // size of the buffer
1263   address base_addr;
1264 };
1265 
1266 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1267                                   address top_address, void * param) {
1268   struct _modinfo *pmod = (struct _modinfo *)param;
1269   if (!pmod) return -1;
1270 
1271   if (base_addr   <= pmod->addr &&
1272       top_address > pmod->addr) {
1273     // if a buffer is provided, copy path name to the buffer
1274     if (pmod->full_path) {
1275       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1276     }
1277     pmod->base_addr = base_addr;
1278     return 1;
1279   }
1280   return 0;
1281 }
1282 
1283 bool os::dll_address_to_library_name(address addr, char* buf,
1284                                      int buflen, int* offset) {
1285   // buf is not optional, but offset is optional
1286   assert(buf != NULL, "sanity check");
1287 
1288 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
1289 //       return the full path to the DLL file, sometimes it returns path
1290 //       to the corresponding PDB file (debug info); sometimes it only
1291 //       returns partial path, which makes life painful.
1292 
1293   struct _modinfo mi;
1294   mi.addr      = addr;
1295   mi.full_path = buf;
1296   mi.buflen    = buflen;
1297   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1298     // buf already contains path name
1299     if (offset) *offset = addr - mi.base_addr;
1300     return true;
1301   }
1302 
1303   buf[0] = '\0';
1304   if (offset) *offset = -1;
1305   return false;
1306 }
1307 
1308 bool os::dll_address_to_function_name(address addr, char *buf,
1309                                       int buflen, int *offset,
1310                                       bool demangle) {
1311   // buf is not optional, but offset is optional
1312   assert(buf != NULL, "sanity check");
1313 
1314   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1315     return true;
1316   }
1317   if (offset != NULL)  *offset  = -1;
1318   buf[0] = '\0';
1319   return false;
1320 }
1321 
1322 // save the start and end address of jvm.dll into param[0] and param[1]
1323 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1324                            address top_address, void * param) {
1325   if (!param) return -1;
1326 
1327   if (base_addr   <= (address)_locate_jvm_dll &&
1328       top_address > (address)_locate_jvm_dll) {
1329     ((address*)param)[0] = base_addr;
1330     ((address*)param)[1] = top_address;
1331     return 1;
1332   }
1333   return 0;
1334 }
1335 
1336 address vm_lib_location[2];    // start and end address of jvm.dll
1337 
1338 // check if addr is inside jvm.dll
1339 bool os::address_is_in_vm(address addr) {
1340   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1341     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1342       assert(false, "Can't find jvm module.");
1343       return false;
1344     }
1345   }
1346 
1347   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1348 }
1349 
1350 // print module info; param is outputStream*
1351 static int _print_module(const char* fname, address base_address,
1352                          address top_address, void* param) {
1353   if (!param) return -1;
1354 
1355   outputStream* st = (outputStream*)param;
1356 
1357   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1358   return 0;
1359 }
1360 
1361 // Loads a .dll/.so and, in case of error,
1362 // checks whether the .dll/.so was built for the
1363 // same architecture that Hotspot is running on
1364 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1365   log_info(os)("attempting shared library load of %s", name);
1366 
1367   void * result = LoadLibrary(name);
1368   if (result != NULL) {
1369     Events::log(NULL, "Loaded shared library %s", name);
1370     // Recalculate pdb search path if a DLL was loaded successfully.
1371     SymbolEngine::recalc_search_path();
1372     log_info(os)("shared library load of %s was successful", name);
1373     return result;
1374   }
1375   DWORD errcode = GetLastError();
1376   // Read system error message into ebuf
1377   // It may or may not be overwritten below (in the for loop and just above)
1378   lasterror(ebuf, (size_t) ebuflen);
1379   ebuf[ebuflen - 1] = '\0';
1380   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1381   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1382 
1383   if (errcode == ERROR_MOD_NOT_FOUND) {
1384     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1385     ebuf[ebuflen - 1] = '\0';
1386     return NULL;
1387   }
1388 
1389   // Parsing the dll below:
1390   // If we can read the dll-info and find that the dll was built
1391   // for an architecture other than the one Hotspot is running on,
1392   // then print "DLL was built for a different architecture" to the buffer;
1393   // otherwise call os::lasterror to obtain the system error message.
1394   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1395   if (fd < 0) {
1396     return NULL;
1397   }
1398 
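       // Relevant PE/COFF layout (per the PE format specification):
       //   file offset 0x3c : 4-byte offset of the "PE\0\0" signature
       //   signature        : 4 bytes
       //   COFF header      : follows the signature; its first 2-byte field (Machine)
       //                      encodes the target architecture, e.g. 0x014c for
       //                      IMAGE_FILE_MACHINE_I386 and 0x8664 for IMAGE_FILE_MACHINE_AMD64.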
1399   uint32_t signature_offset;
1400   uint16_t lib_arch = 0;
1401   bool failed_to_get_lib_arch =
1402     ( // Go to position 0x3c in the dll
1403      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1404      ||
1405      // Read location of signature
1406      (sizeof(signature_offset) !=
1407      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1408      ||
1409      // Go to COFF File Header in dll
1410      // that is located after "signature" (4 bytes long)
1411      (os::seek_to_file_offset(fd,
1412      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1413      ||
1414      // Read field that contains code of architecture
1415      // that dll was built for
1416      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1417     );
1418 
1419   ::close(fd);
1420   if (failed_to_get_lib_arch) {
1421     // file i/o error - report os::lasterror(...) msg
1422     return NULL;
1423   }
1424 
1425   typedef struct {
1426     uint16_t arch_code;
1427     char* arch_name;
1428   } arch_t;
1429 
1430   static const arch_t arch_array[] = {
1431     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1432     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
1433   };
1434 #if (defined _M_AMD64)
1435   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1436 #elif (defined _M_IX86)
1437   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1438 #else
1439   #error Method os::dll_load requires that one of following \
1440          is defined :_M_AMD64 or _M_IX86
1441 #endif
1442 
1443 
1444   // Obtain strings for the printf operation below:
1445   // lib_arch_str describes the platform this .dll was built for,
1446   // running_arch_str describes the platform Hotspot was built for.
1447   char *running_arch_str = NULL, *lib_arch_str = NULL;
1448   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1449     if (lib_arch == arch_array[i].arch_code) {
1450       lib_arch_str = arch_array[i].arch_name;
1451     }
1452     if (running_arch == arch_array[i].arch_code) {
1453       running_arch_str = arch_array[i].arch_name;
1454     }
1455   }
1456 
1457   assert(running_arch_str,
1458          "Didn't find running architecture code in arch_array");
1459 
1460   // If the architecture is right
1461   // but some other error took place - report os::lasterror(...) msg
1462   if (lib_arch == running_arch) {
1463     return NULL;
1464   }
1465 
1466   if (lib_arch_str != NULL) {
1467     ::_snprintf(ebuf, ebuflen - 1,
1468                 "Can't load %s-bit .dll on a %s-bit platform",
1469                 lib_arch_str, running_arch_str);
1470   } else {
1471     // don't know what architecture this dll was built for
1472     ::_snprintf(ebuf, ebuflen - 1,
1473                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1474                 lib_arch, running_arch_str);
1475   }
1476 
1477   return NULL;
1478 }
1479 
1480 void os::print_dll_info(outputStream *st) {
1481   st->print_cr("Dynamic libraries:");
1482   get_loaded_modules_info(_print_module, (void *)st);
1483 }
1484 
1485 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1486   HANDLE   hProcess;
1487 
1488 # define MAX_NUM_MODULES 128
1489   HMODULE     modules[MAX_NUM_MODULES];
1490   static char filename[MAX_PATH];
1491   int         result = 0;
1492 
1493   int pid = os::current_process_id();
1494   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1495                          FALSE, pid);
1496   if (hProcess == NULL) return 0;
1497 
1498   DWORD size_needed;
1499   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1500     CloseHandle(hProcess);
1501     return 0;
1502   }
1503 
1504   // number of modules that are currently loaded
1505   int num_modules = size_needed / sizeof(HMODULE);
1506 
1507   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1508     // Get Full pathname:
1509     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1510       filename[0] = '\0';
1511     }
1512 
1513     MODULEINFO modinfo;
1514     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1515       modinfo.lpBaseOfDll = NULL;
1516       modinfo.SizeOfImage = 0;
1517     }
1518 
1519     // Invoke callback function
1520     result = callback(filename, (address)modinfo.lpBaseOfDll,
1521                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1522     if (result) break;
1523   }
1524 
1525   CloseHandle(hProcess);
1526   return result;
1527 }
1528 
1529 bool os::get_host_name(char* buf, size_t buflen) {
1530   DWORD size = (DWORD)buflen;
1531   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1532 }
1533 
1534 void os::get_summary_os_info(char* buf, size_t buflen) {
1535   stringStream sst(buf, buflen);
1536   os::win32::print_windows_version(&sst);
1537   // chop off newline character
1538   char* nl = strchr(buf, '\n');
1539   if (nl != NULL) *nl = '\0';
1540 }
1541 
1542 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1543 #if _MSC_VER >= 1900
1544   // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1545   int result = ::vsnprintf(buf, len, fmt, args);
1546   // If an encoding error occurred (result < 0) then it's not clear
1547   // whether the buffer is NUL terminated, so ensure it is.
1548   if ((result < 0) && (len > 0)) {
1549     buf[len - 1] = '\0';
1550   }
1551   return result;
1552 #else
1553   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1554   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1555   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1556   // go straight to _vscprintf.  The output is going to be truncated in
1557   // that case, except in the unusual case of empty output.  More
1558   // importantly, the documentation for various versions of Visual Studio
1559   // is inconsistent about the behavior of _vsnprintf when len == 0,
1560   // including it possibly being an error.
1561   int result = -1;
1562   if (len > 0) {
1563     result = _vsnprintf(buf, len, fmt, args);
1564     // If output (including NUL terminator) is truncated, the buffer
1565     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1566     if ((result < 0) || ((size_t)result >= len)) {
1567       buf[len - 1] = '\0';
1568     }
1569   }
1570   if (result < 0) {
1571     result = _vscprintf(fmt, args);
1572   }
1573   return result;
1574 #endif // _MSC_VER dispatch
1575 }
1576 
1577 static inline time_t get_mtime(const char* filename) {
1578   struct stat st;
1579   int ret = os::stat(filename, &st);
1580   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1581   return st.st_mtime;
1582 }
1583 
1584 int os::compare_file_modified_times(const char* file1, const char* file2) {
1585   time_t t1 = get_mtime(file1);
1586   time_t t2 = get_mtime(file2);
1587   return t1 - t2;
1588 }
1589 
1590 void os::print_os_info_brief(outputStream* st) {
1591   os::print_os_info(st);
1592 }
1593 
1594 void os::print_os_info(outputStream* st) {
1595 #ifdef ASSERT
1596   char buffer[1024];
1597   st->print("HostName: ");
1598   if (get_host_name(buffer, sizeof(buffer))) {
1599     st->print("%s ", buffer);
1600   } else {
1601     st->print("N/A ");
1602   }
1603 #endif
1604   st->print("OS:");
1605   os::win32::print_windows_version(st);
1606 
1607 #ifdef _LP64
1608   VM_Version::print_platform_virtualization_info(st);
1609 #endif
1610 }
1611 
1612 void os::win32::print_windows_version(outputStream* st) {
1613   OSVERSIONINFOEX osvi;
1614   VS_FIXEDFILEINFO *file_info;
1615   TCHAR kernel32_path[MAX_PATH];
1616   UINT len, ret;
1617 
1618   // Use the GetVersionEx information to see if we're on a server or
1619   // workstation edition of Windows. Starting with Windows 8.1 we can't
1620   // trust the OS version information returned by this API.
1621   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1622   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1623   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1624     st->print_cr("Call to GetVersionEx failed");
1625     return;
1626   }
1627   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1628 
1629   // Get the full path to \Windows\System32\kernel32.dll and use that for
1630   // determining what version of Windows we're running on.
1631   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1632   ret = GetSystemDirectory(kernel32_path, len);
1633   if (ret == 0 || ret > len) {
1634     st->print_cr("Call to GetSystemDirectory failed");
1635     return;
1636   }
1637   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1638 
1639   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1640   if (version_size == 0) {
1641     st->print_cr("Call to GetFileVersionInfoSize failed");
1642     return;
1643   }
1644 
1645   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1646   if (version_info == NULL) {
1647     st->print_cr("Failed to allocate version_info");
1648     return;
1649   }
1650 
1651   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1652     os::free(version_info);
1653     st->print_cr("Call to GetFileVersionInfo failed");
1654     return;
1655   }
1656 
1657   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1658     os::free(version_info);
1659     st->print_cr("Call to VerQueryValue failed");
1660     return;
1661   }
1662 
1663   int major_version = HIWORD(file_info->dwProductVersionMS);
1664   int minor_version = LOWORD(file_info->dwProductVersionMS);
1665   int build_number = HIWORD(file_info->dwProductVersionLS);
1666   int build_minor = LOWORD(file_info->dwProductVersionLS);
1667   int os_vers = major_version * 1000 + minor_version;
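       // e.g. kernel32.dll product version 6.3 (Windows 8.1 / Server 2012 R2) maps to
       // os_vers 6003, and version 10.0 maps to 10000.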
1668   os::free(version_info);
1669 
1670   st->print(" Windows ");
1671   switch (os_vers) {
1672 
1673   case 6000:
1674     if (is_workstation) {
1675       st->print("Vista");
1676     } else {
1677       st->print("Server 2008");
1678     }
1679     break;
1680 
1681   case 6001:
1682     if (is_workstation) {
1683       st->print("7");
1684     } else {
1685       st->print("Server 2008 R2");
1686     }
1687     break;
1688 
1689   case 6002:
1690     if (is_workstation) {
1691       st->print("8");
1692     } else {
1693       st->print("Server 2012");
1694     }
1695     break;
1696 
1697   case 6003:
1698     if (is_workstation) {
1699       st->print("8.1");
1700     } else {
1701       st->print("Server 2012 R2");
1702     }
1703     break;
1704 
1705   case 10000:
1706     if (is_workstation) {
1707       st->print("10");
1708     } else {
1709       // distinguish Windows Server 2016 and 2019 by build number
1710       // Windows Server 2019 GA (10/2018) build number is 17763
1711       if (build_number > 17762) {
1712         st->print("Server 2019");
1713       } else {
1714         st->print("Server 2016");
1715       }
1716     }
1717     break;
1718 
1719   default:
1720   // Unrecognized Windows version; print out its major and minor versions
1721     st->print("%d.%d", major_version, minor_version);
1722     break;
1723   }
1724 
1725   // Retrieve SYSTEM_INFO via GetNativeSystemInfo so that we can determine
1726   // whether we are running on a 64-bit processor.
1727   SYSTEM_INFO si;
1728   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1729   GetNativeSystemInfo(&si);
1730   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1731     st->print(" , 64 bit");
1732   }
1733 
1734   st->print(" Build %d", build_number);
1735   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1736   st->cr();
1737 }
1738 
1739 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1740   // Nothing to do for now.
1741 }
1742 
1743 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1744   HKEY key;
1745   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1746                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1747   if (status == ERROR_SUCCESS) {
1748     DWORD size = (DWORD)buflen;
1749     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1750     if (status != ERROR_SUCCESS) {
1751       strncpy(buf, "## __CPU__", buflen);
1752     }
1753     RegCloseKey(key);
1754   } else {
1755     // Put generic cpu info in the buffer to return
1756     strncpy(buf, "## __CPU__", buflen);
1757   }
1758 }
1759 
1760 void os::print_memory_info(outputStream* st) {
1761   st->print("Memory:");
1762   st->print(" %dk page", os::vm_page_size()>>10);
1763 
1764   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1765   // value if total memory is larger than 4GB
1766   MEMORYSTATUSEX ms;
1767   ms.dwLength = sizeof(ms);
1768   int r1 = GlobalMemoryStatusEx(&ms);
1769 
1770   if (r1 != 0) {
1771     st->print(", system-wide physical " INT64_FORMAT "M ",
1772              (int64_t) ms.ullTotalPhys >> 20);
1773     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1774 
1775     st->print("TotalPageFile size " INT64_FORMAT "M ",
1776              (int64_t) ms.ullTotalPageFile >> 20);
1777     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1778              (int64_t) ms.ullAvailPageFile >> 20);
1779 
1780     // On 32-bit, Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit)
1781 #if defined(_M_IX86)
1782     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1783              (int64_t) ms.ullTotalVirtual >> 20);
1784     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1785 #endif
1786   } else {
1787     st->print(", GlobalMemoryStatusEx did not succeed, so some memory values are missing.");
1788   }
1789 
1790   // extended memory statistics for a process
1791   PROCESS_MEMORY_COUNTERS_EX pmex;
1792   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1793   pmex.cb = sizeof(pmex);
1794   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1795 
1796   if (r2 != 0) {
1797     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1798              (int64_t) pmex.WorkingSetSize >> 20);
1799     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1800 
1801     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1802              (int64_t) pmex.PrivateUsage >> 20);
1803     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1804   } else {
1805     st->print("\nGetProcessMemoryInfo did not succeed, so some memory values are missing.");
1806   }
1807 
1808   st->cr();
1809 }
1810 
1811 bool os::signal_sent_by_kill(const void* siginfo) {
1812   // TODO: Is this possible?
1813   return false;
1814 }
1815 
1816 void os::print_siginfo(outputStream *st, const void* siginfo) {
1817   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1818   st->print("siginfo:");
1819 
1820   char tmp[64];
1821   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1822     strcpy(tmp, "EXCEPTION_??");
1823   }
1824   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1825 
1826   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1827        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1828        er->NumberParameters >= 2) {
1829     switch (er->ExceptionInformation[0]) {
1830     case 0: st->print(", reading address"); break;
1831     case 1: st->print(", writing address"); break;
1832     case 8: st->print(", data execution prevention violation at address"); break;
1833     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1834                        er->ExceptionInformation[0]);
1835     }
1836     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1837   } else {
1838     int num = er->NumberParameters;
1839     if (num > 0) {
1840       st->print(", ExceptionInformation=");
1841       for (int i = 0; i < num; i++) {
1842         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1843       }
1844     }
1845   }
1846   st->cr();
1847 }
1848 
1849 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1850   // TODO: Can we kill thread?
1851   return false;
1852 }
1853 
1854 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1855   // do nothing
1856 }
1857 
1858 static char saved_jvm_path[MAX_PATH] = {0};
1859 
1860 // Find the full path to the current module, jvm.dll
1861 void os::jvm_path(char *buf, jint buflen) {
1862   // Error checking.
1863   if (buflen < MAX_PATH) {
1864     assert(false, "must use a large-enough buffer");
1865     buf[0] = '\0';
1866     return;
1867   }
1868   // Lazy resolve the path to current module.
1869   if (saved_jvm_path[0] != 0) {
1870     strcpy(buf, saved_jvm_path);
1871     return;
1872   }
1873 
1874   buf[0] = '\0';
1875   if (Arguments::sun_java_launcher_is_altjvm()) {
1876     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1877     // for a JAVA_HOME environment variable and fix up the path so it
1878     // looks like jvm.dll is installed there (append a fake suffix
1879     // hotspot/jvm.dll).
1880     char* java_home_var = ::getenv("JAVA_HOME");
1881     if (java_home_var != NULL && java_home_var[0] != 0 &&
1882         strlen(java_home_var) < (size_t)buflen) {
1883       strncpy(buf, java_home_var, buflen);
1884 
1885       // determine if this is a legacy image or modules image
1886       // modules image doesn't have "jre" subdirectory
1887       size_t len = strlen(buf);
1888       char* jrebin_p = buf + len;
1889       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1890       if (0 != _access(buf, 0)) {
1891         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1892       }
1893       len = strlen(buf);
1894       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1895     }
1896   }
1897 
1898   if (buf[0] == '\0') {
1899     GetModuleFileName(vm_lib_handle, buf, buflen);
1900   }
1901   strncpy(saved_jvm_path, buf, MAX_PATH);
1902   saved_jvm_path[MAX_PATH - 1] = '\0';
1903 }
1904 
1905 
1906 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1907 #ifndef _WIN64
1908   st->print("_");
1909 #endif
1910 }
1911 
1912 
1913 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1914 #ifndef _WIN64
1915   st->print("@%d", args_size  * sizeof(int));
1916 #endif
1917 }
1918 
1919 // This method is a copy of JDK's sysGetLastErrorString
1920 // from src/windows/hpi/src/system_md.c
1921 
1922 size_t os::lasterror(char* buf, size_t len) {
1923   DWORD errval;
1924 
1925   if ((errval = GetLastError()) != 0) {
1926     // DOS error
1927     size_t n = (size_t)FormatMessage(
1928                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1929                                      NULL,
1930                                      errval,
1931                                      0,
1932                                      buf,
1933                                      (DWORD)len,
1934                                      NULL);
1935     if (n > 3) {
1936       // Drop final '.', CR, LF
1937       if (buf[n - 1] == '\n') n--;
1938       if (buf[n - 1] == '\r') n--;
1939       if (buf[n - 1] == '.') n--;
1940       buf[n] = '\0';
1941     }
1942     return n;
1943   }
1944 
1945   if (errno != 0) {
1946     // C runtime error that has no corresponding DOS error code
1947     const char* s = os::strerror(errno);
1948     size_t n = strlen(s);
1949     if (n >= len) n = len - 1;
1950     strncpy(buf, s, n);
1951     buf[n] = '\0';
1952     return n;
1953   }
1954 
1955   return 0;
1956 }
1957 
1958 int os::get_last_error() {
1959   DWORD error = GetLastError();
1960   if (error == 0) {
1961     error = errno;
1962   }
1963   return (int)error;
1964 }
1965 
1966 // sun.misc.Signal
1967 // NOTE: this is a workaround for an apparent kernel bug: if a signal
1968 // handler for SIGBREAK is installed, that signal handler takes priority
1969 // over the console control handler for CTRL_CLOSE_EVENT.
1970 // See bug 4416763.
1971 static void (*sigbreakHandler)(int) = NULL;
1972 
1973 static void UserHandler(int sig, void *siginfo, void *context) {
1974   os::signal_notify(sig);
1975   // We need to reinstate the signal handler each time...
1976   os::signal(sig, (void*)UserHandler);
1977 }
1978 
1979 void* os::user_handler() {
1980   return (void*) UserHandler;
1981 }
1982 
1983 void* os::signal(int signal_number, void* handler) {
1984   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1985     void (*oldHandler)(int) = sigbreakHandler;
1986     sigbreakHandler = (void (*)(int)) handler;
1987     return (void*) oldHandler;
1988   } else {
1989     return (void*)::signal(signal_number, (void (*)(int))handler);
1990   }
1991 }
1992 
1993 void os::signal_raise(int signal_number) {
1994   raise(signal_number);
1995 }
1996 
1997 // The Win32 C runtime library maps all console control events other than ^C
1998 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
1999 // logoff, and shutdown events.  We therefore install our own console handler
2000 // that raises SIGTERM for the latter cases.
2001 //
2002 static BOOL WINAPI consoleHandler(DWORD event) {
2003   switch (event) {
2004   case CTRL_C_EVENT:
2005     if (VMError::is_error_reported()) {
2006       // Ctrl-C is pressed during error reporting, likely because the error
2007       // handler fails to abort. Let VM die immediately.
2008       os::die();
2009     }
2010 
2011     os::signal_raise(SIGINT);
2012     return TRUE;
2013     break;
2014   case CTRL_BREAK_EVENT:
2015     if (sigbreakHandler != NULL) {
2016       (*sigbreakHandler)(SIGBREAK);
2017     }
2018     return TRUE;
2019     break;
2020   case CTRL_LOGOFF_EVENT: {
2021     // Don't terminate JVM if it is running in a non-interactive session,
2022     // such as a service process.
2023     USEROBJECTFLAGS flags;
2024     HANDLE handle = GetProcessWindowStation();
2025     if (handle != NULL &&
2026         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2027         sizeof(USEROBJECTFLAGS), NULL)) {
2028       // If it is a non-interactive session, let the next handler deal
2029       // with it.
2030       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2031         return FALSE;
2032       }
2033     }
2034   }
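       // Note: intentional fall through - an interactive logoff is handled like the
       // close/shutdown events below and raises SIGTERM.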
2035   case CTRL_CLOSE_EVENT:
2036   case CTRL_SHUTDOWN_EVENT:
2037     os::signal_raise(SIGTERM);
2038     return TRUE;
2039     break;
2040   default:
2041     break;
2042   }
2043   return FALSE;
2044 }
2045 
2046 // The following code was moved from os.cpp because it is
2047 // platform specific by its very nature.
2048 
2049 // Return maximum OS signal used + 1 for internal use only
2050 // Used as exit signal for signal_thread
2051 int os::sigexitnum_pd() {
2052   return NSIG;
2053 }
2054 
2055 // a counter for each possible signal value, including signal_thread exit signal
2056 static volatile jint pending_signals[NSIG+1] = { 0 };
2057 static Semaphore* sig_sem = NULL;
2058 
2059 static void jdk_misc_signal_init() {
2060   // Initialize signal structures
2061   memset((void*)pending_signals, 0, sizeof(pending_signals));
2062 
2063   // Initialize signal semaphore
2064   sig_sem = new Semaphore();
2065 
2066   // Programs embedding the VM do not want it to attempt to receive
2067   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2068   // shutdown hooks mechanism introduced in 1.3.  For example, when
2069   // the VM is run as part of a Windows NT service (e.g., a servlet
2070   // engine in a web server), the correct behavior is for any console
2071   // control handler to return FALSE, not TRUE, because the OS's
2072   // "final" handler for such events allows the process to continue if
2073   // it is a service (while terminating it if it is not a service).
2074   // To make this behavior uniform and the mechanism simpler, we
2075   // completely disable the VM's usage of these console events if -Xrs
2076   // (=ReduceSignalUsage) is specified.  This means, for example, that
2077   // the CTRL-BREAK thread dump mechanism is also disabled in this
2078   // case.  See bugs 4323062, 4345157, and related bugs.
2079 
2080   // Add a CTRL-C handler
2081   SetConsoleCtrlHandler(consoleHandler, TRUE);
2082 }
2083 
2084 void os::signal_notify(int sig) {
2085   if (sig_sem != NULL) {
2086     Atomic::inc(&pending_signals[sig]);
2087     sig_sem->signal();
2088   } else {
2089     // With ReduceSignalUsage, the signal thread is not created and
2090     // jdk_misc_signal_init initialization isn't called.
2091     assert(ReduceSignalUsage, "signal semaphore should be created");
2092   }
2093 }
2094 
2095 static int check_pending_signals() {
2096   while (true) {
2097     for (int i = 0; i < NSIG + 1; i++) {
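           // Atomically claim one pending occurrence of signal i: the cmpxchg only
           // decrements the counter if it still holds the value we just read.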
2098       jint n = pending_signals[i];
2099       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
2100         return i;
2101       }
2102     }
2103     JavaThread *thread = JavaThread::current();
2104 
2105     ThreadBlockInVM tbivm(thread);
2106 
2107     bool threadIsSuspended;
2108     do {
2109       thread->set_suspend_equivalent();
2110       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2111       sig_sem->wait();
2112 
2113       // were we externally suspended while we were waiting?
2114       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2115       if (threadIsSuspended) {
2116         // The semaphore has been incremented, but while we were waiting
2117         // another thread suspended us. We don't want to continue running
2118         // while suspended because that would surprise the thread that
2119         // suspended us.
2120         sig_sem->signal();
2121 
2122         thread->java_suspend_self();
2123       }
2124     } while (threadIsSuspended);
2125   }
2126 }
2127 
2128 int os::signal_wait() {
2129   return check_pending_signals();
2130 }
2131 
2132 // Implicit OS exception handling
2133 
2134 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2135                       address handler) {
2136   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2137   // Save pc in thread
2138 #ifdef _M_AMD64
2139   // Do not blow up if no thread info available.
2140   if (thread) {
2141     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2142   }
2143   // Set pc to handler
2144   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2145 #else
2146   // Do not blow up if no thread info available.
2147   if (thread) {
2148     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2149   }
2150   // Set pc to handler
2151   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2152 #endif
2153 
2154   // Continue the execution
2155   return EXCEPTION_CONTINUE_EXECUTION;
2156 }
2157 
2158 
2159 // Used for PostMortemDump
2160 extern "C" void safepoints();
2161 extern "C" void find(int x);
2162 extern "C" void events();
2163 
2164 // According to Windows API documentation, an illegal instruction sequence should generate
2165 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2166 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2167 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2168 
2169 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2170 
2171 // From "Execution Protection in the Windows Operating System" draft 0.35
2172 // Once a system header becomes available, the "real" define should be
2173 // included or copied here.
2174 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2175 
2176 // Windows Vista/2008 heap corruption check
2177 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2178 
2179 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2180 // C++ compiler contain this error code. Because this is a compiler-generated
2181 // error, the code is not listed in the Win32 API header files.
2182 // The code is actually a cryptic mnemonic device, with the initial "E"
2183 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2184 // ASCII values of "msc".
2185 
2186 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2187 
2188 #define def_excpt(val) { #val, (val) }
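     // e.g. def_excpt(EXCEPTION_ACCESS_VIOLATION) expands to
     //   { "EXCEPTION_ACCESS_VIOLATION", EXCEPTION_ACCESS_VIOLATION }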
2189 
2190 static const struct { const char* name; uint number; } exceptlabels[] = {
2191     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2192     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2193     def_excpt(EXCEPTION_BREAKPOINT),
2194     def_excpt(EXCEPTION_SINGLE_STEP),
2195     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2196     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2197     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2198     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2199     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2200     def_excpt(EXCEPTION_FLT_OVERFLOW),
2201     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2202     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2203     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2204     def_excpt(EXCEPTION_INT_OVERFLOW),
2205     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2206     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2207     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2208     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2209     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2210     def_excpt(EXCEPTION_STACK_OVERFLOW),
2211     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2212     def_excpt(EXCEPTION_GUARD_PAGE),
2213     def_excpt(EXCEPTION_INVALID_HANDLE),
2214     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2215     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2216 };
2217 
2218 #undef def_excpt
2219 
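     // Map a Win32 exception code to its symbolic name, e.g.
     // exception_name(EXCEPTION_ACCESS_VIOLATION, buf, n) fills buf with
     // "EXCEPTION_ACCESS_VIOLATION"; unknown codes return NULL.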
2220 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2221   uint code = static_cast<uint>(exception_code);
2222   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2223     if (exceptlabels[i].number == code) {
2224       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2225       return buf;
2226     }
2227   }
2228 
2229   return NULL;
2230 }
2231 
2232 //-----------------------------------------------------------------------------
2233 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2234   // handle exception caused by idiv; should only happen for -MinInt/-1
2235   // (division by zero is handled explicitly)
2236 #ifdef  _M_AMD64
2237   PCONTEXT ctx = exceptionInfo->ContextRecord;
2238   address pc = (address)ctx->Rip;
2239   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2240   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2241   if (pc[0] == 0xF7) {
2242     // set correct result values and continue after idiv instruction
2243     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2244   } else {
2245     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2246   }
2247   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation).
2248   // This is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2249   // idiv opcode (0xF7).
2250   ctx->Rdx = (DWORD)0;             // remainder
2251   // Continue the execution
2252 #else
2253   PCONTEXT ctx = exceptionInfo->ContextRecord;
2254   address pc = (address)ctx->Eip;
2255   assert(pc[0] == 0xF7, "not an idiv opcode");
2256   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2257   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2258   // set correct result values and continue after idiv instruction
2259   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2260   ctx->Eax = (DWORD)min_jint;      // result
2261   ctx->Edx = (DWORD)0;             // remainder
2262   // Continue the execution
2263 #endif
2264   return EXCEPTION_CONTINUE_EXECUTION;
2265 }
2266 
2267 //-----------------------------------------------------------------------------
2268 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2269   PCONTEXT ctx = exceptionInfo->ContextRecord;
2270 #ifndef  _WIN64
2271   // handle exception caused by native method modifying control word
2272   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2273 
2274   switch (exception_code) {
2275   case EXCEPTION_FLT_DENORMAL_OPERAND:
2276   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2277   case EXCEPTION_FLT_INEXACT_RESULT:
2278   case EXCEPTION_FLT_INVALID_OPERATION:
2279   case EXCEPTION_FLT_OVERFLOW:
2280   case EXCEPTION_FLT_STACK_CHECK:
2281   case EXCEPTION_FLT_UNDERFLOW:
2282     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2283     if (fp_control_word != ctx->FloatSave.ControlWord) {
2284       // Restore FPCW and mask out FLT exceptions
2285       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2286       // Mask out pending FLT exceptions
2287       ctx->FloatSave.StatusWord &=  0xffffff00;
2288       return EXCEPTION_CONTINUE_EXECUTION;
2289     }
2290   }
2291 
2292   if (prev_uef_handler != NULL) {
2293     // We didn't handle this exception so pass it to the previous
2294     // UnhandledExceptionFilter.
2295     return (prev_uef_handler)(exceptionInfo);
2296   }
2297 #else // !_WIN64
2298   // On Windows, the mxcsr control bits are non-volatile across calls
2299   // See also CR 6192333
2300   //
2301   jint MxCsr = INITIAL_MXCSR;
2302   // we can't use StubRoutines::addr_mxcsr_std()
2303   // because in Win64 mxcsr is not saved there
2304   if (MxCsr != ctx->MxCsr) {
2305     ctx->MxCsr = MxCsr;
2306     return EXCEPTION_CONTINUE_EXECUTION;
2307   }
2308 #endif // !_WIN64
2309 
2310   return EXCEPTION_CONTINUE_SEARCH;
2311 }
2312 
2313 static inline void report_error(Thread* t, DWORD exception_code,
2314                                 address addr, void* siginfo, void* context) {
2315   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2316 
2317   // If UseOsErrorReporting, this will return here and save the error file
2318   // somewhere where we can find it in the minidump.
2319 }
2320 
2321 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2322         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2323   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2324   address addr = (address) exceptionRecord->ExceptionInformation[1];
2325   if (Interpreter::contains(pc)) {
2326     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2327     if (!fr->is_first_java_frame()) {
2328       // get_frame_at_stack_banging_point() is only called when we
2329       // have well defined stacks so java_sender() calls do not need
2330       // to assert safe_for_sender() first.
2331       *fr = fr->java_sender();
2332     }
2333   } else {
2334     // more complex code with compiled code
2335     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2336     CodeBlob* cb = CodeCache::find_blob(pc);
2337     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2338       // Not sure where the pc points to, fallback to default
2339       // stack overflow handling
2340       return false;
2341     } else {
2342       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2343       // in compiled code, the stack banging is performed just after the return pc
2344       // has been pushed on the stack
2345       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2346       if (!fr->is_java_frame()) {
2347         // See java_sender() comment above.
2348         *fr = fr->java_sender();
2349       }
2350     }
2351   }
2352   assert(fr->is_java_frame(), "Safety check");
2353   return true;
2354 }
2355 
2356 #if INCLUDE_AOT
2357 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2358   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2359   address addr = (address) exceptionRecord->ExceptionInformation[1];
2360   address pc = (address) exceptionInfo->ContextRecord->Rip;
2361 
2362   // Handle the case where we get an implicit exception in AOT-generated
2363   // code.  Loaded AOT DLLs are not registered for structured exception handling.
2364   // If the exception occurred in the codeCache or AOT code, pass control
2365   // to our normal exception handler.
2366   CodeBlob* cb = CodeCache::find_blob(pc);
2367   if (cb != NULL) {
2368     return topLevelExceptionFilter(exceptionInfo);
2369   }
2370 
2371   return EXCEPTION_CONTINUE_SEARCH;
2372 }
2373 #endif
2374 
2375 //-----------------------------------------------------------------------------
2376 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2377   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2378   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2379 #ifdef _M_AMD64
2380   address pc = (address) exceptionInfo->ContextRecord->Rip;
2381 #else
2382   address pc = (address) exceptionInfo->ContextRecord->Eip;
2383 #endif
2384   Thread* t = Thread::current_or_null_safe();
2385 
2386   // Handle SafeFetch32 and SafeFetchN exceptions.
2387   if (StubRoutines::is_safefetch_fault(pc)) {
2388     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2389   }
2390 
2391 #ifndef _WIN64
2392   // Execution protection violation - win32 running on AMD64 only
2393   // Handled first to avoid misdiagnosis as a "normal" access violation;
2394   // This is safe to do because we have a new/unique ExceptionInformation
2395   // code for this condition.
2396   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2397     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2398     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2399     address addr = (address) exceptionRecord->ExceptionInformation[1];
2400 
2401     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2402       int page_size = os::vm_page_size();
2403 
2404       // Make sure the pc and the faulting address are sane.
2405       //
2406       // If an instruction spans a page boundary, and the page containing
2407       // the beginning of the instruction is executable but the following
2408       // page is not, the pc and the faulting address might be slightly
2409       // different - we still want to unguard the 2nd page in this case.
2410       //
2411       // 15 bytes seems to be a (very) safe value for max instruction size.
2412       bool pc_is_near_addr =
2413         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
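           // align_down(pc ^ addr, page_size) is nonzero exactly when pc and addr
           // differ above the page-offset bits, i.e. lie on different pages.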
2414       bool instr_spans_page_boundary =
2415         (align_down((intptr_t) pc ^ (intptr_t) addr,
2416                          (intptr_t) page_size) > 0);
2417 
2418       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2419         static volatile address last_addr =
2420           (address) os::non_memory_address_word();
2421 
2422         // In conservative mode, don't unguard unless the address is in the VM
2423         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2424             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2425 
2426           // Set memory to RWX and retry
2427           address page_start = align_down(addr, page_size);
2428           bool res = os::protect_memory((char*) page_start, page_size,
2429                                         os::MEM_PROT_RWX);
2430 
2431           log_debug(os)("Execution protection violation "
2432                         "at " INTPTR_FORMAT
2433                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2434                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2435 
2436           // Set last_addr so if we fault again at the same address, we don't
2437           // end up in an endless loop.
2438           //
2439           // There are two potential complications here.  Two threads trapping
2440           // at the same address at the same time could cause one of the
2441           // threads to think it already unguarded, and abort the VM.  Likely
2442           // very rare.
2443           //
2444           // The other race involves two threads alternately trapping at
2445           // different addresses and failing to unguard the page, resulting in
2446           // an endless loop.  This condition is probably even more unlikely
2447           // than the first.
2448           //
2449           // Although both cases could be avoided by using locks or thread
2450           // local last_addr, these solutions are unnecessary complication:
2451           // this handler is a best-effort safety net, not a complete solution.
2452           // It is disabled by default and should only be used as a workaround
2453           // in case we missed any no-execute-unsafe VM code.
2454 
2455           last_addr = addr;
2456 
2457           return EXCEPTION_CONTINUE_EXECUTION;
2458         }
2459       }
2460 
2461       // Last unguard failed or not unguarding
2462       tty->print_raw_cr("Execution protection violation");
2463       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2464                    exceptionInfo->ContextRecord);
2465       return EXCEPTION_CONTINUE_SEARCH;
2466     }
2467   }
2468 #endif // _WIN64
2469 
2470   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2471       VM_Version::is_cpuinfo_segv_addr(pc)) {
2472     // Verify that the OS saves/restores AVX registers.
2473     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2474   }
2475 
2476   if (t != NULL && t->is_Java_thread()) {
2477     JavaThread* thread = (JavaThread*) t;
2478     bool in_java = thread->thread_state() == _thread_in_Java;
2479 
2480     // Handle potential stack overflows up front.
2481     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2482       if (thread->stack_guards_enabled()) {
2483         if (in_java) {
2484           frame fr;
2485           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2486           address addr = (address) exceptionRecord->ExceptionInformation[1];
2487           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2488             assert(fr.is_java_frame(), "Must be a Java frame");
2489             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2490           }
2491         }
2492         // Yellow zone violation.  The OS has unprotected the first yellow
2493         // zone page for us.  Note: must call disable_stack_yellow_reserved_zone()
2494         // to update the enabled status, even if the zone contains only one page.
2495         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2496         thread->disable_stack_yellow_reserved_zone();
2497         // If not in java code, return and hope for the best.
2498         return in_java
2499             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2500             :  EXCEPTION_CONTINUE_EXECUTION;
2501       } else {
2502         // Fatal red zone violation.
2503         thread->disable_stack_red_zone();
2504         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2505         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2506                       exceptionInfo->ContextRecord);
2507         return EXCEPTION_CONTINUE_SEARCH;
2508       }
2509     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2510       // Either stack overflow or null pointer exception.
2511       if (in_java) {
2512         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2513         address addr = (address) exceptionRecord->ExceptionInformation[1];
2514         address stack_end = thread->stack_end();
2515         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2516           // Stack overflow.
2517           assert(!os::uses_stack_guard_pages(),
2518                  "should be caught by red zone code above.");
2519           return Handle_Exception(exceptionInfo,
2520                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2521         }
2522         // Check for safepoint polling and implicit null pointer exceptions.
2523         // We only expect null pointers in the stubs (vtable);
2524         // the rest are checked explicitly now.
2525         CodeBlob* cb = CodeCache::find_blob(pc);
2526         if (cb != NULL) {
2527           if (os::is_poll_address(addr)) {
2528             address stub = SharedRuntime::get_poll_stub(pc);
2529             return Handle_Exception(exceptionInfo, stub);
2530           }
2531         }
2532         {
2533 #ifdef _WIN64
2534           // If it's a legal stack address, map the entire region in.
2535           //
2536           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2537           address addr = (address) exceptionRecord->ExceptionInformation[1];
2538           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2539             addr = (address)((uintptr_t)addr &
2540                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2541             os::commit_memory((char *)addr, thread->stack_base() - addr,
2542                               !ExecMem);
2543             return EXCEPTION_CONTINUE_EXECUTION;
2544           } else
2545 #endif
2546           {
2547             // Null pointer exception.
2548             if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2549               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2550               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2551             }
2552             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2553                          exceptionInfo->ContextRecord);
2554             return EXCEPTION_CONTINUE_SEARCH;
2555           }
2556         }
2557       }
2558 
2559 #ifdef _WIN64
2560       // Special care for fast JNI field accessors.
2561       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2562       // in and the heap gets shrunk before the field access.
2563       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2564         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2565         if (addr != (address)-1) {
2566           return Handle_Exception(exceptionInfo, addr);
2567         }
2568       }
2569 #endif
2570 
2571       // Stack overflow or null pointer exception in native code.
2572       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2573                    exceptionInfo->ContextRecord);
2574       return EXCEPTION_CONTINUE_SEARCH;
2575     } // /EXCEPTION_ACCESS_VIOLATION
2576     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2577 
2578     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2579       CompiledMethod* nm = NULL;
2580       JavaThread* thread = (JavaThread*)t;
2581       if (in_java) {
2582         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2583         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2584       }
2585 
2586       bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2587       if (((thread->thread_state() == _thread_in_vm ||
2588            thread->thread_state() == _thread_in_native ||
2589            is_unsafe_arraycopy) &&
2590           thread->doing_unsafe_access()) ||
2591           (nm != NULL && nm->has_unsafe_access())) {
2592         address next_pc =  Assembler::locate_next_instruction(pc);
2593         if (is_unsafe_arraycopy) {
2594           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2595         }
2596         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2597       }
2598     }
2599 
2600     if (in_java) {
2601       switch (exception_code) {
2602       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2603         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2604 
2605       case EXCEPTION_INT_OVERFLOW:
2606         return Handle_IDiv_Exception(exceptionInfo);
2607 
2608       } // switch
2609     }
2610     if (((thread->thread_state() == _thread_in_Java) ||
2611          (thread->thread_state() == _thread_in_native)) &&
2612          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2613       LONG result = Handle_FLT_Exception(exceptionInfo);
2614       if (result == EXCEPTION_CONTINUE_EXECUTION) return result;
2615     }
2616   }
2617 
2618   if (exception_code != EXCEPTION_BREAKPOINT) {
2619     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2620                  exceptionInfo->ContextRecord);
2621   }
2622   return EXCEPTION_CONTINUE_SEARCH;
2623 }
2624 
2625 #ifndef _WIN64
2626 // Special care for fast JNI accessors.
2627 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2628 // the heap gets shrunk before the field access.
2629 // Need to install our own structured exception handler since native code may
2630 // install its own.
2631 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2632   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2633   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2634     address pc = (address) exceptionInfo->ContextRecord->Eip;
2635     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2636     if (addr != (address)-1) {
2637       return Handle_Exception(exceptionInfo, addr);
2638     }
2639   }
2640   return EXCEPTION_CONTINUE_SEARCH;
2641 }
2642 
2643 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2644   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2645                                                      jobject obj,           \
2646                                                      jfieldID fieldID) {    \
2647     __try {                                                                 \
2648       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2649                                                                  obj,       \
2650                                                                  fieldID);  \
2651     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2652                                               _exception_info())) {         \
2653     }                                                                       \
2654     return 0;                                                               \
2655   }
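     // e.g. DEFINE_FAST_GETFIELD(jint, int, Int) defines jni_fast_GetIntField_wrapper,
     // which calls the generated fast accessor inside a __try/__except block guarded
     // by fastJNIAccessorExceptionFilter above.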
2656 
2657 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2658 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2659 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2660 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2661 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2662 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2663 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2664 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2665 
2666 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2667   switch (type) {
2668   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2669   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2670   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2671   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2672   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2673   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2674   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2675   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2676   default:        ShouldNotReachHere();
2677   }
2678   return (address)-1;
2679 }
2680 #endif
2681 
2682 // Virtual Memory
2683 
2684 int os::vm_page_size() { return os::win32::vm_page_size(); }
2685 int os::vm_allocation_granularity() {
2686   return os::win32::vm_allocation_granularity();
2687 }
2688 
2689 // Windows large page support is available on Windows 2003. In order to use
2690 // large page memory, the administrator must first assign additional privilege
2691 // to the user:
2692 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2693 //   + select Local Policies -> User Rights Assignment
2694 //   + double click "Lock pages in memory", add users and/or groups
2695 //   + reboot
2696 // Note the above steps are needed for administrator as well, as administrators
2697 // by default do not have the privilege to lock pages in memory.
2698 //
2699 // Note about Windows 2003: although the API supports committing large page
2700 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2701 // scenario, experimentation shows it only uses large pages if the entire
2702 // memory region is reserved and committed in a single VirtualAlloc() call.
2703 // This makes Windows large page support more or less like Solaris ISM, in
2704 // that the entire heap must be committed upfront. This probably will change
2705 // in the future, if so the code below needs to be revisited.
2706 
2707 #ifndef MEM_LARGE_PAGES
2708   #define MEM_LARGE_PAGES 0x20000000
2709 #endif
2710 
2711 static HANDLE    _hProcess;
2712 static HANDLE    _hToken;
2713 
2714 // Container for NUMA node list info
2715 class NUMANodeListHolder {
2716  private:
2717   int *_numa_used_node_list;  // allocated below
2718   int _numa_used_node_count;
2719 
2720   void free_node_list() {
2721     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2722   }
2723 
2724  public:
2725   NUMANodeListHolder() {
2726     _numa_used_node_count = 0;
2727     _numa_used_node_list = NULL;
2728     // do rest of initialization in build routine (after function pointers are set up)
2729   }
2730 
2731   ~NUMANodeListHolder() {
2732     free_node_list();
2733   }
2734 
2735   bool build() {
2736     DWORD_PTR proc_aff_mask;
2737     DWORD_PTR sys_aff_mask;
2738     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2739     ULONG highest_node_number;
2740     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2741     free_node_list();
2742     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2743     for (unsigned int i = 0; i <= highest_node_number; i++) {
2744       ULONGLONG proc_mask_numa_node;
2745       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2746       if ((proc_aff_mask & proc_mask_numa_node) != 0) {
2747         _numa_used_node_list[_numa_used_node_count++] = i;
2748       }
2749     }
2750     return (_numa_used_node_count > 1);
2751   }
2752 
2753   int get_count() { return _numa_used_node_count; }
2754   int get_node_list_entry(int n) {
2755     // for indexes out of range, returns -1
2756     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2757   }
2758 
2759 } numa_node_list_holder;
2760 
2761 
2762 
2763 static size_t _large_page_size = 0;
2764 
2765 static bool request_lock_memory_privilege() {
2766   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2767                           os::current_process_id());
2768 
2769   LUID luid;
2770   if (_hProcess != NULL &&
2771       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2772       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2773 
2774     TOKEN_PRIVILEGES tp;
2775     tp.PrivilegeCount = 1;
2776     tp.Privileges[0].Luid = luid;
2777     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2778 
2779     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2780     // privilege. Check GetLastError() too. See MSDN document.
2781     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2782         (GetLastError() == ERROR_SUCCESS)) {
2783       return true;
2784     }
2785   }
2786 
2787   return false;
2788 }
2789 
2790 static void cleanup_after_large_page_init() {
2791   if (_hProcess) CloseHandle(_hProcess);
2792   _hProcess = NULL;
2793   if (_hToken) CloseHandle(_hToken);
2794   _hToken = NULL;
2795 }
2796 
2797 static bool numa_interleaving_init() {
2798   bool success = false;
2799   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2800 
2801   // print a warning if UseNUMAInterleaving flag is specified on command line
2802   bool warn_on_failure = use_numa_interleaving_specified;
2803 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2804 
2805   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2806   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2807   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2808 
2809   if (numa_node_list_holder.build()) {
2810     if (log_is_enabled(Debug, os, cpu)) {
2811       Log(os, cpu) log;
2812       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2813       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2814         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2815       }
2816     }
2817     success = true;
2818   } else {
2819     WARN("Process does not cover multiple NUMA nodes.");
2820   }
2821   if (!success) {
2822     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2823   }
2824   return success;
2825 #undef WARN
2826 }
2827 
2828 // This routine is used whenever we need to reserve a contiguous VA range
2829 // but have to make separate VirtualAlloc calls for each piece of the range.
2830 // Reasons for doing this:
2831 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but may be set otherwise)
2832 //  * UseNUMAInterleaving requires a separate node for each piece
2833 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2834                                          DWORD prot,
2835                                          bool should_inject_error = false) {
2836   char * p_buf;
2837   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2838   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2839   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2840 
2841   // First reserve enough address space in advance, since we want to be
2842   // able to break a single contiguous virtual address range into multiple
2843   // large page commits.  WS2003 does not allow reserving large page space,
2844   // so we just use 4K pages for the reserve; this gives us a legal contiguous
2845   // address space.  Then we deallocate that reservation and re-allocate
2846   // using large pages.
2847   const size_t size_of_reserve = bytes + chunk_size;
2848   if (bytes > size_of_reserve) {
2849     // Overflowed.
2850     return NULL;
2851   }
2852   p_buf = (char *) VirtualAlloc(addr,
2853                                 size_of_reserve,  // size of Reserve
2854                                 MEM_RESERVE,
2855                                 PAGE_READWRITE);
2856   // If reservation failed, return NULL
2857   if (p_buf == NULL) return NULL;
2858   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2859   os::release_memory(p_buf, bytes + chunk_size);
2860 
2861   // We still need to round up to a page boundary (in case we are using large pages),
2862   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with the page size);
2863   // instead we handle this in the bytes_to_rq computation below.
2864   p_buf = align_up(p_buf, page_size);
2865 
2866   // now go through and allocate one chunk at a time until all bytes are
2867   // allocated
2868   size_t  bytes_remaining = bytes;
2869   // An overflow of align_up() would have been caught above
2870   // in the calculation of size_of_reserve.
2871   char * next_alloc_addr = p_buf;
2872   HANDLE hProc = GetCurrentProcess();
2873 
2874 #ifdef ASSERT
2875   // Variable for the failure injection
2876   int ran_num = os::random();
2877   size_t fail_after = ran_num % bytes;
2878 #endif
2879 
2880   int count = 0;
2881   while (bytes_remaining) {
2882     // select bytes_to_rq to get to the next chunk_size boundary
2883 
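         // e.g. with a 2M chunk_size and next_alloc_addr 512K past a chunk boundary,
         // the first request is min(bytes_remaining, 1.5M) so that subsequent
         // requests start on chunk boundaries.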
2884     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2885     // Note allocate and commit
2886     char * p_new;
2887 
2888 #ifdef ASSERT
2889     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2890 #else
2891     const bool inject_error_now = false;
2892 #endif
2893 
2894     if (inject_error_now) {
2895       p_new = NULL;
2896     } else {
2897       if (!UseNUMAInterleaving) {
2898         p_new = (char *) VirtualAlloc(next_alloc_addr,
2899                                       bytes_to_rq,
2900                                       flags,
2901                                       prot);
2902       } else {
2903         // get the next node to use from the used_node_list
2904         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2905         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2906         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
2907       }
2908     }
2909 
2910     if (p_new == NULL) {
2911       // Free any allocated pages
2912       if (next_alloc_addr > p_buf) {
2913         // Some memory was committed so release it.
2914         size_t bytes_to_release = bytes - bytes_remaining;
2915         // NMT has yet to record any individual blocks, so we
2916         // need to create a dummy 'reserve' record to match
2917         // the release.
2918         MemTracker::record_virtual_memory_reserve((address)p_buf,
2919                                                   bytes_to_release, CALLER_PC);
2920         os::release_memory(p_buf, bytes_to_release);
2921       }
2922 #ifdef ASSERT
2923       if (should_inject_error) {
2924         log_develop_debug(pagesize)("Reserving pages individually failed.");
2925       }
2926 #endif
2927       return NULL;
2928     }
2929 
2930     bytes_remaining -= bytes_to_rq;
2931     next_alloc_addr += bytes_to_rq;
2932     count++;
2933   }
2934   // Although the memory is allocated individually, it is returned as one.
2935   // NMT records it as one block.
2936   if ((flags & MEM_COMMIT) != 0) {
2937     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
2938   } else {
2939     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
2940   }
2941 
2942   // made it this far, success
2943   return p_buf;
2944 }
2945 
2946 
2947 
2948 void os::large_page_init() {
2949   if (!UseLargePages) return;
2950 
2951   // print a warning if any large page related flag is specified on command line
2952   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
2953                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2954   bool success = false;
2955 
2956 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2957   if (request_lock_memory_privilege()) {
2958     size_t s = GetLargePageMinimum();
2959     if (s) {
2960 #if defined(IA32) || defined(AMD64)
2961       if (s > 4*M || LargePageSizeInBytes > 4*M) {
2962         WARN("JVM cannot use large pages bigger than 4MB.");
2963       } else {
2964 #endif
2965         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
2966           _large_page_size = LargePageSizeInBytes;
2967         } else {
2968           _large_page_size = s;
2969         }
2970         success = true;
2971 #if defined(IA32) || defined(AMD64)
2972       }
2973 #endif
2974     } else {
2975       WARN("Large pages are not supported by the processor.");
2976     }
2977   } else {
2978     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
2979   }
2980 #undef WARN
2981 
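       // Publish the supported page sizes, largest first and terminated by 0.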
2982   const size_t default_page_size = (size_t) vm_page_size();
2983   if (success && _large_page_size > default_page_size) {
2984     _page_sizes[0] = _large_page_size;
2985     _page_sizes[1] = default_page_size;
2986     _page_sizes[2] = 0;
2987   }
2988 
2989   cleanup_after_large_page_init();
2990   UseLargePages = success;
2991 }
2992 
2993 int os::create_file_for_heap(const char* dir) {
2994 
2995   const char name_template[] = "/jvmheap.XXXXXX";
2996 
2997   size_t fullname_len = strlen(dir) + strlen(name_template);
2998   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
2999   if (fullname == NULL) {
3000     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3001     return -1;
3002   }
3003   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3004   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3005 
3006   os::native_path(fullname);
3007 
3008   char *path = _mktemp(fullname);
3009   if (path == NULL) {
3010     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3011     os::free(fullname);
3012     return -1;
3013   }
3014 
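       // O_TEMPORARY causes the file to be deleted automatically when the last
       // file descriptor referring to it is closed; O_EXCL makes the open fail
       // if the file already exists.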
3015   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3016 
3017   os::free(fullname);
3018   if (fd < 0) {
3019     warning("Problem opening file for heap (%s)", os::strerror(errno));
3020     return -1;
3021   }
3022   return fd;
3023 }
3024 
3025 // If 'base' is not NULL, the function will return NULL if it cannot map the file at 'base'.
3026 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3027   assert(fd != -1, "File descriptor is not valid");
3028 
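       // Convert the CRT file descriptor to the underlying Win32 HANDLE and create
       // a file mapping object large enough to back 'size' bytes.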
3029   HANDLE fh = (HANDLE)_get_osfhandle(fd);
3030 #ifdef _LP64
3031   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3032     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3033 #else
3034   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3035     0, (DWORD)size, NULL);
3036 #endif
3037   if (fileMapping == NULL) {
3038     if (GetLastError() == ERROR_DISK_FULL) {
3039       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3040     }
3041     else {
3042       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3043     }
3044 
3045     return NULL;
3046   }
3047 
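       // Map a view of the file at 'base' (or at an address chosen by the OS if
       // 'base' is NULL). The view stays valid after the mapping handle is closed below.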
3048   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3049 
3050   CloseHandle(fileMapping);
3051 
3052   return (char*)addr;
3053 }
3054 
3055 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3056   assert(fd != -1, "File descriptor is not valid");
3057   assert(base != NULL, "Base address cannot be NULL");
3058 
3059   release_memory(base, size);
3060   return map_memory_to_file(base, size, fd);
3061 }
3062 
3063 // On win32, one cannot release just a part of reserved memory; it's an
3064 // all-or-nothing deal.  When we split a reservation, we must break the
3065 // reservation into two reservations.
3066 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3067                                   bool realloc) {
3068   if (size > 0) {
3069     release_memory(base, size);
3070     if (realloc) {
3071       reserve_memory(split, base);
3072     }
3073     if (size != split) {
3074       reserve_memory(size - split, base + split);
3075     }
3076   }
3077 }
3078 
3079 // Multiple threads can race in this code, but unlike on POSIX-like OSes it is not
3080 // possible to unmap small sections of virtual space to get the requested alignment.
3081 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3082 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3083   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3084          "Alignment must be a multiple of the allocation granularity");
3085   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3086 
3087   size_t extra_size = size + alignment;
3088   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3089 
3090   char* aligned_base = NULL;
3091 
3092   do {
3093     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3094     if (extra_base == NULL) {
3095       return NULL;
3096     }
3097     // Do manual alignment
3098     aligned_base = align_up(extra_base, alignment);
3099 
3100     if (file_desc != -1) {
3101       os::unmap_memory(extra_base, extra_size);
3102     } else {
3103       os::release_memory(extra_base, extra_size);
3104     }
3105 
3106     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3107 
3108   } while (aligned_base == NULL);
3109 
3110   return aligned_base;
3111 }
3112 
3113 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3114   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3115          "reserve alignment");
3116   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3117   char* res;
3118   // Note that if UseLargePages is on, all the areas that require interleaving
3119   // will go through reserve_memory_special rather than through here.
3120   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3121   if (!use_individual) {
3122     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3123   } else {
3124     elapsedTimer reserveTimer;
3125     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3126     // in numa interleaving, we have to allocate pages individually
3127     // (well really chunks of NUMAInterleaveGranularity size)
3128     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3129     if (res == NULL) {
3130       warning("NUMA page allocation failed");
3131     }
3132     if (Verbose && PrintMiscellaneous) {
3133       reserveTimer.stop();
3134       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3135                     reserveTimer.milliseconds(), reserveTimer.ticks());
3136     }
3137   }
3138   assert(res == NULL || addr == NULL || addr == res,
3139          "Unexpected address from reserve.");
3140 
3141   return res;
3142 }
3143 
3144 // Reserve memory at an arbitrary address, only if that area is
3145 // available (and not reserved for something else).
3146 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3147   // Windows os::reserve_memory() fails if the requested address range is
3148   // not available.
3149   return reserve_memory(bytes, requested_addr);
3150 }
3151 
3152 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3153   assert(file_desc >= 0, "file_desc is not valid");
3154   return map_memory_to_file(requested_addr, bytes, file_desc);
3155 }
3156 
3157 size_t os::large_page_size() {
3158   return _large_page_size;
3159 }
3160 
3161 bool os::can_commit_large_page_memory() {
3162   // Windows only uses large page memory when the entire region is reserved
3163   // and committed in a single VirtualAlloc() call. This may change in the
3164   // future, but with Windows 2003 it's not possible to commit on demand.
3165   return false;
3166 }
3167 
3168 bool os::can_execute_large_page_memory() {
3169   return true;
3170 }
3171 
3172 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3173                                  bool exec) {
3174   assert(UseLargePages, "only for large pages");
3175 
3176   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3177     return NULL; // Fallback to small pages.
3178   }
3179 
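       // Large pages must be reserved and committed in the same VirtualAlloc call,
       // hence MEM_RESERVE | MEM_COMMIT together with MEM_LARGE_PAGES.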
3180   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3181   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3182 
3183   // with large pages, there are two cases where we need to use Individual Allocation
3184   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3185   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3186   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3187     log_debug(pagesize)("Reserving large pages individually.");
3188 
3189     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3190     if (p_buf == NULL) {
3191       // give an appropriate warning message
3192       if (UseNUMAInterleaving) {
3193         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3194       }
3195       if (UseLargePagesIndividualAllocation) {
3196         warning("Individually allocated large pages failed, "
3197                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3198       }
3199       return NULL;
3200     }
3201 
3202     return p_buf;
3203 
3204   } else {
3205     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3206 
3207     // normal policy just allocate it all at once
3208     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3209     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3210     if (res != NULL) {
3211       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3212     }
3213 
3214     return res;
3215   }
3216 }
3217 
3218 bool os::release_memory_special(char* base, size_t bytes) {
3219   assert(base != NULL, "Sanity check");
3220   return release_memory(base, bytes);
3221 }
3222 
3223 void os::print_statistics() {
3224 }
3225 
3226 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3227   int err = os::get_last_error();
3228   char buf[256];
3229   size_t buf_len = os::lasterror(buf, sizeof(buf));
3230   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3231           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3232           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3233 }
3234 
3235 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3236   if (bytes == 0) {
3237     // Don't bother the OS with noops.
3238     return true;
3239   }
3240   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3241   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3242   // Don't attempt to print anything if the OS call fails. We're
3243   // probably low on resources, so the print itself may cause crashes.
3244 
3245   // Unless we have NUMAInterleaving enabled, the range of a commit
3246   // is always within a reserve covered by a single VirtualAlloc;
3247   // in that case we can just do a single commit for the requested size.
3248   if (!UseNUMAInterleaving) {
3249     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3250       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3251       return false;
3252     }
3253     if (exec) {
3254       DWORD oldprot;
3255       // Windows doc says to use VirtualProtect to get execute permissions
3256       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3257         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3258         return false;
3259       }
3260     }
3261     return true;
3262   } else {
3263 
3264     // when NUMAInterleaving is enabled, the commit might cover a range that
3265     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3266     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3267     // returns represents the number of bytes that can be committed in one step.
3268     size_t bytes_remaining = bytes;
3269     char * next_alloc_addr = addr;
3270     while (bytes_remaining > 0) {
3271       MEMORY_BASIC_INFORMATION alloc_info;
3272       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3273       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3274       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3275                        PAGE_READWRITE) == NULL) {
3276         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3277                                             exec);)
3278         return false;
3279       }
3280       if (exec) {
3281         DWORD oldprot;
3282         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3283                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3284           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3285                                               exec);)
3286           return false;
3287         }
3288       }
3289       bytes_remaining -= bytes_to_rq;
3290       next_alloc_addr += bytes_to_rq;
3291     }
3292   }
3293   // if we made it this far, return true
3294   return true;
3295 }
3296 
3297 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3298                           bool exec) {
3299   // alignment_hint is ignored on this OS
3300   return pd_commit_memory(addr, size, exec);
3301 }
3302 
3303 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3304                                   const char* mesg) {
3305   assert(mesg != NULL, "mesg must be specified");
3306   if (!pd_commit_memory(addr, size, exec)) {
3307     warn_fail_commit_memory(addr, size, exec);
3308     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3309   }
3310 }
3311 
3312 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3313                                   size_t alignment_hint, bool exec,
3314                                   const char* mesg) {
3315   // alignment_hint is ignored on this OS
3316   pd_commit_memory_or_exit(addr, size, exec, mesg);
3317 }
3318 
3319 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3320   if (bytes == 0) {
3321     // Don't bother the OS with noops.
3322     return true;
3323   }
3324   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3325   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3326   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3327 }
3328 
3329 bool os::pd_release_memory(char* addr, size_t bytes) {
3330   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3331 }
3332 
3333 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3334   return os::commit_memory(addr, size, !ExecMem);
3335 }
3336 
3337 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3338   return os::uncommit_memory(addr, size);
3339 }
3340 
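     // Changes the protection of the range [addr, addr+bytes) one allocation region
     // at a time, as required when the range was built up by allocate_pages_individually()
     // from multiple VirtualAlloc reservations.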
3341 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3342   uint count = 0;
3343   bool ret = false;
3344   size_t bytes_remaining = bytes;
3345   char * next_protect_addr = addr;
3346 
3347   // Use VirtualQuery() to get the chunk size.
3348   while (bytes_remaining) {
3349     MEMORY_BASIC_INFORMATION alloc_info;
3350     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3351       return false;
3352     }
3353 
3354     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3355     // We used a different API in allocate_pages_individually() based on UseNUMAInterleaving,
3356     // but we don't distinguish here as both cases are protected by the same API.
3357     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3358     if (!ret) {
3359       warning("Failed protecting pages individually for chunk #%u", count);
3360       return false;
3361     }
3362 
3363     bytes_remaining -= bytes_to_protect;
3364     next_protect_addr += bytes_to_protect;
3365     count++;
3366   }
3367   return ret;
3368 }
3369 
3370 // Set protections specified
3371 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3372                         bool is_committed) {
3373   unsigned int p = 0;
3374   switch (prot) {
3375   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3376   case MEM_PROT_READ: p = PAGE_READONLY; break;
3377   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3378   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3379   default:
3380     ShouldNotReachHere();
3381   }
3382 
3383   DWORD old_status;
3384 
3385   // Strangely enough, on Win32 one can change protection only for committed
3386   // memory. Not a big deal anyway, as 'bytes' is at most 64K here.
3387   if (!is_committed) {
3388     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3389                           "cannot commit protection page");
3390   }
3391   // One cannot use os::guard_memory() here, as on Win32 guard pages
3392   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3393   //
3394   // Pages in the region become guard pages. Any attempt to access a guard page
3395   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3396   // the guard page status. Guard pages thus act as a one-time access alarm.
3397   bool ret;
3398   if (UseNUMAInterleaving) {
3399     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3400     // so we must protect the chunks individually.
3401     ret = protect_pages_individually(addr, bytes, p, &old_status);
3402   } else {
3403     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3404   }
3405 #ifdef ASSERT
3406   if (!ret) {
3407     int err = os::get_last_error();
3408     char buf[256];
3409     size_t buf_len = os::lasterror(buf, sizeof(buf));
3410     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3411           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3412           buf_len != 0 ? buf : "<no_error_string>", err);
3413   }
3414 #endif
3415   return ret;
3416 }
3417 
3418 bool os::guard_memory(char* addr, size_t bytes) {
3419   DWORD old_status;
3420   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3421 }
3422 
3423 bool os::unguard_memory(char* addr, size_t bytes) {
3424   DWORD old_status;
3425   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3426 }
3427 
3428 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3429 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3430 void os::numa_make_global(char *addr, size_t bytes)    { }
3431 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3432 bool os::numa_topology_changed()                       { return false; }
3433 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3434 int os::numa_get_group_id()                            { return 0; }
3435 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3436   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3437     // Provide an answer for UMA systems
3438     ids[0] = 0;
3439     return 1;
3440   } else {
3441     // check for size bigger than actual groups_num
3442     size = MIN2(size, numa_get_groups_num());
3443     for (int i = 0; i < (int)size; i++) {
3444       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3445     }
3446     return size;
3447   }
3448 }
3449 
3450 int os::numa_get_group_id_for_address(const void* address) {
3451   return 0;
3452 }
3453 
3454 bool os::get_page_info(char *start, page_info* info) {
3455   return false;
3456 }
3457 
3458 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3459                      page_info* page_found) {
3460   return end;
3461 }
3462 
3463 char* os::non_memory_address_word() {
3464   // Must never look like an address returned by reserve_memory,
3465   // even in its subfields (as defined by the CPU immediate fields,
3466   // if the CPU splits constants across multiple instructions).
3467   return (char*)-1;
3468 }
3469 
3470 #define MAX_ERROR_COUNT 100
3471 #define SYS_THREAD_ERROR 0xffffffffUL
3472 
3473 void os::pd_start_thread(Thread* thread) {
3474   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3475   // Returns previous suspend state:
3476   // 0:  Thread was not suspended
3477   // 1:  Thread is running now
3478   // >1: Thread is still suspended.
3479   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3480 }
3481 
3482 
3483 // Short sleep, direct OS call.
3484 //
3485 // ms = 0 means yield, allowing other ready threads (if any) to run.
3486 //
3487 void os::naked_short_sleep(jlong ms) {
3488   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3489   Sleep(ms);
3490 }
3491 
3492 // Windows does not provide sleep functionality with nanosecond resolution, so we
3493 // try to approximate this with spinning combined with yielding if another thread
3494 // is ready to run on the current processor.
3495 void os::naked_short_nanosleep(jlong ns) {
3496   assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3497 
3498   int64_t start = os::javaTimeNanos();
3499   do {
3500     if (SwitchToThread() == 0) {
3501       // Nothing else is ready to run on this cpu, spin a little
3502       SpinPause();
3503     }
3504   } while (os::javaTimeNanos() - start < ns);
3505 }
3506 
3507 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3508 void os::infinite_sleep() {
3509   while (true) {    // sleep forever ...
3510     Sleep(100000);  // ... 100 seconds at a time
3511   }
3512 }
3513 
3514 typedef BOOL (WINAPI * STTSignature)(void);
3515 
3516 void os::naked_yield() {
3517   // Consider passing back the return value from SwitchToThread().
3518   SwitchToThread();
3519 }
3520 
3521 // Win32 only gives you access to seven real priorities at a time,
3522 // so we compress Java's ten down to seven.  It would be better
3523 // if we dynamically adjusted relative priorities.
3524 
3525 int os::java_to_os_priority[CriticalPriority + 1] = {
3526   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3527   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3528   THREAD_PRIORITY_LOWEST,                       // 2
3529   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3530   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3531   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3532   THREAD_PRIORITY_NORMAL,                       // 6
3533   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3534   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3535   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3536   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3537   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3538 };
3539 
3540 int prio_policy1[CriticalPriority + 1] = {
3541   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3542   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3543   THREAD_PRIORITY_LOWEST,                       // 2
3544   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3545   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3546   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3547   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3548   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3549   THREAD_PRIORITY_HIGHEST,                      // 8
3550   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3551   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3552   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3553 };
3554 
3555 static int prio_init() {
3556   // If ThreadPriorityPolicy is 1, switch tables
3557   if (ThreadPriorityPolicy == 1) {
3558     int i;
3559     for (i = 0; i < CriticalPriority + 1; i++) {
3560       os::java_to_os_priority[i] = prio_policy1[i];
3561     }
3562   }
3563   if (UseCriticalJavaThreadPriority) {
3564     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3565   }
3566   return 0;
3567 }
3568 
3569 OSReturn os::set_native_priority(Thread* thread, int priority) {
3570   if (!UseThreadPriorities) return OS_OK;
3571   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3572   return ret ? OS_OK : OS_ERR;
3573 }
3574 
3575 OSReturn os::get_native_priority(const Thread* const thread,
3576                                  int* priority_ptr) {
3577   if (!UseThreadPriorities) {
3578     *priority_ptr = java_to_os_priority[NormPriority];
3579     return OS_OK;
3580   }
3581   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3582   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3583     assert(false, "GetThreadPriority failed");
3584     return OS_ERR;
3585   }
3586   *priority_ptr = os_prio;
3587   return OS_OK;
3588 }
3589 
3590 // GetCurrentThreadId() returns DWORD
3591 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3592 
3593 static int _initial_pid = 0;
3594 
3595 int os::current_process_id() {
3596   return (_initial_pid ? _initial_pid : _getpid());
3597 }
3598 
3599 int    os::win32::_vm_page_size              = 0;
3600 int    os::win32::_vm_allocation_granularity = 0;
3601 int    os::win32::_processor_type            = 0;
3602 // Processor level is not available on non-NT systems, use vm_version instead
3603 int    os::win32::_processor_level           = 0;
3604 julong os::win32::_physical_memory           = 0;
3605 size_t os::win32::_default_stack_size        = 0;
3606 
3607 intx          os::win32::_os_thread_limit    = 0;
3608 volatile intx os::win32::_os_thread_count    = 0;
3609 
3610 bool   os::win32::_is_windows_server         = false;
3611 
3612 // 6573254
3613 // Currently, the bug is observed across all the supported Windows releases,
3614 // including the latest one (as of this writing - Windows Server 2012 R2)
3615 bool   os::win32::_has_exit_bug              = true;
3616 
3617 void os::win32::initialize_system_info() {
3618   SYSTEM_INFO si;
3619   GetSystemInfo(&si);
3620   _vm_page_size    = si.dwPageSize;
3621   _vm_allocation_granularity = si.dwAllocationGranularity;
3622   _processor_type  = si.dwProcessorType;
3623   _processor_level = si.wProcessorLevel;
3624   set_processor_count(si.dwNumberOfProcessors);
3625 
3626   MEMORYSTATUSEX ms;
3627   ms.dwLength = sizeof(ms);
3628 
3629   // GlobalMemoryStatusEx also returns ullAvailPhys (free physical memory bytes),
3630   // ullTotalVirtual, ullAvailVirtual and dwMemoryLoad (% of memory in use).
3631   GlobalMemoryStatusEx(&ms);
3632   _physical_memory = ms.ullTotalPhys;
3633 
3634   if (FLAG_IS_DEFAULT(MaxRAM)) {
3635     // Adjust MaxRAM according to the maximum virtual address space available.
3636     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3637   }
3638 
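       // Use the OS version information to detect whether we are running on a
       // server edition of Windows (domain controller or server product type).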
3639   OSVERSIONINFOEX oi;
3640   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3641   GetVersionEx((OSVERSIONINFO*)&oi);
3642   switch (oi.dwPlatformId) {
3643   case VER_PLATFORM_WIN32_NT:
3644     {
3645       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3646       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3647           oi.wProductType == VER_NT_SERVER) {
3648         _is_windows_server = true;
3649       }
3650     }
3651     break;
3652   default: fatal("Unknown platform");
3653   }
3654 
3655   _default_stack_size = os::current_stack_size();
3656   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3657   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3658          "stack size not a multiple of page size");
3659 
3660   initialize_performance_counter();
3661 }
3662 
3663 
3664 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3665                                       int ebuflen) {
3666   char path[MAX_PATH];
3667   DWORD size;
3668   DWORD pathLen = (DWORD)sizeof(path);
3669   HINSTANCE result = NULL;
3670 
3671   // only allow library name without path component
3672   assert(strchr(name, '\\') == NULL, "path not allowed");
3673   assert(strchr(name, ':') == NULL, "path not allowed");
3674   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3675     jio_snprintf(ebuf, ebuflen,
3676                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3677     return NULL;
3678   }
3679 
3680   // search system directory
3681   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3682     if (size >= pathLen) {
3683       return NULL; // truncated
3684     }
3685     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3686       return NULL; // truncated
3687     }
3688     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3689       return result;
3690     }
3691   }
3692 
3693   // try Windows directory
3694   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3695     if (size >= pathLen) {
3696       return NULL; // truncated
3697     }
3698     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3699       return NULL; // truncated
3700     }
3701     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3702       return result;
3703     }
3704   }
3705 
3706   jio_snprintf(ebuf, ebuflen,
3707                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3708   return NULL;
3709 }
3710 
3711 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3712 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3713 
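     // InitOnceExecuteOnce callback: initializes the critical section whose address
     // is passed in via the parameter argument and reports success.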
3714 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3715   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3716   return TRUE;
3717 }
3718 
3719 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3720   // Basic approach:
3721   //  - Each exiting thread registers its intent to exit and then does so.
3722   //  - A thread trying to terminate the process must wait for all
3723   //    threads currently exiting to complete their exit.
3724 
3725   if (os::win32::has_exit_bug()) {
3726     // The array holds handles of the threads that have started exiting by calling
3727     // _endthreadex().
3728     // Should be large enough to avoid blocking the exiting thread due to lack of
3729     // a free slot.
3730     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3731     static int handle_count = 0;
3732 
3733     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3734     static CRITICAL_SECTION crit_sect;
3735     static volatile DWORD process_exiting = 0;
3736     int i, j;
3737     DWORD res;
3738     HANDLE hproc, hthr;
3739 
3740     // We only attempt to register threads until a process exiting
3741     // thread manages to set the process_exiting flag. Any threads
3742     // that come through here after the process_exiting flag is set
3743     // are unregistered and will be caught in the SuspendThread()
3744     // infinite loop below.
3745     bool registered = false;
3746 
3747     // The first thread that reached this point, initializes the critical section.
3748     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3749       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3750     } else if (Atomic::load_acquire(&process_exiting) == 0) {
3751       if (what != EPT_THREAD) {
3752         // Atomically set process_exiting before the critical section
3753         // to increase the visibility between racing threads.
3754         Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
3755       }
3756       EnterCriticalSection(&crit_sect);
3757 
3758       if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
3759         // Remove from the array those handles of the threads that have completed exiting.
3760         for (i = 0, j = 0; i < handle_count; ++i) {
3761           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3762           if (res == WAIT_TIMEOUT) {
3763             handles[j++] = handles[i];
3764           } else {
3765             if (res == WAIT_FAILED) {
3766               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3767                       GetLastError(), __FILE__, __LINE__);
3768             }
3769             // Don't keep the handle, if we failed waiting for it.
3770             CloseHandle(handles[i]);
3771           }
3772         }
3773 
3774         // If there's no free slot in the array of the kept handles, we'll have to
3775         // wait until at least one thread completes exiting.
3776         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3777           // Raise the priority of the oldest exiting thread to increase its chances
3778           // to complete sooner.
3779           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3780           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3781           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3782             i = (res - WAIT_OBJECT_0);
3783             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3784             for (; i < handle_count; ++i) {
3785               handles[i] = handles[i + 1];
3786             }
3787           } else {
3788             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3789                     (res == WAIT_FAILED ? "failed" : "timed out"),
3790                     GetLastError(), __FILE__, __LINE__);
3791             // Don't keep handles, if we failed waiting for them.
3792             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3793               CloseHandle(handles[i]);
3794             }
3795             handle_count = 0;
3796           }
3797         }
3798 
3799         // Store a duplicate of the current thread handle in the array of handles.
3800         hproc = GetCurrentProcess();
3801         hthr = GetCurrentThread();
3802         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3803                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3804           warning("DuplicateHandle failed (%u) in %s: %d\n",
3805                   GetLastError(), __FILE__, __LINE__);
3806 
3807           // We can't register this thread (no more handles) so this thread
3808           // may be racing with a thread that is calling exit(). If the thread
3809           // that is calling exit() has managed to set the process_exiting
3810           // flag, then this thread will be caught in the SuspendThread()
3811           // infinite loop below which closes that race. A small timing
3812           // window remains before the process_exiting flag is set, but it
3813           // is only exposed when we are out of handles.
3814         } else {
3815           ++handle_count;
3816           registered = true;
3817 
3818           // The current exiting thread has stored its handle in the array, and now
3819           // should leave the critical section before calling _endthreadex().
3820         }
3821 
3822       } else if (what != EPT_THREAD && handle_count > 0) {
3823         jlong start_time, finish_time, timeout_left;
3824         // Before ending the process, make sure all the threads that had called
3825         // _endthreadex() completed.
3826 
3827         // Set the priority level of the current thread to the same value as
3828         // the priority level of exiting threads.
3829         // This is to ensure it will be given a fair chance to execute if
3830         // the timeout expires.
3831         hthr = GetCurrentThread();
3832         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3833         start_time = os::javaTimeNanos();
3834         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3835         for (i = 0; ; ) {
3836           int portion_count = handle_count - i;
3837           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3838             portion_count = MAXIMUM_WAIT_OBJECTS;
3839           }
3840           for (j = 0; j < portion_count; ++j) {
3841             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3842           }
3843           timeout_left = (finish_time - start_time) / 1000000L;
3844           if (timeout_left < 0) {
3845             timeout_left = 0;
3846           }
3847           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
3848           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
3849             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3850                     (res == WAIT_FAILED ? "failed" : "timed out"),
3851                     GetLastError(), __FILE__, __LINE__);
3852             // Reset portion_count so we close the remaining
3853             // handles due to this error.
3854             portion_count = handle_count - i;
3855           }
3856           for (j = 0; j < portion_count; ++j) {
3857             CloseHandle(handles[i + j]);
3858           }
3859           if ((i += portion_count) >= handle_count) {
3860             break;
3861           }
3862           start_time = os::javaTimeNanos();
3863         }
3864         handle_count = 0;
3865       }
3866 
3867       LeaveCriticalSection(&crit_sect);
3868     }
3869 
3870     if (!registered &&
3871         Atomic::load_acquire(&process_exiting) != 0 &&
3872         process_exiting != GetCurrentThreadId()) {
3873       // Some other thread is about to call exit(), so we don't let
3874       // the current unregistered thread proceed to exit() or _endthreadex()
3875       while (true) {
3876         SuspendThread(GetCurrentThread());
3877         // Avoid busy-wait loop, if SuspendThread() failed.
3878         Sleep(EXIT_TIMEOUT);
3879       }
3880     }
3881   }
3882 
3883   // We are here if either
3884   // - there's no 'race at exit' bug on this OS release;
3885   // - initialization of the critical section failed (unlikely);
3886   // - the current thread has registered itself and left the critical section;
3887   // - the process-exiting thread has raised the flag and left the critical section.
3888   if (what == EPT_THREAD) {
3889     _endthreadex((unsigned)exit_code);
3890   } else if (what == EPT_PROCESS) {
3891     ::exit(exit_code);
3892   } else {
3893     _exit(exit_code);
3894   }
3895 
3896   // Should not reach here
3897   return exit_code;
3898 }
3899 
3900 #undef EXIT_TIMEOUT
3901 
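     // Put the standard streams into binary mode so that output produced by the VM
     // is not subject to CRLF translation by the C runtime.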
3902 void os::win32::setmode_streams() {
3903   _setmode(_fileno(stdin), _O_BINARY);
3904   _setmode(_fileno(stdout), _O_BINARY);
3905   _setmode(_fileno(stderr), _O_BINARY);
3906 }
3907 
3908 void os::wait_for_keypress_at_exit(void) {
3909   if (PauseAtExit) {
3910     fprintf(stderr, "Press any key to continue...\n");
3911     fgetc(stdin);
3912   }
3913 }
3914 
3915 
3916 bool os::message_box(const char* title, const char* message) {
3917   int result = MessageBox(NULL, message, title,
3918                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3919   return result == IDYES;
3920 }
3921 
3922 #ifndef PRODUCT
3923 #ifndef _WIN64
3924 // Helpers to check whether NX protection is enabled
3925 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
3926   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
3927       pex->ExceptionRecord->NumberParameters > 0 &&
3928       pex->ExceptionRecord->ExceptionInformation[0] ==
3929       EXCEPTION_INFO_EXEC_VIOLATION) {
3930     return EXCEPTION_EXECUTE_HANDLER;
3931   }
3932   return EXCEPTION_CONTINUE_SEARCH;
3933 }
3934 
3935 void nx_check_protection() {
3936   // If NX is enabled we'll get an exception calling into code on the stack
3937   char code[] = { (char)0xC3 }; // ret
3938   void *code_ptr = (void *)code;
3939   __try {
3940     __asm call code_ptr
3941   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
3942     tty->print_raw_cr("NX protection detected.");
3943   }
3944 }
3945 #endif // _WIN64
3946 #endif // PRODUCT
3947 
3948 // This is called _before_ the global arguments have been parsed
3949 void os::init(void) {
3950   _initial_pid = _getpid();
3951 
3952   init_random(1234567);
3953 
3954   win32::initialize_system_info();
3955   win32::setmode_streams();
3956   init_page_sizes((size_t) win32::vm_page_size());
3957 
3958   // This may be overridden later when argument processing is done.
3959   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
3960 
3961   // Initialize main_process and main_thread
3962   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
3963   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
3964                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
3965     fatal("DuplicateHandle failed\n");
3966   }
3967   main_thread_id = (int) GetCurrentThreadId();
3968 
3969   // initialize fast thread access - only used for 32-bit
3970   win32::initialize_thread_ptr_offset();
3971 }
3972 
3973 // To install functions for atexit processing
3974 extern "C" {
3975   static void perfMemory_exit_helper() {
3976     perfMemory_exit();
3977   }
3978 }
3979 
3980 static jint initSock();
3981 
3982 // this is called _after_ the global arguments have been parsed
3983 jint os::init_2(void) {
3984 
3985   // This could be set at any time, but all platforms
3986   // have to set it the same way, so we mirror Solaris here.
3987   DEBUG_ONLY(os::set_mutex_init_done();)
3988 
3989   // Setup Windows Exceptions
3990 
3991 #if INCLUDE_AOT
3992   // If AOT is enabled we need to install a vectored exception handler
3993   // in order to forward implicit exceptions from code in AOT
3994   // generated DLLs.  This is necessary since these DLLs are not
3995   // registered for structured exceptions like codecache methods are.
3996   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
3997     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
3998   }
3999 #endif
4000 
4001   // for debugging float code generation bugs
4002   if (ForceFloatExceptions) {
4003 #ifndef  _WIN64
4004     static long fp_control_word = 0;
4005     __asm { fstcw fp_control_word }
4006     // see Intel PPro Manual, Vol. 2, p 7-16
4007     const long precision = 0x20;
4008     const long underflow = 0x10;
4009     const long overflow  = 0x08;
4010     const long zero_div  = 0x04;
4011     const long denorm    = 0x02;
4012     const long invalid   = 0x01;
4013     fp_control_word |= invalid;
4014     __asm { fldcw fp_control_word }
4015 #endif
4016   }
4017 
4018   // If stack_commit_size is 0, Windows will reserve the default size,
4019   // but only commit a small portion of it.
4020   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4021   size_t default_reserve_size = os::win32::default_stack_size();
4022   size_t actual_reserve_size = stack_commit_size;
4023   if (stack_commit_size < default_reserve_size) {
4024     // If stack_commit_size == 0, we want this too
4025     actual_reserve_size = default_reserve_size;
4026   }
4027 
4028   // Check minimum allowable stack size for thread creation and to initialize
4029   // the java system classes, including StackOverflowError - depends on page
4030   // size.  Add two 4K pages for compiler2 recursion in main thread.
4031   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4032   // class initialization depending on 32 or 64 bit VM.
4033   size_t min_stack_allowed =
4034             (size_t)(JavaThread::stack_guard_zone_size() +
4035                      JavaThread::stack_shadow_zone_size() +
4036                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4037 
4038   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4039 
4040   if (actual_reserve_size < min_stack_allowed) {
4041     tty->print_cr("\nThe Java thread stack size specified is too small. "
4042                   "Specify at least " SIZE_FORMAT "k",
4043                   min_stack_allowed / K);
4044     return JNI_ERR;
4045   }
4046 
4047   JavaThread::set_stack_size_at_create(stack_commit_size);
4048 
4049   // Calculate the theoretical maximum number of threads, to guard against artificial
4050   // out-of-memory situations where all available address space has been
4051   // reserved by thread stacks.
4052   assert(actual_reserve_size != 0, "Must have a stack");
4053 
4054   // Calculate the thread limit at which we should start doing Virtual Memory
4055   // banging. Currently this is when the threads have used all but 200MB of address space.
4056   //
4057   // TODO: consider performing a similar calculation for commit size instead
4058   // of reserve size, since on a 64-bit platform we'll run into that more
4059   // often than running out of virtual memory space.  We can use the
4060   // lower value of the two calculations as the os_thread_limit.
4061   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4062   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4063 
4064   // atexit functions are called in the reverse order of their registration.
4065   // There is no limit to the number of functions registered, and atexit
4066   // does not set errno.
4067 
4068   if (PerfAllowAtExitRegistration) {
4069     // only register atexit functions if PerfAllowAtExitRegistration is set.
4070     // atexit functions can be delayed until process exit time, which
4071     // can be problematic for embedded VM situations. Embedded VMs should
4072     // call DestroyJavaVM() to assure that VM resources are released.
4073 
4074     // note: perfMemory_exit_helper atexit function may be removed in
4075     // the future if the appropriate cleanup code can be added to the
4076     // VM_Exit VMOperation's doit method.
4077     if (atexit(perfMemory_exit_helper) != 0) {
4078       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4079     }
4080   }
4081 
4082 #ifndef _WIN64
4083   // Print something if NX is enabled (win32 on AMD64)
4084   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4085 #endif
4086 
4087   // initialize thread priority policy
4088   prio_init();
4089 
4090   if (UseNUMA && !ForceNUMA) {
4091     UseNUMA = false; // We don't fully support this yet
4092   }
4093 
4094   if (UseNUMAInterleaving) {
4095     // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4096     bool success = numa_interleaving_init();
4097     if (!success) UseNUMAInterleaving = false;
4098   }
4099 
4100   if (initSock() != JNI_OK) {
4101     return JNI_ERR;
4102   }
4103 
4104   SymbolEngine::recalc_search_path();
4105 
4106   // Initialize data for jdk.internal.misc.Signal
4107   if (!ReduceSignalUsage) {
4108     jdk_misc_signal_init();
4109   }
4110 
4111   return JNI_OK;
4112 }
4113 
4114 // Mark the polling page as unreadable
4115 void os::make_polling_page_unreadable(void) {
4116   DWORD old_status;
4117   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4118                       PAGE_NOACCESS, &old_status)) {
4119     fatal("Could not disable polling page");
4120   }
4121 }
4122 
4123 // Mark the polling page as readable
4124 void os::make_polling_page_readable(void) {
4125   DWORD old_status;
4126   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4127                       PAGE_READONLY, &old_status)) {
4128     fatal("Could not enable polling page");
4129   }
4130 }
4131 
4132 // combine the high and low DWORD into a ULONGLONG
4133 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4134   ULONGLONG value = high_word;
4135   value <<= sizeof(high_word) * 8;
4136   value |= low_word;
4137   return value;
4138 }
4139 
4140 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4141 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4142   ::memset((void*)sbuf, 0, sizeof(struct stat));
4143   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4144   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4145                                   file_data.ftLastWriteTime.dwLowDateTime);
4146   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4147                                   file_data.ftCreationTime.dwLowDateTime);
4148   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4149                                   file_data.ftLastAccessTime.dwLowDateTime);
4150   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4151     sbuf->st_mode |= S_IFDIR;
4152   } else {
4153     sbuf->st_mode |= S_IFREG;
4154   }
4155 }
4156 
4157 // Returns the given path as an absolute wide path in unc format. The returned path is NULL
4158 // on error (with err being set accordingly) and should be freed via os::free() otherwise.
4159 // additional_space is the number of additionally allocated wchars after the terminating L'\0'.
4160 // This is based on pathToNTPath() in io_util_md.cpp, but omits the optimizations for
4161 // short paths.
4162 static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
4163   if ((path == NULL) || (path[0] == '\0')) {
4164     err = ENOENT;
4165     return NULL;
4166   }
4167 
4168   size_t path_len = strlen(path);
4169   // Need to allocate at least room for 3 characters, since os::native_path transforms C: to C:.
4170   char* buf = (char*) os::malloc(1 + MAX2((size_t) 3, path_len), mtInternal);
4171   wchar_t* result = NULL;
4172 
4173   if (buf == NULL) {
4174     err = ENOMEM;
4175   } else {
4176     memcpy(buf, path, path_len + 1);
4177     os::native_path(buf);
4178 
4179     wchar_t* prefix;
4180     int prefix_off = 0;
4181     bool is_abs = true;
4182     bool needs_fullpath = true;
4183 
4184     if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
4185       prefix = L"\\\\?\\";
4186     } else if (buf[0] == '\\' && buf[1] == '\\') {
4187       if (buf[2] == '?' && buf[3] == '\\') {
4188         prefix = L"";
4189         needs_fullpath = false;
4190       } else {
4191         prefix = L"\\\\?\\UNC";
4192         prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4193       }
4194     } else {
4195       is_abs = false;
4196       prefix = L"\\\\?\\";
4197     }
4198 
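         // Illustrative examples of the resulting wide path:
         //   "C:\dir\file"      -> L"\\?\C:\dir\file"
         //   "\\server\share\f" -> L"\\?\UNC\server\share\f"
         //   "dir\file"         -> L"\\?\" followed by the absolute form of "dir\file"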
4199     size_t buf_len = strlen(buf);
4200     size_t prefix_len = wcslen(prefix);
4201     size_t full_path_size = is_abs ? 1 + buf_len : JVM_MAXPATHLEN;
4202     size_t result_size = prefix_len + full_path_size - prefix_off;
4203     result = (wchar_t*) os::malloc(sizeof(wchar_t) * (additional_space + result_size), mtInternal);
4204 
4205     if (result == NULL) {
4206       err = ENOMEM;
4207     } else {
4208       size_t converted_chars;
4209       wchar_t* path_start = result + prefix_len - prefix_off;
4210       err = ::mbstowcs_s(&converted_chars, path_start, buf_len + 1, buf, buf_len);
4211 
4212       if ((err == ERROR_SUCCESS) && needs_fullpath) {
4213         wchar_t* tmp = (wchar_t*) os::malloc(sizeof(wchar_t) * full_path_size, mtInternal);
4214 
4215         if (tmp == NULL) {
4216           err = ENOMEM;
4217         } else {
4218           if (!_wfullpath(tmp, path_start, full_path_size)) {
4219             err = ENOENT;
4220           } else {
4221             ::memcpy(path_start, tmp, (1 + wcslen(tmp)) * sizeof(wchar_t));
4222           }
4223 
4224           os::free(tmp);
4225         }
4226       }
4227 
4228       memcpy(result, prefix, sizeof(wchar_t) * prefix_len);
4229 
4230       // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
4231       size_t result_len = wcslen(result);
4232 
4233       if (result[result_len - 1] == L'\\') {
4234         if (!(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
4235           result[result_len - 1] = L'\0';
4236         }
4237       }
4238     }
4239   }
4240 
4241   os::free(buf);
4242 
4243   if (err != ERROR_SUCCESS) {
4244     os::free(result);
4245     result = NULL;
4246   }
4247 
4248   return result;
4249 }
4250 
4251 int os::stat(const char *path, struct stat *sbuf) {
4252   errno_t err;
4253   wchar_t* wide_path = wide_abs_unc_path(path, err);
4254 
4255   if (wide_path == NULL) {
4256     errno = err;
4257     return -1;
4258   }
4259 
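       // Query the file attributes without opening the file, then translate them
       // into the stat structure.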
4260   WIN32_FILE_ATTRIBUTE_DATA file_data;
4261   BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4262   os::free(wide_path);
4263 
4264   if (!bret) {
4265     errno = ::GetLastError();
4266     return -1;
4267   }
4268 
4269   file_attribute_data_to_stat(sbuf, file_data);
4270   return 0;
4271 }
4272 
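     // Open the file for attribute queries only (no read/write access requested);
     // returns INVALID_HANDLE_VALUE on failure.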
4273 static HANDLE create_read_only_file_handle(const char* file) {
4274   errno_t err;
4275   wchar_t* wide_path = wide_abs_unc_path(file, err);
4276 
4277   if (wide_path == NULL) {
4278     errno = err;
4279     return INVALID_HANDLE_VALUE;
4280   }
4281 
4282   HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4283                                 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4284   os::free(wide_path);
4285 
4286   return handle;
4287 }
4288 
4289 bool os::same_files(const char* file1, const char* file2) {
4290 
4291   if (file1 == NULL && file2 == NULL) {
4292     return true;
4293   }
4294 
4295   if (file1 == NULL || file2 == NULL) {
4296     return false;
4297   }
4298 
4299   if (strcmp(file1, file2) == 0) {
4300     return true;
4301   }
4302 
4303   HANDLE handle1 = create_read_only_file_handle(file1);
4304   HANDLE handle2 = create_read_only_file_handle(file2);
4305   bool result = false;
4306 
4307   // if we could open both paths...
4308   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4309     BY_HANDLE_FILE_INFORMATION fileInfo1;
4310     BY_HANDLE_FILE_INFORMATION fileInfo2;
4311     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4312       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4313       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4314       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4315         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4316         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4317         result = true;
4318       }
4319     }
4320   }
4321 
4322   // Free the handles.
4323   if (handle1 != INVALID_HANDLE_VALUE) {
4324     ::CloseHandle(handle1);
4325   }
4326 
4327   if (handle2 != INVALID_HANDLE_VALUE) {
4328     ::CloseHandle(handle2);
4329   }
4330 
4331   return result;
4332 }
4333 
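     // Combines the two 32-bit halves of a FILETIME into one 64-bit value
     // (a count of 100-nanosecond intervals).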
4334 #define FT2INT64(ft) \
4335   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4336 
4337 
4338 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4339 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4340 // of a thread.
4341 //
4342 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4343 // the fast estimate available on the platform.
4344 
4345 // current_thread_cpu_time() is not optimized for Windows yet
4346 jlong os::current_thread_cpu_time() {
4347   // return user + sys since the cost is the same
4348   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4349 }
4350 
4351 jlong os::thread_cpu_time(Thread* thread) {
4352   // consistent with what current_thread_cpu_time() returns.
4353   return os::thread_cpu_time(thread, true /* user+sys */);
4354 }
4355 
4356 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4357   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4358 }
4359 
4360 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4361   // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4362   // If this function changes, os::is_thread_cpu_time_supported() should too
4363   FILETIME CreationTime;
4364   FILETIME ExitTime;
4365   FILETIME KernelTime;
4366   FILETIME UserTime;
4367 
4368   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4369                       &ExitTime, &KernelTime, &UserTime) == 0) {
4370     return -1;
4371   } else if (user_sys_cpu_time) {
4372     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4373   } else {
4374     return FT2INT64(UserTime) * 100;
4375   }
4376 }
4377 
4378 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4379   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4380   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4381   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4382   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4383 }
4384 
4385 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4386   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4387   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4388   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4389   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4390 }
4391 
4392 bool os::is_thread_cpu_time_supported() {
4393   // see os::thread_cpu_time
4394   FILETIME CreationTime;
4395   FILETIME ExitTime;
4396   FILETIME KernelTime;
4397   FILETIME UserTime;
4398 
4399   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4400                       &KernelTime, &UserTime) == 0) {
4401     return false;
4402   } else {
4403     return true;
4404   }
4405 }
4406 
4407 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4408 // It does have primitives (PDH API) to get CPU usage and run queue length.
4409 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4410 // If we wanted to implement loadavg on Windows, we have a few options:
4411 //
4412 // a) Query CPU usage and run queue length and "fake" an answer by
4413 //    returning the CPU usage if it's under 100%, and the run queue
4414 //    length otherwise.  It turns out that querying is pretty slow
4415 //    on Windows, on the order of 200 microseconds on a fast machine.
4416 //    Note that on Windows the CPU usage value is the % usage
4417 //    since the last time the API was called (and the first call
4418 //    returns 100%), so we'd have to deal with that as well.
     //    (An illustrative sketch of this option follows the stub below.)
4419 //
4420 // b) Sample the "fake" answer using a sampling thread and store
4421 //    the answer in a global variable.  The call to loadavg would
4422 //    just return the value of the global, avoiding the slow query.
4423 //
4424 // c) Sample a better answer using exponential decay to smooth the
4425 //    value.  This is basically the algorithm used by UNIX kernels.
4426 //
4427 // Note that sampling thread starvation could affect both (b) and (c).
4428 int os::loadavg(double loadavg[], int nelem) {
4429   return -1;
4430 }
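
     // For illustration only: a rough, untested sketch of option (a) above, using
     // the PDH counter path quoted in the comment (error handling omitted; nothing
     // here is used by the VM, and pdh.lib would have to be linked in):
     //
     //   #include <pdh.h>
     //
     //   static double sampled_cpu_usage_percent() {
     //     PDH_HQUERY query;
     //     PDH_HCOUNTER counter;
     //     PDH_FMT_COUNTERVALUE value;
     //     PdhOpenQuery(NULL, 0, &query);
     //     PdhAddEnglishCounter(query, "\\Processor(_Total)\\% Processor Time", 0, &counter);
     //     PdhCollectQueryData(query);      // the first sample only establishes a baseline
     //     Sleep(100);
     //     PdhCollectQueryData(query);      // the second sample yields a usable rate
     //     PdhGetFormattedCounterValue(counter, PDH_FMT_DOUBLE, NULL, &value);
     //     PdhCloseQuery(query);
     //     return value.doubleValue;        // % CPU used since the previous sample
     //   }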
4431 
4432 
4433 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4434 bool os::dont_yield() {
4435   return DontYieldALot;
4436 }
4437 
4438 int os::open(const char *path, int oflag, int mode) {
4439   errno_t err;
4440   wchar_t* wide_path = wide_abs_unc_path(path, err);
4441 
4442   if (wide_path == NULL) {
4443     errno = err;
4444     return -1;
4445   }
4446   int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
4447   os::free(wide_path);
4448 
4449   if (fd == -1) {
4450     errno = ::GetLastError();
4451   }
4452 
4453   return fd;
4454 }
4455 
4456 FILE* os::open(int fd, const char* mode) {
4457   return ::_fdopen(fd, mode);
4458 }
4459 
4460 // Is a (classpath) directory empty?
4461 bool os::dir_is_empty(const char* path) {
4462   errno_t err;
4463   wchar_t* wide_path = wide_abs_unc_path(path, err, 2);
4464 
4465   if (wide_path == NULL) {
4466     errno = err;
4467     return false;
4468   }
4469 
4470   // Make sure we end with "\\*"
4471   if (wide_path[wcslen(wide_path) - 1] == L'\\') {
4472     wcscat(wide_path, L"*");
4473   } else {
4474     wcscat(wide_path, L"\\*");
4475   }
4476 
4477   WIN32_FIND_DATAW fd;
4478   HANDLE f = ::FindFirstFileW(wide_path, &fd);
4479   os::free(wide_path);
4480   bool is_empty = true;
4481 
4482   if (f != INVALID_HANDLE_VALUE) {
4483     while (is_empty && ::FindNextFileW(f, &fd)) {
4484       // An empty directory contains only the '.' (current directory)
4485       // and '..' (parent directory) entries.
4486       if ((wcscmp(fd.cFileName, L".") != 0) &&
4487           (wcscmp(fd.cFileName, L"..") != 0)) {
4488         is_empty = false;
4489       }
4490     }
4491     FindClose(f);
4492   } else {
4493     errno = ::GetLastError();
4494   }
4495 
4496   return is_empty;
4497 }
4498 
4499 // create binary file, rewriting existing file if required
4500 int os::create_binary_file(const char* path, bool rewrite_existing) {
4501   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4502   if (!rewrite_existing) {
4503     oflags |= _O_EXCL;
4504   }
4505   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4506 }
4507 
4508 // return current position of file pointer
4509 jlong os::current_file_offset(int fd) {
4510   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4511 }
4512 
4513 // move file pointer to the specified offset
4514 jlong os::seek_to_file_offset(int fd, jlong offset) {
4515   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4516 }
4517 
4518 
4519 jlong os::lseek(int fd, jlong offset, int whence) {
4520   return (jlong) ::_lseeki64(fd, offset, whence);
4521 }
4522 
4523 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4524   OVERLAPPED ov;
4525   DWORD nread;
4526   BOOL result;
4527 
4528   ZeroMemory(&ov, sizeof(ov));
4529   ov.Offset = (DWORD)offset;
4530   ov.OffsetHigh = (DWORD)(offset >> 32);
4531 
4532   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4533 
4534   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4535 
4536   return result ? nread : 0;
4537 }
4538 
4539 
4540 // This method is a slightly reworked copy of JDK's sysNativePath
4541 // from src/windows/hpi/src/path_md.c
4542 
4543 // Convert a pathname to native format.  On win32, this involves forcing all
4544 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4545 // sometimes rejects '/') and removing redundant separators.  The input path is
4546 // assumed to have been converted into the character encoding used by the local
4547 // system.  Because this might be a double-byte encoding, care is taken to
4548 // treat double-byte lead characters correctly.
4549 //
4550 // This procedure modifies the given path in place, as the result is never
4551 // longer than the original.  There is no error return; this operation always
4552 // succeeds.
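     //
     // For example (illustrative only):
     //   "/c:/path//to///file"  becomes  "c:\path\to\file"
     //   "\\host\share\"        becomes  "\\host\share"
     //   "z:"                   becomes  "z:."  (see the CRT workaround at the end)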
4553 char * os::native_path(char *path) {
4554   char *src = path, *dst = path, *end = path;
4555   char *colon = NULL;  // If a drive specifier is found, this will
4556                        // point to the colon following the drive letter
4557 
4558   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4559   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4560           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4561 
4562   // Check for leading separators
4563 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4564   while (isfilesep(*src)) {
4565     src++;
4566   }
4567 
4568   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4569     // Remove leading separators if followed by drive specifier.  This
4570     // hack is necessary to support file URLs containing drive
4571     // specifiers (e.g., "file://c:/path").  As a side effect,
4572     // "/c:/path" can be used as an alternative to "c:/path".
4573     *dst++ = *src++;
4574     colon = dst;
4575     *dst++ = ':';
4576     src++;
4577   } else {
4578     src = path;
4579     if (isfilesep(src[0]) && isfilesep(src[1])) {
4580       // UNC pathname: Retain first separator; leave src pointed at
4581       // second separator so that further separators will be collapsed
4582       // into the second separator.  The result will be a pathname
4583       // beginning with "\\\\" followed (most likely) by a host name.
4584       src = dst = path + 1;
4585       path[0] = '\\';     // Force first separator to '\\'
4586     }
4587   }
4588 
4589   end = dst;
4590 
4591   // Remove redundant separators from remainder of path, forcing all
4592   // separators to be '\\' rather than '/'. Also, single byte space
4593   // characters are removed from the end of the path because those
4594   // are not legal ending characters on this operating system.
4595   //
4596   while (*src != '\0') {
4597     if (isfilesep(*src)) {
4598       *dst++ = '\\'; src++;
4599       while (isfilesep(*src)) src++;
4600       if (*src == '\0') {
4601         // Check for trailing separator
4602         end = dst;
4603         if (colon == dst - 2) break;  // "z:\\"
4604         if (dst == path + 1) break;   // "\\"
4605         if (dst == path + 2 && isfilesep(path[0])) {
4606           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4607           // beginning of a UNC pathname.  Even though it is not, by
4608           // itself, a valid UNC pathname, we leave it as is in order
4609           // to be consistent with the path canonicalizer as well
4610           // as the win32 APIs, which treat this case as an invalid
4611           // UNC pathname rather than as an alias for the root
4612           // directory of the current drive.
4613           break;
4614         }
4615         end = --dst;  // Path does not denote a root directory, so
4616                       // remove trailing separator
4617         break;
4618       }
4619       end = dst;
4620     } else {
4621       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4622         *dst++ = *src++;
4623         if (*src) *dst++ = *src++;
4624         end = dst;
4625       } else {  // Copy a single-byte character
4626         char c = *src++;
4627         *dst++ = c;
4628         // Space is not a legal ending character
4629         if (c != ' ') end = dst;
4630       }
4631     }
4632   }
4633 
4634   *end = '\0';
4635 
4636   // For "z:", add "." to work around a bug in the C runtime library
4637   if (colon == dst - 1) {
4638     path[2] = '.';
4639     path[3] = '\0';
4640   }
4641 
4642   return path;
4643 }
4644 
4645 // This code is a copy of JDK's sysSetLength
4646 // from src/windows/hpi/src/sys_api_md.c
4647 
4648 int os::ftruncate(int fd, jlong length) {
4649   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4650   long high = (long)(length >> 32);
4651   DWORD ret;
4652 
4653   if (h == (HANDLE)(-1)) {
4654     return -1;
4655   }
4656 
4657   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
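       // SetFilePointer() returns 0xFFFFFFFF (INVALID_SET_FILE_POINTER) both on
       // failure and when the low dword of a valid new position happens to be
       // 0xFFFFFFFF; GetLastError() distinguishes the two cases.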
4658   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4659     return -1;
4660   }
4661 
4662   if (::SetEndOfFile(h) == FALSE) {
4663     return -1;
4664   }
4665 
4666   return 0;
4667 }
4668 
4669 int os::get_fileno(FILE* fp) {
4670   return _fileno(fp);
4671 }
4672 
4673 // This code is a copy of JDK's sysSync
4674 // from src/windows/hpi/src/sys_api_md.c
4675 // except for the legacy workaround for a bug in Win 98
4676 
4677 int os::fsync(int fd) {
4678   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4679 
4680   if ((!::FlushFileBuffers(handle)) &&
4681       (GetLastError() != ERROR_ACCESS_DENIED)) {
4682     // ERROR_ACCESS_DENIED (from winerror.h) is deliberately not treated as a
         // failure: FlushFileBuffers() returns it for handles opened without write
         // access, where there is nothing to flush.
4683     return -1;
4684   }
4685   return 0;
4686 }
4687 
4688 static int nonSeekAvailable(int, long *);
4689 static int stdinAvailable(int, long *);
4690 
4691 // This code is a copy of JDK's sysAvailable
4692 // from src/windows/hpi/src/sys_api_md.c
4693 
4694 int os::available(int fd, jlong *bytes) {
4695   jlong cur, end;
4696   struct _stati64 stbuf64;
4697 
4698   if (::_fstati64(fd, &stbuf64) >= 0) {
4699     int mode = stbuf64.st_mode;
4700     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4701       int ret;
4702       long lpbytes;
4703       if (fd == 0) {
4704         ret = stdinAvailable(fd, &lpbytes);
4705       } else {
4706         ret = nonSeekAvailable(fd, &lpbytes);
4707       }
4708       (*bytes) = (jlong)(lpbytes);
4709       return ret;
4710     }
4711     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4712       return FALSE;
4713     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4714       return FALSE;
4715     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4716       return FALSE;
4717     }
4718     *bytes = end - cur;
4719     return TRUE;
4720   } else {
4721     return FALSE;
4722   }
4723 }
4724 
4725 void os::flockfile(FILE* fp) {
4726   _lock_file(fp);
4727 }
4728 
4729 void os::funlockfile(FILE* fp) {
4730   _unlock_file(fp);
4731 }
4732 
4733 // This code is a copy of JDK's nonSeekAvailable
4734 // from src/windows/hpi/src/sys_api_md.c
4735 
4736 static int nonSeekAvailable(int fd, long *pbytes) {
4737   // This is used for available on non-seekable devices
4738   // (like both named and anonymous pipes, such as pipes
4739   //  connected to an exec'd process).
4740   // Standard Input is a special case.
4741   HANDLE han;
4742 
4743   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4744     return FALSE;
4745   }
4746 
4747   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4748     // PeekNamedPipe fails when at EOF.  In that case we
4749     // simply make *pbytes = 0 which is consistent with the
4750     // behavior we get on Solaris when an fd is at EOF.
4751     // The only alternative is to raise an Exception,
4752     // which isn't really warranted.
4753     //
4754     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4755       return FALSE;
4756     }
4757     *pbytes = 0;
4758   }
4759   return TRUE;
4760 }
4761 
4762 #define MAX_INPUT_EVENTS 2000
4763 
4764 // This code is a copy of JDK's stdinAvailable
4765 // from src/windows/hpi/src/sys_api_md.c
4766 
4767 static int stdinAvailable(int fd, long *pbytes) {
4768   HANDLE han;
4769   DWORD numEventsRead = 0;  // Number of events read from buffer
4770   DWORD numEvents = 0;      // Number of events in buffer
4771   DWORD i = 0;              // Loop index
4772   DWORD curLength = 0;      // Position marker
4773   DWORD actualLength = 0;   // Number of bytes readable
4774   BOOL error = FALSE;       // Error holder
4775   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4776 
4777   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4778     return FALSE;
4779   }
4780 
4781   // Construct an array of input records in the console buffer
4782   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4783   if (error == 0) {
4784     return nonSeekAvailable(fd, pbytes);
4785   }
4786 
4787   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4788   if (numEvents > MAX_INPUT_EVENTS) {
4789     numEvents = MAX_INPUT_EVENTS;
4790   }
4791 
4792   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4793   if (lpBuffer == NULL) {
4794     return FALSE;
4795   }
4796 
4797   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4798   if (error == 0) {
4799     os::free(lpBuffer);
4800     return FALSE;
4801   }
4802 
4803   // Examine input records for the number of bytes available
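       // Only key-down events are counted, and only the count seen at the most
       // recent carriage return is reported, since console input is normally
       // line-buffered until Enter is pressed.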
4804   for (i=0; i<numEvents; i++) {
4805     if (lpBuffer[i].EventType == KEY_EVENT) {
4806 
4807       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4808                                       &(lpBuffer[i].Event);
4809       if (keyRecord->bKeyDown == TRUE) {
4810         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4811         curLength++;
4812         if (*keyPressed == '\r') {
4813           actualLength = curLength;
4814         }
4815       }
4816     }
4817   }
4818 
4819   if (lpBuffer != NULL) {
4820     os::free(lpBuffer);
4821   }
4822 
4823   *pbytes = (long) actualLength;
4824   return TRUE;
4825 }
4826 
4827 // Map a block of memory.
4828 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4829                         char *addr, size_t bytes, bool read_only,
4830                         bool allow_exec) {
4831   HANDLE hFile;
4832   char* base;
4833 
4834   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4835                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
     // CreateFile() reports failure with INVALID_HANDLE_VALUE rather than NULL.
4836   if (hFile == INVALID_HANDLE_VALUE) {
4837     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4838     return NULL;
4839   }
4840 
4841   if (allow_exec) {
4842     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4843     // unless it comes from a PE image (which the shared archive is not.)
4844     // Even VirtualProtect refuses to give execute access to mapped memory
4845     // that was not previously executable.
4846     //
4847     // Instead, stick the executable region in anonymous memory.  Yuck.
4848     // Penalty is that ~4 pages will not be shareable - in the future
4849     // we might consider DLLizing the shared archive with a proper PE
4850     // header so that mapping executable + sharing is possible.
4851 
4852     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4853                                 PAGE_READWRITE);
4854     if (base == NULL) {
4855       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4856       CloseHandle(hFile);
4857       return NULL;
4858     }
4859 
4860     // Record virtual memory allocation
4861     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
4862 
4863     DWORD bytes_read;
4864     OVERLAPPED overlapped;
4865     overlapped.Offset = (DWORD)file_offset;
4866     overlapped.OffsetHigh = 0;
4867     overlapped.hEvent = NULL;
4868     // ReadFile guarantees that if the return value is true, the requested
4869     // number of bytes were read before returning.
4870     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4871     if (!res) {
4872       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4873       release_memory(base, bytes);
4874       CloseHandle(hFile);
4875       return NULL;
4876     }
4877   } else {
4878     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4879                                     NULL /* file_name */);
4880     if (hMap == NULL) {
4881       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4882       CloseHandle(hFile);
4883       return NULL;
4884     }
4885 
4886     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4887     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4888                                   (DWORD)bytes, addr);
4889     if (base == NULL) {
4890       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4891       CloseHandle(hMap);
4892       CloseHandle(hFile);
4893       return NULL;
4894     }
4895 
4896     if (CloseHandle(hMap) == 0) {
4897       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4898       CloseHandle(hFile);
4899       return base;
4900     }
4901   }
4902 
4903   if (allow_exec) {
4904     DWORD old_protect;
4905     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4906     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4907 
4908     if (!res) {
4909       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4910       // Don't consider this a hard error, on IA32 even if the
4911       // VirtualProtect fails, we should still be able to execute
4912       CloseHandle(hFile);
4913       return base;
4914     }
4915   }
4916 
4917   if (CloseHandle(hFile) == 0) {
4918     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4919     return base;
4920   }
4921 
4922   return base;
4923 }
4924 
4925 
4926 // Remap a block of memory.
4927 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4928                           char *addr, size_t bytes, bool read_only,
4929                           bool allow_exec) {
4930   // This OS does not allow existing memory maps to be remapped so we
4931   // would have to unmap the memory before we remap it.
4932 
4933   // Because there is a small window between unmapping memory and mapping
4934   // it in again with different protections, CDS archives are mapped RW
4935   // on windows, so this function isn't called.
4936   ShouldNotReachHere();
4937   return NULL;
4938 }
4939 
4940 
4941 // Unmap a block of memory.
4942 // Returns true=success, otherwise false.
4943 
4944 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4945   MEMORY_BASIC_INFORMATION mem_info;
4946   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4947     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
4948     return false;
4949   }
4950 
4951   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4952   // Instead, the executable region was allocated using VirtualAlloc(). See
4953   // pd_map_memory() above.
4954   //
4955   // The following flags should match the 'exec_access' flags used for
4956   // VirtualProtect() in pd_map_memory().
4957   if (mem_info.Protect == PAGE_EXECUTE_READ ||
4958       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4959     return pd_release_memory(addr, bytes);
4960   }
4961 
4962   BOOL result = UnmapViewOfFile(addr);
4963   if (result == 0) {
4964     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
4965     return false;
4966   }
4967   return true;
4968 }
4969 
4970 void os::pause() {
4971   char filename[MAX_PATH];
4972   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4973     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4974   } else {
4975     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4976   }
4977 
4978   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4979   if (fd != -1) {
4980     struct stat buf;
4981     ::close(fd);
4982     while (::stat(filename, &buf) == 0) {
4983       Sleep(100);
4984     }
4985   } else {
4986     jio_fprintf(stderr,
4987                 "Could not open pause file '%s', continuing immediately.\n", filename);
4988   }
4989 }
4990 
4991 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
4992 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
4993 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
4994 
4995 os::ThreadCrashProtection::ThreadCrashProtection() {
4996 }
4997 
4998 // See the caveats for this class in os_windows.hpp
4999 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5000 // into this method, which then returns false. If no OS EXCEPTION was raised,
5001 // it returns true.
5002 // The callback is supposed to provide the method that should be protected.
5003 //
5004 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5005 
5006   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5007 
5008   _protected_thread = Thread::current_or_null();
5009   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5010 
5011   bool success = true;
5012   __try {
5013     _crash_protection = this;
5014     cb.call();
5015   } __except(EXCEPTION_EXECUTE_HANDLER) {
5016     // only for protection, nothing to do
5017     success = false;
5018   }
5019   _crash_protection = NULL;
5020   _protected_thread = NULL;
5021   Thread::muxRelease(&_crash_mux);
5022   return success;
5023 }
5024 
5025 
5026 class HighResolutionInterval : public CHeapObj<mtThread> {
5027   // The default timer resolution seems to be 10 milliseconds.
5028   // (Where is this written down?)
5029   // If someone wants to sleep for only a fraction of the default,
5030   // then we set the timer resolution down to 1 millisecond for
5031   // the duration of their interval.
5032   // We carefully set the resolution back, since otherwise we
5033   // seem to incur an overhead (3%?) that we don't need.
5034   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
5035   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5036   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5037   // timeBeginPeriod() if the relative error exceeded some threshold.
5038   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5039   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5040   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5041   // resolution timers running.
5042  private:
5043   jlong resolution;
5044  public:
5045   HighResolutionInterval(jlong ms) {
5046     resolution = ms % 10L;
5047     if (resolution != 0) {
5048       MMRESULT result = timeBeginPeriod(1L);
5049     }
5050   }
5051   ~HighResolutionInterval() {
5052     if (resolution != 0) {
5053       MMRESULT result = timeEndPeriod(1L);
5054     }
5055     resolution = 0L;
5056   }
5057 };
5058 
5059 // An Event wraps a win32 "CreateEvent" kernel handle.
5060 //
5061 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5062 //
5063 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5064 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5065 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5066 //     In addition, an unpark() operation might fetch the handle field, but the
5067 //     event could recycle between the fetch and the SetEvent() operation.
5068 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5069 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5070 //     on a stale but recycled handle would be harmless, but in practice this might
5071 //     confuse other non-Sun code, so it's not a viable approach.
5072 //
5073 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5074 //     with the Event.  The event handle is never closed.  This could be construed
5075 //     as handle leakage, but only up to the maximum # of threads that have been extant
5076 //     at any one time.  This shouldn't be an issue, as windows platforms typically
5077 //     permit a process to have hundreds of thousands of open handles.
5078 //
5079 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5080 //     and release unused handles.
5081 //
5082 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5083 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5084 //
5085 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5086 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5087 //
5088 // We use (2).
5089 //
5090 // TODO-FIXME:
5091 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5092 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5093 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5094 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5095 //     into a single win32 CreateEvent() handle.
5096 //
5097 // Assumption:
5098 //    Only one parker can exist on an event, which is why we allocate
5099 //    them per-thread. Multiple unparkers can coexist.
5100 //
5101 // _Event transitions in park()
5102 //   -1 => -1 : illegal
5103 //    1 =>  0 : pass - return immediately
5104 //    0 => -1 : block; then set _Event to 0 before returning
5105 //
5106 // _Event transitions in unpark()
5107 //    0 => 1 : just return
5108 //    1 => 1 : just return
5109 //   -1 => either 0 or 1; must signal target thread
5110 //         That is, we can safely transition _Event from -1 to either
5111 //         0 or 1.
5112 //
5113 // _Event serves as a restricted-range semaphore.
5114 //   -1 : thread is blocked, i.e. there is a waiter
5115 //    0 : neutral: thread is running or ready,
5116 //        could have been signaled after a wait started
5117 //    1 : signaled - thread is running or ready
5118 //
5119 // Another possible encoding of _Event would be with
5120 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5121 //
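     // Example interleaving: with _Event == 0, an unpark() xchg()s 1 into _Event
     // and returns (the old value was >= 0); the next park() CASes 1 -> 0 and
     // returns immediately without blocking.  If park() runs first, it CASes
     // 0 -> -1 and blocks on the win32 event; a later unpark() then observes the
     // old value -1 and calls SetEvent() to wake the waiter.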
5122 
5123 int os::PlatformEvent::park(jlong Millis) {
5124   // Transitions for _Event:
5125   //   -1 => -1 : illegal
5126   //    1 =>  0 : pass - return immediately
5127   //    0 => -1 : block; then set _Event to 0 before returning
5128 
5129   guarantee(_ParkHandle != NULL , "Invariant");
5130   guarantee(Millis > 0          , "Invariant");
5131 
5132   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5133   // the initial park() operation.
5134   // Consider: use atomic decrement instead of CAS-loop
5135 
5136   int v;
5137   for (;;) {
5138     v = _Event;
5139     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5140   }
5141   guarantee((v == 0) || (v == 1), "invariant");
5142   if (v != 0) return OS_OK;
5143 
5144   // Do this the hard way by blocking ...
5145   // TODO: consider a brief spin here, gated on the success of recent
5146   // spin attempts by this thread.
5147   //
5148   // We decompose long timeouts into series of shorter timed waits.
5149   // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5150   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5151   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5152   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5153   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5154   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5155   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5156   // for the already waited time.  This policy does not admit any new outcomes.
5157   // In the future, however, we might want to track the accumulated wait time and
5158   // adjust Millis accordingly if we encounter a spurious wakeup.
5159 
5160   const int MAXTIMEOUT = 0x10000000;
5161   DWORD rv = WAIT_TIMEOUT;
5162   while (_Event < 0 && Millis > 0) {
5163     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5164     if (Millis > MAXTIMEOUT) {
5165       prd = MAXTIMEOUT;
5166     }
5167     HighResolutionInterval *phri = NULL;
5168     if (!ForceTimeHighResolution) {
5169       phri = new HighResolutionInterval(prd);
5170     }
5171     rv = ::WaitForSingleObject(_ParkHandle, prd);
5172     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5173     if (rv == WAIT_TIMEOUT) {
5174       Millis -= prd;
5175     }
5176     delete phri; // if it is NULL, harmless
5177   }
5178   v = _Event;
5179   _Event = 0;
5180   // see comment at end of os::PlatformEvent::park() below:
5181   OrderAccess::fence();
5182   // If we encounter a nearly simultaneous timeout expiry and unpark()
5183   // we return OS_OK indicating we awoke via unpark().
5184   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5185   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5186 }
5187 
5188 void os::PlatformEvent::park() {
5189   // Transitions for _Event:
5190   //   -1 => -1 : illegal
5191   //    1 =>  0 : pass - return immediately
5192   //    0 => -1 : block; then set _Event to 0 before returning
5193 
5194   guarantee(_ParkHandle != NULL, "Invariant");
5195   // Invariant: Only the thread associated with the Event/PlatformEvent
5196   // may call park().
5197   // Consider: use atomic decrement instead of CAS-loop
5198   int v;
5199   for (;;) {
5200     v = _Event;
5201     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5202   }
5203   guarantee((v == 0) || (v == 1), "invariant");
5204   if (v != 0) return;
5205 
5206   // Do this the hard way by blocking ...
5207   // TODO: consider a brief spin here, gated on the success of recent
5208   // spin attempts by this thread.
5209   while (_Event < 0) {
5210     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5211     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5212   }
5213 
5214   // Usually we'll find _Event == 0 at this point, but as
5215   // an optional optimization we clear it, just in case
5216   // multiple unpark() operations drove _Event up to 1.
5217   _Event = 0;
5218   OrderAccess::fence();
5219   guarantee(_Event >= 0, "invariant");
5220 }
5221 
5222 void os::PlatformEvent::unpark() {
5223   guarantee(_ParkHandle != NULL, "Invariant");
5224 
5225   // Transitions for _Event:
5226   //    0 => 1 : just return
5227   //    1 => 1 : just return
5228   //   -1 => either 0 or 1; must signal target thread
5229   //         That is, we can safely transition _Event from -1 to either
5230   //         0 or 1.
5231   // See also: "Semaphores in Plan 9" by Mullender & Cox
5232   //
5233   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5234   // that it will take two back-to-back park() calls for the owning
5235   // thread to block. This has the benefit of forcing a spurious return
5236   // from the first park() call after an unpark() call which will help
5237   // shake out uses of park() and unpark() without condition variables.
5238 
5239   if (Atomic::xchg(&_Event, 1) >= 0) return;
5240 
5241   ::SetEvent(_ParkHandle);
5242 }
5243 
5244 
5245 // JSR166
5246 // -------------------------------------------------------
5247 
5248 // The Windows implementation of Park is very straightforward: Basic
5249 // operations on Win32 Events turn out to have the right semantics to
5250 // use them directly. We opportunistically reuse the event inherited
5251 // from Monitor.
5252 
5253 void Parker::park(bool isAbsolute, jlong time) {
5254   guarantee(_ParkEvent != NULL, "invariant");
5255   // First, demultiplex/decode time arguments
5256   if (time < 0) { // don't wait
5257     return;
5258   } else if (time == 0 && !isAbsolute) {
5259     time = INFINITE;
5260   } else if (isAbsolute) {
5261     time -= os::javaTimeMillis(); // convert to relative time
5262     if (time <= 0) {  // already elapsed
5263       return;
5264     }
5265   } else { // relative
5266     time /= 1000000;  // Must coarsen from nanos to millis
5267     if (time == 0) {  // Wait for the minimal time unit if zero
5268       time = 1;
5269     }
5270   }
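       // For example, a relative timeout of 2,500,000 ns coarsens to 2 ms, while
       // 500,000 ns coarsens to 0 and is bumped up to the 1 ms minimum.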
5271 
5272   JavaThread* thread = JavaThread::current();
5273 
5274   // Don't wait if interrupted or already triggered
5275   if (thread->is_interrupted(false) ||
5276       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5277     ResetEvent(_ParkEvent);
5278     return;
5279   } else {
5280     ThreadBlockInVM tbivm(thread);
5281     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5282     thread->set_suspend_equivalent();
5283 
5284     WaitForSingleObject(_ParkEvent, time);
5285     ResetEvent(_ParkEvent);
5286 
5287     // If externally suspended while waiting, re-suspend
5288     if (thread->handle_special_suspend_equivalent_condition()) {
5289       thread->java_suspend_self();
5290     }
5291   }
5292 }
5293 
5294 void Parker::unpark() {
5295   guarantee(_ParkEvent != NULL, "invariant");
5296   SetEvent(_ParkEvent);
5297 }
5298 
5299 // Platform Monitor implementation
5300 
5301 // Must already be locked
5302 int os::PlatformMonitor::wait(jlong millis) {
5303   assert(millis >= 0, "negative timeout");
5304   int ret = OS_TIMEOUT;
5305   int status = SleepConditionVariableCS(&_cond, &_mutex,
5306                                         millis == 0 ? INFINITE : millis);
5307   if (status != 0) {
5308     ret = OS_OK;
5309   }
5310   #ifndef PRODUCT
5311   else {
5312     DWORD err = GetLastError();
5313     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5314   }
5315   #endif
5316   return ret;
5317 }
5318 
5319 // Run the specified command in a separate process. Return its exit value,
5320 // or -1 on failure (e.g. can't create a new process).
5321 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5322   STARTUPINFO si;
5323   PROCESS_INFORMATION pi;
5324   DWORD exit_code;
5325 
5326   char * cmd_string;
5327   const char * cmd_prefix = "cmd /C ";
5328   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5329   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5330   if (cmd_string == NULL) {
5331     return -1;
5332   }
5333   cmd_string[0] = '\0';
5334   strcat(cmd_string, cmd_prefix);
5335   strcat(cmd_string, cmd);
5336 
5337   // now replace all '\n' with '&'
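       // ('&' separates commands on a cmd.exe command line, so a multi-line
       // command collapses into a single chained invocation)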
5338   char * substring = cmd_string;
5339   while ((substring = strchr(substring, '\n')) != NULL) {
5340     substring[0] = '&';
5341     substring++;
5342   }
5343   memset(&si, 0, sizeof(si));
5344   si.cb = sizeof(si);
5345   memset(&pi, 0, sizeof(pi));
5346   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5347                             cmd_string,    // command line
5348                             NULL,   // process security attribute
5349                             NULL,   // thread security attribute
5350                             TRUE,   // inherits system handles
5351                             0,      // no creation flags
5352                             NULL,   // use parent's environment block
5353                             NULL,   // use parent's starting directory
5354                             &si,    // (in) startup information
5355                             &pi);   // (out) process information
5356 
5357   if (rslt) {
5358     // Wait until child process exits.
5359     WaitForSingleObject(pi.hProcess, INFINITE);
5360 
5361     GetExitCodeProcess(pi.hProcess, &exit_code);
5362 
5363     // Close process and thread handles.
5364     CloseHandle(pi.hProcess);
5365     CloseHandle(pi.hThread);
5366   } else {
5367     exit_code = -1;
5368   }
5369 
5370   FREE_C_HEAP_ARRAY(char, cmd_string);
5371   return (int)exit_code;
5372 }
5373 
5374 bool os::find(address addr, outputStream* st) {
5375   int offset = -1;
5376   bool result = false;
5377   char buf[256];
5378   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5379     st->print(PTR_FORMAT " ", addr);
5380     if (strlen(buf) < sizeof(buf) - 1) {
5381       char* p = strrchr(buf, '\\');
5382       if (p) {
5383         st->print("%s", p + 1);
5384       } else {
5385         st->print("%s", buf);
5386       }
5387     } else {
5388       // The library name is probably truncated. Let's omit the library name.
5389       // See also JDK-8147512.
5390     }
5391     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5392       st->print("::%s + 0x%x", buf, offset);
5393     }
5394     st->cr();
5395     result = true;
5396   }
5397   return result;
5398 }
5399 
5400 static jint initSock() {
5401   WSADATA wsadata;
5402 
     // Note: WSAStartup() returns its error code directly; GetLastError() is not
     // meaningful if it fails.
5403   int err = ::WSAStartup(MAKEWORD(2,2), &wsadata);
       if (err != 0) {
5404     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5405                 err);
5406     return JNI_ERR;
5407   }
5408   return JNI_OK;
5409 }
5410 
5411 struct hostent* os::get_host_by_name(char* name) {
5412   return (struct hostent*)gethostbyname(name);
5413 }
5414 
5415 int os::socket_close(int fd) {
5416   return ::closesocket(fd);
5417 }
5418 
5419 int os::socket(int domain, int type, int protocol) {
5420   return ::socket(domain, type, protocol);
5421 }
5422 
5423 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5424   return ::connect(fd, him, len);
5425 }
5426 
5427 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5428   return ::recv(fd, buf, (int)nBytes, flags);
5429 }
5430 
5431 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5432   return ::send(fd, buf, (int)nBytes, flags);
5433 }
5434 
5435 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5436   return ::send(fd, buf, (int)nBytes, flags);
5437 }
5438 
5439 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5440 #if defined(IA32)
5441   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5442 #elif defined (AMD64)
5443   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5444 #endif
5445 
5446 // returns true if thread could be suspended,
5447 // false otherwise
5448 static bool do_suspend(HANDLE* h) {
5449   if (h != NULL) {
5450     if (SuspendThread(*h) != ~0) {
5451       return true;
5452     }
5453   }
5454   return false;
5455 }
5456 
5457 // resume the thread
5458 // calling resume on an active thread is a no-op
5459 static void do_resume(HANDLE* h) {
5460   if (h != NULL) {
5461     ResumeThread(*h);
5462   }
5463 }
5464 
5465 // retrieve a suspend/resume context capable handle
5466 // from the tid. Caller validates handle return value.
5467 void get_thread_handle_for_extended_context(HANDLE* h,
5468                                             OSThread::thread_id_t tid) {
5469   if (h != NULL) {
5470     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5471   }
5472 }
5473 
5474 // Thread sampling implementation
5475 //
5476 void os::SuspendedThreadTask::internal_do_task() {
5477   CONTEXT    ctxt;
5478   HANDLE     h = NULL;
5479 
5480   // get context capable handle for thread
5481   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5482 
5483   // sanity
5484   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5485     return;
5486   }
5487 
5488   // suspend the thread
5489   if (do_suspend(&h)) {
5490     ctxt.ContextFlags = sampling_context_flags;
5491     // get thread context
5492     GetThreadContext(h, &ctxt);
5493     SuspendedThreadTaskContext context(_thread, &ctxt);
5494     // pass context to Thread Sampling impl
5495     do_task(context);
5496     // resume thread
5497     do_resume(&h);
5498   }
5499 
5500   // close handle
5501   CloseHandle(h);
5502 }
5503 
5504 bool os::start_debugging(char *buf, int buflen) {
5505   int len = (int)strlen(buf);
5506   char *p = &buf[len];
5507 
5508   jio_snprintf(p, buflen-len,
5509              "\n\n"
5510              "Do you want to debug the problem?\n\n"
5511              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5512              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5513              "Otherwise, select 'No' to abort...",
5514              os::current_process_id(), os::current_thread_id());
5515 
5516   bool yes = os::message_box("Unexpected Error", buf);
5517 
5518   if (yes) {
5519     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5520     // exception. If VM is running inside a debugger, the debugger will
5521     // catch the exception. Otherwise, the breakpoint exception will reach
5522     // the default windows exception handler, which can spawn a debugger and
5523     // automatically attach to the dying VM.
5524     os::breakpoint();
5525     yes = false;
5526   }
5527   return yes;
5528 }
5529 
5530 void* os::get_default_process_handle() {
5531   return (void*)GetModuleHandle(NULL);
5532 }
5533 
5534 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5535 // which is used to find statically linked in agents.
5536 // Additionally for windows, takes into account __stdcall names.
5537 // Parameters:
5538 //            sym_name: Symbol in library we are looking for
5539 //            lib_name: Name of library to look in, NULL for shared libs.
5540 //            is_absolute_path == true if lib_name is absolute path to agent
5541 //                                     such as "C:/a/b/L.dll"
5542 //            == false if only the base name of the library is passed in
5543 //               such as "L"
5544 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5545                                     bool is_absolute_path) {
5546   char *agent_entry_name;
5547   size_t len;
5548   size_t name_len;
5549   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5550   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5551   const char *start;
5552 
5553   if (lib_name != NULL) {
5554     len = name_len = strlen(lib_name);
5555     if (is_absolute_path) {
5556       // Need to strip path, prefix and suffix
5557       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5558         lib_name = ++start;
5559       } else {
5560         // Need to check for drive prefix
5561         if ((start = strchr(lib_name, ':')) != NULL) {
5562           lib_name = ++start;
5563         }
5564       }
5565       if (len <= (prefix_len + suffix_len)) {
5566         return NULL;
5567       }
5568       lib_name += prefix_len;
5569       name_len = strlen(lib_name) - suffix_len;
5570     }
5571   }
5572   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5573   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5574   if (agent_entry_name == NULL) {
5575     return NULL;
5576   }
5577   if (lib_name != NULL) {
5578     const char *p = strrchr(sym_name, '@');
5579     if (p != NULL && p != sym_name) {
5580       // sym_name == _Agent_OnLoad@XX
5581       strncpy(agent_entry_name, sym_name, (p - sym_name));
5582       agent_entry_name[(p-sym_name)] = '\0';
5583       // agent_entry_name == _Agent_OnLoad
5584       strcat(agent_entry_name, "_");
5585       strncat(agent_entry_name, lib_name, name_len);
5586       strcat(agent_entry_name, p);
5587       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5588     } else {
5589       strcpy(agent_entry_name, sym_name);
5590       strcat(agent_entry_name, "_");
5591       strncat(agent_entry_name, lib_name, name_len);
5592     }
5593   } else {
5594     strcpy(agent_entry_name, sym_name);
5595   }
5596   return agent_entry_name;
5597 }
5598 
5599 #ifndef PRODUCT
5600 
5601 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5602 // contiguous memory block at a particular address.
5603 // The test first tries to find a good approximate address to allocate at by using the same
5604 // method to allocate some memory at any address. The test then tries to allocate memory in
5605 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5606 // This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5607 // the previously allocated memory is available for allocation. The only actual failure
5608 // that is reported is when the test tries to allocate at a particular location but gets a
5609 // different valid one. A NULL return value at this point is not considered an error and may
5610 // be legitimate.
5611 void TestReserveMemorySpecial_test() {
5612   if (!UseLargePages) {
5613     return;
5614   }
5615   // save current value of globals
5616   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5617   bool old_use_numa_interleaving = UseNUMAInterleaving;
5618 
5619   // set globals to make sure we hit the correct code path
5620   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5621 
5622   // do an allocation at an address selected by the OS to get a good one.
5623   const size_t large_allocation_size = os::large_page_size() * 4;
5624   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5625   if (result == NULL) {
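         // initial large-page allocation failed; skip the rest of the test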
5626   } else {
5627     os::release_memory_special(result, large_allocation_size);
5628 
5629     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5630     // we managed to get it once.
5631     const size_t expected_allocation_size = os::large_page_size();
5632     char* expected_location = result + os::large_page_size();
5633     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5634     if (actual_location == NULL) {
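           // failed to allocate at the requested location; as noted in the comment
           // above, this may be legitimate and is not treated as an error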
5635     } else {
5636       // release memory
5637       os::release_memory_special(actual_location, expected_allocation_size);
5638       // only now check, after releasing any memory to avoid any leaks.
5639       assert(actual_location == expected_location,
5640              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5641              expected_location, expected_allocation_size, actual_location);
5642     }
5643   }
5644 
5645   // restore globals
5646   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5647   UseNUMAInterleaving = old_use_numa_interleaving;
5648 }
5649 #endif // PRODUCT
5650 
5651 /*
5652   All the defined signal names for Windows.
5653 
5654   NOTE that not all of these names are accepted by FindSignal!
5655 
5656   For various reasons some of these may be rejected at runtime.
5657 
5658   Here are the names currently accepted by a user of sun.misc.Signal with
5659   1.4.1 (ignoring potential interaction with use of chaining, etc):
5660 
5661      (LIST TBD)
5662 
5663 */
5664 int os::get_signal_number(const char* name) {
5665   static const struct {
5666     const char* name;
5667     int         number;
5668   } siglabels [] =
5669     // derived from version 6.0 VC98/include/signal.h
5670   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort call
5671   "FPE",        SIGFPE,         // floating point exception
5672   "SEGV",       SIGSEGV,        // segment violation
5673   "INT",        SIGINT,         // interrupt
5674   "TERM",       SIGTERM,        // software term signal from kill
5675   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5676   "ILL",        SIGILL};        // illegal instruction
5677   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5678     if (strcmp(name, siglabels[i].name) == 0) {
5679       return siglabels[i].number;
5680     }
5681   }
5682   return -1;
5683 }
5684 
5685 // Fast current thread access
5686 
5687 int os::win32::_thread_ptr_offset = 0;
5688 
5689 static void call_wrapper_dummy() {}
5690 
5691 // We need to call the os_exception_wrapper once so that it sets
5692 // up the offset from FS of the thread pointer.
5693 void os::win32::initialize_thread_ptr_offset() {
5694   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5695                            NULL, methodHandle(), NULL, NULL);
5696 }
5697 
5698 bool os::supports_map_sync() {
5699   return false;
5700 }