1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "logging/log.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "memory/allocation.inline.hpp"
  41 #include "memory/filemap.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "services/attachListener.hpp"
  67 #include "services/memTracker.hpp"
  68 #include "services/runtimeService.hpp"
  69 #include "utilities/align.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/growableArray.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 #include "symbolengine.hpp"
  77 #include "windbghelp.hpp"
  78 
  79 
  80 #ifdef _DEBUG
  81 #include <crtdbg.h>
  82 #endif
  83 
  84 
  85 #include <windows.h>
  86 #include <sys/types.h>
  87 #include <sys/stat.h>
  88 #include <sys/timeb.h>
  89 #include <objidl.h>
  90 #include <shlobj.h>
  91 
  92 #include <malloc.h>
  93 #include <signal.h>
  94 #include <direct.h>
  95 #include <errno.h>
  96 #include <fcntl.h>
  97 #include <io.h>
  98 #include <process.h>              // For _beginthreadex(), _endthreadex()
  99 #include <imagehlp.h>             // For os::dll_address_to_function_name
 100 // for enumerating dll libraries
 101 #include <vdmdbg.h>
 102 #include <psapi.h>
 103 #include <mmsystem.h>
 104 #include <winsock2.h>
 105 
 106 // for timer info max values which include all bits
 107 #define ALL_64_BITS CONST64(-1)
 108 
 109 // For DLL loading/load error detection
 110 // Values of PE COFF
 111 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 112 #define IMAGE_FILE_SIGNATURE_LENGTH 4
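     // Note: offset 0x3c in the DOS (MZ) header holds e_lfanew, the file offset of
     // the 4-byte PE signature; the COFF file header follows immediately after it.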
 113 
 114 static HANDLE main_process;
 115 static HANDLE main_thread;
 116 static int    main_thread_id;
 117 
 118 static FILETIME process_creation_time;
 119 static FILETIME process_exit_time;
 120 static FILETIME process_user_time;
 121 static FILETIME process_kernel_time;
 122 
 123 #ifdef _M_AMD64
 124   #define __CPU__ amd64
 125 #else
 126   #define __CPU__ i486
 127 #endif
 128 
 129 #if INCLUDE_AOT
 130 PVOID  topLevelVectoredExceptionHandler = NULL;
 131 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 132 #endif
 133 
 134 // save DLL module handle, used by GetModuleFileName
 135 
 136 HINSTANCE vm_lib_handle;
 137 
 138 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 139   switch (reason) {
 140   case DLL_PROCESS_ATTACH:
 141     vm_lib_handle = hinst;
 142     if (ForceTimeHighResolution) {
 143       timeBeginPeriod(1L);
 144     }
 145     WindowsDbgHelp::pre_initialize();
 146     SymbolEngine::pre_initialize();
 147     break;
 148   case DLL_PROCESS_DETACH:
 149     if (ForceTimeHighResolution) {
 150       timeEndPeriod(1L);
 151     }
 152 #if INCLUDE_AOT
 153     if (topLevelVectoredExceptionHandler != NULL) {
 154       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 155       topLevelVectoredExceptionHandler = NULL;
 156     }
 157 #endif
 158     break;
 159   default:
 160     break;
 161   }
 162   return true;
 163 }
 164 
 165 static inline double fileTimeAsDouble(FILETIME* time) {
 166   const double high  = (double) ((unsigned int) ~0);
 167   const double split = 10000000.0;
 168   double result = (time->dwLowDateTime / split) +
 169                    time->dwHighDateTime * (high/split);
 170   return result;
 171 }
 172 
 173 // Implementation of os
 174 
 175 bool os::unsetenv(const char* name) {
 176   assert(name != NULL, "Null pointer");
 177   return (SetEnvironmentVariable(name, NULL) == TRUE);
 178 }
 179 
 180 // No setuid programs under Windows.
 181 bool os::have_special_privileges() {
 182   return false;
 183 }
 184 
 185 
 186 // This method is a periodic task to check for misbehaving JNI applications
 187 // under CheckJNI; we can add any periodic checks here.
 188 // On Windows it currently does nothing.
 189 void os::run_periodic_checks() {
 190   return;
 191 }
 192 
 193 // previous UnhandledExceptionFilter, if there is one
 194 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 195 
 196 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 197 
 198 void os::init_system_properties_values() {
 199   // sysclasspath, java_home, dll_dir
 200   {
 201     char *home_path;
 202     char *dll_path;
 203     char *pslash;
 204     const char *bin = "\\bin";
 205     char home_dir[MAX_PATH + 1];
 206     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 207 
 208     if (alt_home_dir != NULL)  {
 209       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 210       home_dir[MAX_PATH] = '\0';
 211     } else {
 212       os::jvm_path(home_dir, sizeof(home_dir));
 213       // Found the full path to jvm.dll.
 214       // Now cut the path to <java_home>/jre if we can.
 215       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 216       pslash = strrchr(home_dir, '\\');
 217       if (pslash != NULL) {
 218         *pslash = '\0';                   // get rid of \{client|server}
 219         pslash = strrchr(home_dir, '\\');
 220         if (pslash != NULL) {
 221           *pslash = '\0';                 // get rid of \bin
 222         }
 223       }
 224     }
 225 
 226     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 227     if (home_path == NULL) {
 228       return;
 229     }
 230     strcpy(home_path, home_dir);
 231     Arguments::set_java_home(home_path);
 232     FREE_C_HEAP_ARRAY(char, home_path);
 233 
 234     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 235                                 mtInternal);
 236     if (dll_path == NULL) {
 237       return;
 238     }
 239     strcpy(dll_path, home_dir);
 240     strcat(dll_path, bin);
 241     Arguments::set_dll_dir(dll_path);
 242     FREE_C_HEAP_ARRAY(char, dll_path);
 243 
 244     if (!set_boot_path('\\', ';')) {
 245       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 246     }
 247   }
 248 
 249 // library_path
 250 #define EXT_DIR "\\lib\\ext"
 251 #define BIN_DIR "\\bin"
 252 #define PACKAGE_DIR "\\Sun\\Java"
 253   {
 254     // Win32 library search order (See the documentation for LoadLibrary):
 255     //
 256     // 1. The directory from which the application is loaded.
 257     // 2. The system wide Java Extensions directory (Java only)
 258     // 3. System directory (GetSystemDirectory)
 259     // 4. Windows directory (GetWindowsDirectory)
 260     // 5. The PATH environment variable
 261     // 6. The current directory
 262 
 263     char *library_path;
 264     char tmp[MAX_PATH];
 265     char *path_str = ::getenv("PATH");
 266 
 267     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 268                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 269 
 270     library_path[0] = '\0';
 271 
 272     GetModuleFileName(NULL, tmp, sizeof(tmp));
 273     *(strrchr(tmp, '\\')) = '\0';
 274     strcat(library_path, tmp);
 275 
 276     GetWindowsDirectory(tmp, sizeof(tmp));
 277     strcat(library_path, ";");
 278     strcat(library_path, tmp);
 279     strcat(library_path, PACKAGE_DIR BIN_DIR);
 280 
 281     GetSystemDirectory(tmp, sizeof(tmp));
 282     strcat(library_path, ";");
 283     strcat(library_path, tmp);
 284 
 285     GetWindowsDirectory(tmp, sizeof(tmp));
 286     strcat(library_path, ";");
 287     strcat(library_path, tmp);
 288 
 289     if (path_str) {
 290       strcat(library_path, ";");
 291       strcat(library_path, path_str);
 292     }
 293 
 294     strcat(library_path, ";.");
 295 
 296     Arguments::set_library_path(library_path);
 297     FREE_C_HEAP_ARRAY(char, library_path);
 298   }
 299 
 300   // Default extensions directory
 301   {
 302     char path[MAX_PATH];
 303     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 304     GetWindowsDirectory(path, MAX_PATH);
 305     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 306             path, PACKAGE_DIR, EXT_DIR);
 307     Arguments::set_ext_dirs(buf);
 308   }
 309   #undef EXT_DIR
 310   #undef BIN_DIR
 311   #undef PACKAGE_DIR
 312 
 313 #ifndef _WIN64
 314   // set our UnhandledExceptionFilter and save any previous one
 315   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 316 #endif
 317 
 318   // Done
 319   return;
 320 }
 321 
 322 void os::breakpoint() {
 323   DebugBreak();
 324 }
 325 
 326 // Invoked from the BREAKPOINT Macro
 327 extern "C" void breakpoint() {
 328   os::breakpoint();
 329 }
 330 
 331 // The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 332 // So far, this method is only used by Native Memory Tracking, which is
 333 // only supported on Windows XP or later.
 334 //
 335 int os::get_native_stack(address* stack, int frames, int toSkip) {
 336   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 337   for (int index = captured; index < frames; index ++) {
 338     stack[index] = NULL;
 339   }
 340   return captured;
 341 }
 342 
 343 
 344 // os::current_stack_base()
 345 //
 346 //   Returns the base of the stack, which is the stack's
 347 //   starting address.  This function must be called
 348 //   while running on the stack of the thread being queried.
 349 
 350 address os::current_stack_base() {
 351   MEMORY_BASIC_INFORMATION minfo;
 352   address stack_bottom;
 353   size_t stack_size;
 354 
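       // Querying the address of a stack-allocated local (&minfo) returns information
       // about the memory region the current thread's stack lives in.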
 355   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 356   stack_bottom =  (address)minfo.AllocationBase;
 357   stack_size = minfo.RegionSize;
 358 
 359   // Add up the sizes of all the regions with the same
 360   // AllocationBase.
 361   while (1) {
 362     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 363     if (stack_bottom == (address)minfo.AllocationBase) {
 364       stack_size += minfo.RegionSize;
 365     } else {
 366       break;
 367     }
 368   }
 369   return stack_bottom + stack_size;
 370 }
 371 
 372 size_t os::current_stack_size() {
 373   size_t sz;
 374   MEMORY_BASIC_INFORMATION minfo;
 375   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 376   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 377   return sz;
 378 }
 379 
 380 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 381   MEMORY_BASIC_INFORMATION minfo;
 382   committed_start = NULL;
 383   committed_size = 0;
 384   address top = start + size;
 385   const address start_addr = start;
 386   while (start < top) {
 387     VirtualQuery(start, &minfo, sizeof(minfo));
 388     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 389       if (committed_start != NULL) {
 390         break;
 391       }
 392     } else {  // committed
 393       if (committed_start == NULL) {
 394         committed_start = start;
 395       }
 396       size_t offset = start - (address)minfo.BaseAddress;
 397       committed_size += minfo.RegionSize - offset;
 398     }
 399     start = (address)minfo.BaseAddress + minfo.RegionSize;
 400   }
 401 
 402   if (committed_start == NULL) {
 403     assert(committed_size == 0, "Sanity");
 404     return false;
 405   } else {
 406     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 407     // current region may go beyond the limit, trim to the limit
 408     committed_size = MIN2(committed_size, size_t(top - committed_start));
 409     return true;
 410   }
 411 }
 412 
 413 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 414   const struct tm* time_struct_ptr = localtime(clock);
 415   if (time_struct_ptr != NULL) {
 416     *res = *time_struct_ptr;
 417     return res;
 418   }
 419   return NULL;
 420 }
 421 
 422 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 423   const struct tm* time_struct_ptr = gmtime(clock);
 424   if (time_struct_ptr != NULL) {
 425     *res = *time_struct_ptr;
 426     return res;
 427   }
 428   return NULL;
 429 }
 430 
 431 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 432 
 433 // Thread start routine for all newly created threads
 434 static unsigned __stdcall thread_native_entry(Thread* thread) {
 435 
 436   thread->record_stack_base_and_size();
 437 
 438   // Try to randomize the cache line index of hot stack frames.
 439   // This helps when threads with the same stack traces evict each other's
 440   // cache lines. The threads can be either from the same JVM instance or
 441   // from different JVM instances. The benefit is especially noticeable on
 442   // processors with hyperthreading technology.
 443   static int counter = 0;
 444   int pid = os::current_process_id();
 445   _alloca(((pid ^ counter++) & 7) * 128);
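       // ((pid ^ counter) & 7) is a value in [0, 7], so the _alloca above moves the
       // stack pointer down by 0..896 bytes in 128-byte steps.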
 446 
 447   thread->initialize_thread_current();
 448 
 449   OSThread* osthr = thread->osthread();
 450   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 451 
 452   if (UseNUMA) {
 453     int lgrp_id = os::numa_get_group_id();
 454     if (lgrp_id != -1) {
 455       thread->set_lgrp_id(lgrp_id);
 456     }
 457   }
 458 
 459   // Diagnostic code to investigate JDK-6573254
 460   int res = 30115;  // non-java thread
 461   if (thread->is_Java_thread()) {
 462     res = 20115;    // java thread
 463   }
 464 
 465   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 466 
 467   // Install a win32 structured exception handler around every thread created
 468   // by the VM, so the VM can generate an error dump when an exception occurs
 469   // in a non-Java thread (e.g. the VM thread).
 470   __try {
 471     thread->call_run();
 472   } __except(topLevelExceptionFilter(
 473                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 474     // Nothing to do.
 475   }
 476 
 477   // Note: at this point the thread object may already have deleted itself.
 478   // Do not dereference it from here on out.
 479 
 480   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 481 
 482   // One less thread is executing
 483   // When the VMThread gets here, the main thread may have already exited,
 484   // which frees the CodeHeap containing the Atomic::add code.
 485   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 486     Atomic::dec(&os::win32::_os_thread_count);
 487   }
 488 
 489   // Thread must not return from exit_process_or_thread(), but if it does,
 490   // let it proceed to exit normally
 491   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 492 }
 493 
 494 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 495                                   int thread_id) {
 496   // Allocate the OSThread object
 497   OSThread* osthread = new OSThread(NULL, NULL);
 498   if (osthread == NULL) return NULL;
 499 
 500   // Initialize the JDK library's interrupt event.
 501   // This should really be done when OSThread is constructed,
 502   // but there is no way for a constructor to report failure to
 503   // allocate the event.
 504   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 505   if (interrupt_event == NULL) {
 506     delete osthread;
 507     return NULL;
 508   }
 509   osthread->set_interrupt_event(interrupt_event);
 510 
 511   // Store info on the Win32 thread into the OSThread
 512   osthread->set_thread_handle(thread_handle);
 513   osthread->set_thread_id(thread_id);
 514 
 515   if (UseNUMA) {
 516     int lgrp_id = os::numa_get_group_id();
 517     if (lgrp_id != -1) {
 518       thread->set_lgrp_id(lgrp_id);
 519     }
 520   }
 521 
 522   // Initial thread state is INITIALIZED, not SUSPENDED
 523   osthread->set_state(INITIALIZED);
 524 
 525   return osthread;
 526 }
 527 
 528 
 529 bool os::create_attached_thread(JavaThread* thread) {
 530 #ifdef ASSERT
 531   thread->verify_not_published();
 532 #endif
 533   HANDLE thread_h;
 534   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 535                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 536     fatal("DuplicateHandle failed\n");
 537   }
 538   OSThread* osthread = create_os_thread(thread, thread_h,
 539                                         (int)current_thread_id());
 540   if (osthread == NULL) {
 541     return false;
 542   }
 543 
 544   // Initial thread state is RUNNABLE
 545   osthread->set_state(RUNNABLE);
 546 
 547   thread->set_osthread(osthread);
 548 
 549   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 550     os::current_thread_id());
 551 
 552   return true;
 553 }
 554 
 555 bool os::create_main_thread(JavaThread* thread) {
 556 #ifdef ASSERT
 557   thread->verify_not_published();
 558 #endif
 559   if (_starting_thread == NULL) {
 560     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 561     if (_starting_thread == NULL) {
 562       return false;
 563     }
 564   }
 565 
 566   // The primordial thread is runnable from the start.
 567   _starting_thread->set_state(RUNNABLE);
 568 
 569   thread->set_osthread(_starting_thread);
 570   return true;
 571 }
 572 
 573 // Helper function to trace _beginthreadex attributes,
 574 //  similar to os::Posix::describe_pthread_attr()
 575 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 576                                                size_t stacksize, unsigned initflag) {
 577   stringStream ss(buf, buflen);
 578   if (stacksize == 0) {
 579     ss.print("stacksize: default, ");
 580   } else {
 581     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 582   }
 583   ss.print("flags: ");
 584   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 585   #define ALL(X) \
 586     X(CREATE_SUSPENDED) \
 587     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 588   ALL(PRINT_FLAG)
 589   #undef ALL
 590   #undef PRINT_FLAG
 591   return buf;
 592 }
 593 
 594 // Allocate and initialize a new OSThread
 595 bool os::create_thread(Thread* thread, ThreadType thr_type,
 596                        size_t stack_size) {
 597   unsigned thread_id;
 598 
 599   // Allocate the OSThread object
 600   OSThread* osthread = new OSThread(NULL, NULL);
 601   if (osthread == NULL) {
 602     return false;
 603   }
 604 
 605   // Initialize the JDK library's interrupt event.
 606   // This should really be done when OSThread is constructed,
 607   // but there is no way for a constructor to report failure to
 608   // allocate the event.
 609   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 610   if (interrupt_event == NULL) {
 611     delete osthread;
 612     return false;
 613   }
 614   osthread->set_interrupt_event(interrupt_event);
 615   osthread->set_interrupted(false);
 616 
 617   thread->set_osthread(osthread);
 618 
 619   if (stack_size == 0) {
 620     switch (thr_type) {
 621     case os::java_thread:
 622       // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss.
 623       if (JavaThread::stack_size_at_create() > 0) {
 624         stack_size = JavaThread::stack_size_at_create();
 625       }
 626       break;
 627     case os::compiler_thread:
 628       if (CompilerThreadStackSize > 0) {
 629         stack_size = (size_t)(CompilerThreadStackSize * K);
 630         break;
 631       } // else fall through:
 632         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 633     case os::vm_thread:
 634     case os::pgc_thread:
 635     case os::cgc_thread:
 636     case os::watcher_thread:
 637       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 638       break;
 639     }
 640   }
 641 
 642   // Create the Win32 thread
 643   //
 644   // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
 645   // does not specify stack size. Instead, it specifies the size of
 646   // initially committed space. The stack size is determined by
 647   // PE header in the executable. If the committed "stack_size" is larger
 648   // than default value in the PE header, the stack is rounded up to the
 649   // nearest multiple of 1MB. For example if the launcher has default
 650   // stack size of 320k, specifying any size less than 320k does not
 651   // affect the actual stack size at all, it only affects the initial
 652   // commitment. On the other hand, specifying 'stack_size' larger than
 653   // default value may cause significant increase in memory usage, because
 654   // not only the stack space will be rounded up to MB, but also the
 655   // entire space is committed upfront.
 656   //
 657   // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
 658   // for CreateThread() that can treat 'stack_size' as the stack size. However, we
 659   // are not supposed to call CreateThread() directly according to the MSDN
 660   // documentation, because the JVM uses the C runtime library. The good news is
 661   // that the flag appears to work with _beginthreadex() as well.
 662 
 663   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 664   HANDLE thread_handle =
 665     (HANDLE)_beginthreadex(NULL,
 666                            (unsigned)stack_size,
 667                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 668                            thread,
 669                            initflag,
 670                            &thread_id);
 671 
 672   char buf[64];
 673   if (thread_handle != NULL) {
 674     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 675       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 676   } else {
 677     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 678       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 679     // Log some OS information which might explain why creating the thread failed.
 680     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 681     LogStream st(Log(os, thread)::info());
 682     os::print_memory_info(&st);
 683   }
 684 
 685   if (thread_handle == NULL) {
 686     // Need to clean up stuff we've allocated so far
 687     CloseHandle(osthread->interrupt_event());
 688     thread->set_osthread(NULL);
 689     delete osthread;
 690     return false;
 691   }
 692 
 693   Atomic::inc(&os::win32::_os_thread_count);
 694 
 695   // Store info on the Win32 thread into the OSThread
 696   osthread->set_thread_handle(thread_handle);
 697   osthread->set_thread_id(thread_id);
 698 
 699   // Initial thread state is INITIALIZED, not SUSPENDED
 700   osthread->set_state(INITIALIZED);
 701 
 702   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 703   return true;
 704 }
 705 
 706 
 707 // Free Win32 resources related to the OSThread
 708 void os::free_thread(OSThread* osthread) {
 709   assert(osthread != NULL, "osthread not set");
 710 
 711   // We are told to free resources of the argument thread,
 712   // but we can only really operate on the current thread.
 713   assert(Thread::current()->osthread() == osthread,
 714          "os::free_thread but not current thread");
 715 
 716   CloseHandle(osthread->thread_handle());
 717   CloseHandle(osthread->interrupt_event());
 718   delete osthread;
 719 }
 720 
 721 static jlong first_filetime;
 722 static jlong initial_performance_count;
 723 static jlong performance_frequency;
 724 
 725 
 726 jlong as_long(LARGE_INTEGER x) {
 727   jlong result = 0; // initialization to avoid warning
 728   set_high(&result, x.HighPart);
 729   set_low(&result, x.LowPart);
 730   return result;
 731 }
 732 
 733 
 734 jlong os::elapsed_counter() {
 735   LARGE_INTEGER count;
 736   QueryPerformanceCounter(&count);
 737   return as_long(count) - initial_performance_count;
 738 }
 739 
 740 
 741 jlong os::elapsed_frequency() {
 742   return performance_frequency;
 743 }
 744 
 745 
 746 julong os::available_memory() {
 747   return win32::available_memory();
 748 }
 749 
 750 julong os::win32::available_memory() {
 751   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
 752   // incorrect value if total memory is larger than 4GB.
 753   MEMORYSTATUSEX ms;
 754   ms.dwLength = sizeof(ms);
 755   GlobalMemoryStatusEx(&ms);
 756 
 757   return (julong)ms.ullAvailPhys;
 758 }
 759 
 760 julong os::physical_memory() {
 761   return win32::physical_memory();
 762 }
 763 
 764 bool os::has_allocatable_memory_limit(julong* limit) {
 765   MEMORYSTATUSEX ms;
 766   ms.dwLength = sizeof(ms);
 767   GlobalMemoryStatusEx(&ms);
 768 #ifdef _LP64
 769   *limit = (julong)ms.ullAvailVirtual;
 770   return true;
 771 #else
 772   // Limit to 1400m because of the 2gb address space wall
 773   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 774   return true;
 775 #endif
 776 }
 777 
 778 int os::active_processor_count() {
 779   // User has overridden the number of active processors
 780   if (ActiveProcessorCount > 0) {
 781     log_trace(os)("active_processor_count: "
 782                   "active processor count set by user : %d",
 783                   ActiveProcessorCount);
 784     return ActiveProcessorCount;
 785   }
 786 
 787   DWORD_PTR lpProcessAffinityMask = 0;
 788   DWORD_PTR lpSystemAffinityMask = 0;
 789   int proc_count = processor_count();
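       // The affinity mask is a single DWORD_PTR, so it can describe at most
       // sizeof(UINT_PTR) * BitsPerByte logical processors; for larger systems
       // fall back to processor_count().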
 790   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 791       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 792     // The number of active processors is the number of set bits in the process affinity mask.
 793     int bitcount = 0;
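         // Count set bits by repeatedly clearing the lowest set bit (Kernighan's method).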
 794     while (lpProcessAffinityMask != 0) {
 795       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 796       bitcount++;
 797     }
 798     return bitcount;
 799   } else {
 800     return proc_count;
 801   }
 802 }
 803 
 804 void os::set_native_thread_name(const char *name) {
 805 
 806   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 807   //
 808   // Note that unfortunately this only works if the process
 809   // is already attached to a debugger; the debugger must observe
 810   // the exception below to show the correct name.
 811 
 812   // If there is no debugger attached skip raising the exception
 813   if (!IsDebuggerPresent()) {
 814     return;
 815   }
 816 
 817   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 818   struct {
 819     DWORD dwType;     // must be 0x1000
 820     LPCSTR szName;    // pointer to name (in user addr space)
 821     DWORD dwThreadID; // thread ID (-1=caller thread)
 822     DWORD dwFlags;    // reserved for future use, must be zero
 823   } info;
 824 
 825   info.dwType = 0x1000;
 826   info.szName = name;
 827   info.dwThreadID = -1;
 828   info.dwFlags = 0;
 829 
 830   __try {
 831     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 832   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 833 }
 834 
 835 bool os::distribute_processes(uint length, uint* distribution) {
 836   // Not yet implemented.
 837   return false;
 838 }
 839 
 840 bool os::bind_to_processor(uint processor_id) {
 841   // Not yet implemented.
 842   return false;
 843 }
 844 
 845 void os::win32::initialize_performance_counter() {
 846   LARGE_INTEGER count;
 847   QueryPerformanceFrequency(&count);
 848   performance_frequency = as_long(count);
 849   QueryPerformanceCounter(&count);
 850   initial_performance_count = as_long(count);
 851 }
 852 
 853 
 854 double os::elapsedTime() {
 855   return (double) elapsed_counter() / (double) elapsed_frequency();
 856 }
 857 
 858 
 859 // Windows format:
 860 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 861 // Java format:
 862 //   Java standards require the number of milliseconds since 1/1/1970
 863 
 864 // Constant offset - calculated using offset()
 865 static jlong  _offset   = 116444736000000000;
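     // 116444736000000000 = 11,644,473,600 seconds between 1601-01-01 and 1970-01-01,
     // expressed in 100-nanosecond FILETIME units (seconds * 10^7).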
 866 // Fake time counter for reproducible results when debugging
 867 static jlong  fake_time = 0;
 868 
 869 #ifdef ASSERT
 870 // Just to be safe, recalculate the offset in debug mode
 871 static jlong _calculated_offset = 0;
 872 static int   _has_calculated_offset = 0;
 873 
 874 jlong offset() {
 875   if (_has_calculated_offset) return _calculated_offset;
 876   SYSTEMTIME java_origin;
 877   java_origin.wYear          = 1970;
 878   java_origin.wMonth         = 1;
 879   java_origin.wDayOfWeek     = 0; // ignored
 880   java_origin.wDay           = 1;
 881   java_origin.wHour          = 0;
 882   java_origin.wMinute        = 0;
 883   java_origin.wSecond        = 0;
 884   java_origin.wMilliseconds  = 0;
 885   FILETIME jot;
 886   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 887     fatal("Error = %d\nWindows error", GetLastError());
 888   }
 889   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 890   _has_calculated_offset = 1;
 891   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 892   return _calculated_offset;
 893 }
 894 #else
 895 jlong offset() {
 896   return _offset;
 897 }
 898 #endif
 899 
 900 jlong windows_to_java_time(FILETIME wt) {
 901   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
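       // 'a' is in 100 ns units since 1601; subtract the offset to rebase to 1970,
       // then divide by 10,000 to convert 100 ns units to milliseconds.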
 902   return (a - offset()) / 10000;
 903 }
 904 
 905 // Returns time ticks in 100-nanosecond units (tenths of microseconds).
 906 jlong windows_to_time_ticks(FILETIME wt) {
 907   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 908   return (a - offset());
 909 }
 910 
 911 FILETIME java_to_windows_time(jlong l) {
 912   jlong a = (l * 10000) + offset();
 913   FILETIME result;
 914   result.dwHighDateTime = high(a);
 915   result.dwLowDateTime  = low(a);
 916   return result;
 917 }
 918 
 919 bool os::supports_vtime() { return true; }
 920 bool os::enable_vtime() { return false; }
 921 bool os::vtime_enabled() { return false; }
 922 
 923 double os::elapsedVTime() {
 924   FILETIME created;
 925   FILETIME exited;
 926   FILETIME kernel;
 927   FILETIME user;
 928   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 929     // the resolution of windows_to_java_time() should be sufficient (ms)
 930     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 931   } else {
 932     return elapsedTime();
 933   }
 934 }
 935 
 936 jlong os::javaTimeMillis() {
 937   FILETIME wt;
 938   GetSystemTimeAsFileTime(&wt);
 939   return windows_to_java_time(wt);
 940 }
 941 
 942 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 943   FILETIME wt;
 944   GetSystemTimeAsFileTime(&wt);
 945   jlong ticks = windows_to_time_ticks(wt); // 100 ns units (tenths of micros)
 946   jlong secs = jlong(ticks / 10000000); // 10,000,000 hundred-ns units per second
 947   seconds = secs;
 948   nanos = jlong(ticks - (secs*10000000)) * 100;
 949 }
 950 
 951 jlong os::javaTimeNanos() {
 952     LARGE_INTEGER current_count;
 953     QueryPerformanceCounter(&current_count);
 954     double current = as_long(current_count);
 955     double freq = performance_frequency;
 956     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 957     return time;
 958 }
 959 
 960 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 961   jlong freq = performance_frequency;
 962   if (freq < NANOSECS_PER_SEC) {
 963     // the performance counter is 64 bits and we will
 964     // be multiplying it -- so no wrap in 64 bits
 965     info_ptr->max_value = ALL_64_BITS;
 966   } else if (freq > NANOSECS_PER_SEC) {
 967     // use the max value the counter can reach to
 968     // determine the max value which could be returned
 969     julong max_counter = (julong)ALL_64_BITS;
 970     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 971   } else {
 972     // the performance counter is 64 bits and we will
 973     // be using it directly -- so no wrap in 64 bits
 974     info_ptr->max_value = ALL_64_BITS;
 975   }
 976 
 977   // using a counter, so no skipping
 978   info_ptr->may_skip_backward = false;
 979   info_ptr->may_skip_forward = false;
 980 
 981   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 982 }
 983 
 984 char* os::local_time_string(char *buf, size_t buflen) {
 985   SYSTEMTIME st;
 986   GetLocalTime(&st);
 987   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 988                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 989   return buf;
 990 }
 991 
 992 bool os::getTimesSecs(double* process_real_time,
 993                       double* process_user_time,
 994                       double* process_system_time) {
 995   HANDLE h_process = GetCurrentProcess();
 996   FILETIME create_time, exit_time, kernel_time, user_time;
 997   BOOL result = GetProcessTimes(h_process,
 998                                 &create_time,
 999                                 &exit_time,
1000                                 &kernel_time,
1001                                 &user_time);
1002   if (result != 0) {
1003     FILETIME wt;
1004     GetSystemTimeAsFileTime(&wt);
1005     jlong rtc_millis = windows_to_java_time(wt);
1006     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
1007     *process_user_time =
1008       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
1009     *process_system_time =
1010       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1011     return true;
1012   } else {
1013     return false;
1014   }
1015 }
1016 
1017 void os::shutdown() {
1018   // allow PerfMemory to attempt cleanup of any persistent resources
1019   perfMemory_exit();
1020 
1021   // flush buffered output, finish log files
1022   ostream_abort();
1023 
1024   // Check for abort hook
1025   abort_hook_t abort_hook = Arguments::abort_hook();
1026   if (abort_hook != NULL) {
1027     abort_hook();
1028   }
1029 }
1030 
1031 
1032 static HANDLE dumpFile = NULL;
1033 
1034 // Check if dump file can be created.
1035 void os::check_dump_limit(char* buffer, size_t buffsz) {
1036   bool status = true;
1037   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1038     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1039     status = false;
1040   }
1041 
1042 #ifndef ASSERT
1043   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1044     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1045     status = false;
1046   }
1047 #endif
1048 
1049   if (status) {
1050     const char* cwd = get_current_directory(NULL, 0);
1051     int pid = current_process_id();
1052     if (cwd != NULL) {
1053       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1054     } else {
1055       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1056     }
1057 
1058     if (dumpFile == NULL &&
1059        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1060                  == INVALID_HANDLE_VALUE) {
1061       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1062       status = false;
1063     }
1064   }
1065   VMError::record_coredump_status(buffer, status);
1066 }
1067 
1068 void os::abort(bool dump_core, void* siginfo, const void* context) {
1069   EXCEPTION_POINTERS ep;
1070   MINIDUMP_EXCEPTION_INFORMATION mei;
1071   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1072 
1073   HANDLE hProcess = GetCurrentProcess();
1074   DWORD processId = GetCurrentProcessId();
1075   MINIDUMP_TYPE dumpType;
1076 
1077   shutdown();
1078   if (!dump_core || dumpFile == NULL) {
1079     if (dumpFile != NULL) {
1080       CloseHandle(dumpFile);
1081     }
1082     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1083   }
1084 
1085   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1086     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1087 
1088   if (siginfo != NULL && context != NULL) {
1089     ep.ContextRecord = (PCONTEXT) context;
1090     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1091 
1092     mei.ThreadId = GetCurrentThreadId();
1093     mei.ExceptionPointers = &ep;
1094     pmei = &mei;
1095   } else {
1096     pmei = NULL;
1097   }
1098 
1099   // Older versions of dbghelp.dll (the one shipped with Win2003, for example) may not support all
1100   // the dump types we really want. If the first call fails, fall back to using just MiniDumpWithFullMemory.
1101   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1102       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1103     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1104   }
1105   CloseHandle(dumpFile);
1106   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1107 }
1108 
1109 // Die immediately, no exit hook, no abort hook, no cleanup.
1110 void os::die() {
1111   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1112 }
1113 
1114 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1115 //  * dirent_md.c       1.15 00/02/02
1116 //
1117 // The declarations for DIR and struct dirent are in jvm_win32.h.
1118 
1119 // Caller must have already run dirname through JVM_NativePath, which removes
1120 // duplicate slashes and converts all instances of '/' into '\\'.
1121 
1122 DIR * os::opendir(const char *dirname) {
1123   assert(dirname != NULL, "just checking");   // hotspot change
1124   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1125   DWORD fattr;                                // hotspot change
1126   char alt_dirname[4] = { 0, 0, 0, 0 };
1127 
1128   if (dirp == 0) {
1129     errno = ENOMEM;
1130     return 0;
1131   }
1132 
1133   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1134   // as a directory in FindFirstFile().  We detect this case here and
1135   // prepend the current drive name.
1136   //
1137   if (dirname[1] == '\0' && dirname[0] == '\\') {
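         // _getdrive() returns 1 for drive A:, 2 for B:, and so on, so this
         // computes the current drive letter.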
1138     alt_dirname[0] = _getdrive() + 'A' - 1;
1139     alt_dirname[1] = ':';
1140     alt_dirname[2] = '\\';
1141     alt_dirname[3] = '\0';
1142     dirname = alt_dirname;
1143   }
1144 
1145   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1146   if (dirp->path == 0) {
1147     free(dirp);
1148     errno = ENOMEM;
1149     return 0;
1150   }
1151   strcpy(dirp->path, dirname);
1152 
1153   fattr = GetFileAttributes(dirp->path);
1154   if (fattr == 0xffffffff) {
1155     free(dirp->path);
1156     free(dirp);
1157     errno = ENOENT;
1158     return 0;
1159   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1160     free(dirp->path);
1161     free(dirp);
1162     errno = ENOTDIR;
1163     return 0;
1164   }
1165 
1166   // Append "*.*", or possibly "\\*.*", to path
1167   if (dirp->path[1] == ':' &&
1168       (dirp->path[2] == '\0' ||
1169       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1170     // No '\\' needed for cases like "Z:" or "Z:\"
1171     strcat(dirp->path, "*.*");
1172   } else {
1173     strcat(dirp->path, "\\*.*");
1174   }
1175 
1176   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1177   if (dirp->handle == INVALID_HANDLE_VALUE) {
1178     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1179       free(dirp->path);
1180       free(dirp);
1181       errno = EACCES;
1182       return 0;
1183     }
1184   }
1185   return dirp;
1186 }
1187 
1188 struct dirent * os::readdir(DIR *dirp) {
1189   assert(dirp != NULL, "just checking");      // hotspot change
1190   if (dirp->handle == INVALID_HANDLE_VALUE) {
1191     return NULL;
1192   }
1193 
1194   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1195 
1196   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1197     if (GetLastError() == ERROR_INVALID_HANDLE) {
1198       errno = EBADF;
1199       return NULL;
1200     }
1201     FindClose(dirp->handle);
1202     dirp->handle = INVALID_HANDLE_VALUE;
1203   }
1204 
1205   return &dirp->dirent;
1206 }
1207 
1208 int os::closedir(DIR *dirp) {
1209   assert(dirp != NULL, "just checking");      // hotspot change
1210   if (dirp->handle != INVALID_HANDLE_VALUE) {
1211     if (!FindClose(dirp->handle)) {
1212       errno = EBADF;
1213       return -1;
1214     }
1215     dirp->handle = INVALID_HANDLE_VALUE;
1216   }
1217   free(dirp->path);
1218   free(dirp);
1219   return 0;
1220 }
1221 
1222 // This must be hard coded because it's the system's temporary
1223 // directory, not the Java application's temp directory (a la java.io.tmpdir).
1224 const char* os::get_temp_directory() {
1225   static char path_buf[MAX_PATH];
1226   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1227     return path_buf;
1228   } else {
1229     path_buf[0] = '\0';
1230     return path_buf;
1231   }
1232 }
1233 
1234 // Needs to be in os specific directory because windows requires another
1235 // header file <direct.h>
1236 const char* os::get_current_directory(char *buf, size_t buflen) {
1237   int n = static_cast<int>(buflen);
1238   if (buflen > INT_MAX)  n = INT_MAX;
1239   return _getcwd(buf, n);
1240 }
1241 
1242 //-----------------------------------------------------------
1243 // Helper functions for fatal error handler
1244 #ifdef _WIN64
1245 // Helper routine which returns true if the address is
1246 // within the NTDLL address space.
1247 //
1248 static bool _addr_in_ntdll(address addr) {
1249   HMODULE hmod;
1250   MODULEINFO minfo;
1251 
1252   hmod = GetModuleHandle("NTDLL.DLL");
1253   if (hmod == NULL) return false;
1254   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1255                                           &minfo, sizeof(MODULEINFO))) {
1256     return false;
1257   }
1258 
1259   if ((addr >= minfo.lpBaseOfDll) &&
1260       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1261     return true;
1262   } else {
1263     return false;
1264   }
1265 }
1266 #endif
1267 
1268 struct _modinfo {
1269   address addr;
1270   char*   full_path;   // point to a char buffer
1271   int     buflen;      // size of the buffer
1272   address base_addr;
1273 };
1274 
1275 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1276                                   address top_address, void * param) {
1277   struct _modinfo *pmod = (struct _modinfo *)param;
1278   if (!pmod) return -1;
1279 
1280   if (base_addr   <= pmod->addr &&
1281       top_address > pmod->addr) {
1282     // if a buffer is provided, copy path name to the buffer
1283     if (pmod->full_path) {
1284       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1285     }
1286     pmod->base_addr = base_addr;
1287     return 1;
1288   }
1289   return 0;
1290 }
1291 
1292 bool os::dll_address_to_library_name(address addr, char* buf,
1293                                      int buflen, int* offset) {
1294   // buf is not optional, but offset is optional
1295   assert(buf != NULL, "sanity check");
1296 
1297 // NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
1298 //       return the full path to the DLL file; sometimes it returns the path
1299 //       to the corresponding PDB file (debug info), and sometimes it only
1300 //       returns a partial path, which makes life painful.
1301 
1302   struct _modinfo mi;
1303   mi.addr      = addr;
1304   mi.full_path = buf;
1305   mi.buflen    = buflen;
1306   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1307     // buf already contains path name
1308     if (offset) *offset = addr - mi.base_addr;
1309     return true;
1310   }
1311 
1312   buf[0] = '\0';
1313   if (offset) *offset = -1;
1314   return false;
1315 }
1316 
1317 bool os::dll_address_to_function_name(address addr, char *buf,
1318                                       int buflen, int *offset,
1319                                       bool demangle) {
1320   // buf is not optional, but offset is optional
1321   assert(buf != NULL, "sanity check");
1322 
1323   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1324     return true;
1325   }
1326   if (offset != NULL)  *offset  = -1;
1327   buf[0] = '\0';
1328   return false;
1329 }
1330 
1331 // save the start and end address of jvm.dll into param[0] and param[1]
1332 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1333                            address top_address, void * param) {
1334   if (!param) return -1;
1335 
1336   if (base_addr   <= (address)_locate_jvm_dll &&
1337       top_address > (address)_locate_jvm_dll) {
1338     ((address*)param)[0] = base_addr;
1339     ((address*)param)[1] = top_address;
1340     return 1;
1341   }
1342   return 0;
1343 }
1344 
1345 address vm_lib_location[2];    // start and end address of jvm.dll
1346 
1347 // check if addr is inside jvm.dll
1348 bool os::address_is_in_vm(address addr) {
1349   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1350     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1351       assert(false, "Can't find jvm module.");
1352       return false;
1353     }
1354   }
1355 
1356   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1357 }
1358 
1359 // print module info; param is outputStream*
1360 static int _print_module(const char* fname, address base_address,
1361                          address top_address, void* param) {
1362   if (!param) return -1;
1363 
1364   outputStream* st = (outputStream*)param;
1365 
1366   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1367   return 0;
1368 }
1369 
1370 // Loads a .dll/.so and,
1371 // in case of error, checks whether the .dll/.so was built for the
1372 // same architecture that Hotspot is running on.
1373 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1374   log_info(os)("attempting shared library load of %s", name);
1375 
1376   void * result = LoadLibrary(name);
1377   if (result != NULL) {
1378     Events::log(NULL, "Loaded shared library %s", name);
1379     // Recalculate pdb search path if a DLL was loaded successfully.
1380     SymbolEngine::recalc_search_path();
1381     log_info(os)("shared library load of %s was successful", name);
1382     return result;
1383   }
1384   DWORD errcode = GetLastError();
1385   // Read system error message into ebuf
1386   // It may or may not be overwritten below (in the for loop and just above)
1387   lasterror(ebuf, (size_t) ebuflen);
1388   ebuf[ebuflen - 1] = '\0';
1389   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1390   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1391 
1392   if (errcode == ERROR_MOD_NOT_FOUND) {
1393     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1394     ebuf[ebuflen - 1] = '\0';
1395     return NULL;
1396   }
1397 
1398   // Parsing the dll below.
1399   // If we can read the dll-info and find that the dll was built
1400   // for an architecture other than the one Hotspot is running on,
1401   // then print to the buffer "DLL was built for a different architecture";
1402   // else call os::lasterror to obtain the system error message.
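       // (The 2-byte value read below is the Machine field of the COFF file header,
       // e.g. IMAGE_FILE_MACHINE_I386 = 0x014c, IMAGE_FILE_MACHINE_AMD64 = 0x8664.)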
1403   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1404   if (fd < 0) {
1405     return NULL;
1406   }
1407 
1408   uint32_t signature_offset;
1409   uint16_t lib_arch = 0;
1410   bool failed_to_get_lib_arch =
1411     ( // Go to position 3c in the dll
1412      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1413      ||
1414      // Read location of signature
1415      (sizeof(signature_offset) !=
1416      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1417      ||
1418      // Go to COFF File Header in dll
1419      // that is located after "signature" (4 bytes long)
1420      (os::seek_to_file_offset(fd,
1421      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1422      ||
1423      // Read field that contains code of architecture
1424      // that dll was built for
1425      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1426     );
1427 
1428   ::close(fd);
1429   if (failed_to_get_lib_arch) {
1430     // file i/o error - report os::lasterror(...) msg
1431     return NULL;
1432   }
1433 
1434   typedef struct {
1435     uint16_t arch_code;
1436     char* arch_name;
1437   } arch_t;
1438 
1439   static const arch_t arch_array[] = {
1440     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1441     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
1442   };
1443 #if (defined _M_AMD64)
1444   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1445 #elif (defined _M_IX86)
1446   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1447 #else
1448   #error Method os::dll_load requires that one of following \
1449          is defined :_M_AMD64 or _M_IX86
1450 #endif
1451 
1452 
1453   // Obtain strings for the printf operation:
1454   // lib_arch_str shall contain the platform this .dll was built for,
1455   // running_arch_str shall contain the platform Hotspot was built for.
1456   char *running_arch_str = NULL, *lib_arch_str = NULL;
1457   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1458     if (lib_arch == arch_array[i].arch_code) {
1459       lib_arch_str = arch_array[i].arch_name;
1460     }
1461     if (running_arch == arch_array[i].arch_code) {
1462       running_arch_str = arch_array[i].arch_name;
1463     }
1464   }
1465 
1466   assert(running_arch_str,
1467          "Didn't find running architecture code in arch_array");
1468 
1469   // If the architecture is right
1470   // but some other error took place - report os::lasterror(...) msg
1471   if (lib_arch == running_arch) {
1472     return NULL;
1473   }
1474 
1475   if (lib_arch_str != NULL) {
1476     ::_snprintf(ebuf, ebuflen - 1,
1477                 "Can't load %s-bit .dll on a %s-bit platform",
1478                 lib_arch_str, running_arch_str);
1479   } else {
1480     // don't know what architecture this dll was built for
1481     ::_snprintf(ebuf, ebuflen - 1,
1482                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1483                 lib_arch, running_arch_str);
1484   }
1485 
1486   return NULL;
1487 }
1488 
1489 void os::print_dll_info(outputStream *st) {
1490   st->print_cr("Dynamic libraries:");
1491   get_loaded_modules_info(_print_module, (void *)st);
1492 }
1493 
1494 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1495   HANDLE   hProcess;
1496 
1497 # define MAX_NUM_MODULES 128
1498   HMODULE     modules[MAX_NUM_MODULES];
1499   static char filename[MAX_PATH];
1500   int         result = 0;
1501 
1502   int pid = os::current_process_id();
1503   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1504                          FALSE, pid);
1505   if (hProcess == NULL) return 0;
1506 
1507   DWORD size_needed;
1508   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1509     CloseHandle(hProcess);
1510     return 0;
1511   }
1512 
1513   // number of modules that are currently loaded
1514   int num_modules = size_needed / sizeof(HMODULE);
1515 
1516   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1517     // Get Full pathname:
1518     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1519       filename[0] = '\0';
1520     }
1521 
1522     MODULEINFO modinfo;
1523     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1524       modinfo.lpBaseOfDll = NULL;
1525       modinfo.SizeOfImage = 0;
1526     }
1527 
1528     // Invoke callback function
1529     result = callback(filename, (address)modinfo.lpBaseOfDll,
1530                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1531     if (result) break;
1532   }
1533 
1534   CloseHandle(hProcess);
1535   return result;
1536 }
1537 
1538 bool os::get_host_name(char* buf, size_t buflen) {
1539   DWORD size = (DWORD)buflen;
1540   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1541 }
1542 
1543 void os::get_summary_os_info(char* buf, size_t buflen) {
1544   stringStream sst(buf, buflen);
1545   os::win32::print_windows_version(&sst);
1546   // chop off newline character
1547   char* nl = strchr(buf, '\n');
1548   if (nl != NULL) *nl = '\0';
1549 }
1550 
1551 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1552 #if _MSC_VER >= 1900
1553   // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1554   int result = ::vsnprintf(buf, len, fmt, args);
1555   // If an encoding error occurred (result < 0) then it's not clear
1556   // whether the buffer is NUL terminated, so ensure it is.
1557   if ((result < 0) && (len > 0)) {
1558     buf[len - 1] = '\0';
1559   }
1560   return result;
1561 #else
1562   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1563   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1564   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1565   // go straight to _vscprintf.  The output is going to be truncated in
1566   // that case, except in the unusual case of empty output.  More
1567   // importantly, the documentation for various versions of Visual Studio
1568   // is inconsistent about the behavior of _vsnprintf when len == 0,
1569   // including it possibly being an error.
1570   int result = -1;
1571   if (len > 0) {
1572     result = _vsnprintf(buf, len, fmt, args);
1573     // If output (including NUL terminator) is truncated, the buffer
1574     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1575     if ((result < 0) || ((size_t)result >= len)) {
1576       buf[len - 1] = '\0';
1577     }
1578   }
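       // _vscprintf returns the number of characters the formatted output would
       // contain (excluding the terminating NUL), matching the C99 vsnprintf
       // return value on truncation.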
1579   if (result < 0) {
1580     result = _vscprintf(fmt, args);
1581   }
1582   return result;
1583 #endif // _MSC_VER dispatch
1584 }
1585 
1586 static inline time_t get_mtime(const char* filename) {
1587   struct stat st;
1588   int ret = os::stat(filename, &st);
1589   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1590   return st.st_mtime;
1591 }
1592 
1593 int os::compare_file_modified_times(const char* file1, const char* file2) {
1594   time_t t1 = get_mtime(file1);
1595   time_t t2 = get_mtime(file2);
1596   return t1 - t2;
1597 }
1598 
1599 void os::print_os_info_brief(outputStream* st) {
1600   os::print_os_info(st);
1601 }
1602 
1603 void os::print_os_info(outputStream* st) {
1604 #ifdef ASSERT
1605   char buffer[1024];
1606   st->print("HostName: ");
1607   if (get_host_name(buffer, sizeof(buffer))) {
1608     st->print("%s ", buffer);
1609   } else {
1610     st->print("N/A ");
1611   }
1612 #endif
1613   st->print("OS:");
1614   os::win32::print_windows_version(st);
1615 
1616 #ifdef _LP64
1617   VM_Version::print_platform_virtualization_info(st);
1618 #endif
1619 }
1620 
1621 void os::win32::print_windows_version(outputStream* st) {
1622   OSVERSIONINFOEX osvi;
1623   VS_FIXEDFILEINFO *file_info;
1624   TCHAR kernel32_path[MAX_PATH];
1625   UINT len, ret;
1626 
1627   // Use the GetVersionEx information to see if we're on a server or
1628   // workstation edition of Windows. Starting with Windows 8.1 we can't
1629   // trust the OS version information returned by this API.
1630   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1631   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1632   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1633     st->print_cr("Call to GetVersionEx failed");
1634     return;
1635   }
1636   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1637 
1638   // Get the full path to \Windows\System32\kernel32.dll and use that for
1639   // determining what version of Windows we're running on.
1640   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1641   ret = GetSystemDirectory(kernel32_path, len);
1642   if (ret == 0 || ret > len) {
1643     st->print_cr("Call to GetSystemDirectory failed");
1644     return;
1645   }
1646   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1647 
1648   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1649   if (version_size == 0) {
1650     st->print_cr("Call to GetFileVersionInfoSize failed");
1651     return;
1652   }
1653 
1654   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1655   if (version_info == NULL) {
1656     st->print_cr("Failed to allocate version_info");
1657     return;
1658   }
1659 
1660   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1661     os::free(version_info);
1662     st->print_cr("Call to GetFileVersionInfo failed");
1663     return;
1664   }
1665 
1666   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1667     os::free(version_info);
1668     st->print_cr("Call to VerQueryValue failed");
1669     return;
1670   }
1671 
1672   int major_version = HIWORD(file_info->dwProductVersionMS);
1673   int minor_version = LOWORD(file_info->dwProductVersionMS);
1674   int build_number = HIWORD(file_info->dwProductVersionLS);
1675   int build_minor = LOWORD(file_info->dwProductVersionLS);
1676   int os_vers = major_version * 1000 + minor_version;
1677   os::free(version_info);
1678 
1679   st->print(" Windows ");
1680   switch (os_vers) {
1681 
1682   case 6000:
1683     if (is_workstation) {
1684       st->print("Vista");
1685     } else {
1686       st->print("Server 2008");
1687     }
1688     break;
1689 
1690   case 6001:
1691     if (is_workstation) {
1692       st->print("7");
1693     } else {
1694       st->print("Server 2008 R2");
1695     }
1696     break;
1697 
1698   case 6002:
1699     if (is_workstation) {
1700       st->print("8");
1701     } else {
1702       st->print("Server 2012");
1703     }
1704     break;
1705 
1706   case 6003:
1707     if (is_workstation) {
1708       st->print("8.1");
1709     } else {
1710       st->print("Server 2012 R2");
1711     }
1712     break;
1713 
1714   case 10000:
1715     if (is_workstation) {
1716       st->print("10");
1717     } else {
      // Distinguish Windows Server 2016 from Server 2019 by build number;
      // Windows Server 2019 (GA 10/2018) is build 17763.
1720       if (build_number > 17762) {
1721         st->print("Server 2019");
1722       } else {
1723         st->print("Server 2016");
1724       }
1725     }
1726     break;
1727 
1728   default:
    // Unrecognized Windows version; print out its major and minor versions.
1730     st->print("%d.%d", major_version, minor_version);
1731     break;
1732   }
1733 
  // Retrieve SYSTEM_INFO via GetNativeSystemInfo so that we can find out
  // whether we are running on a 64-bit processor.
1736   SYSTEM_INFO si;
1737   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1738   GetNativeSystemInfo(&si);
1739   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1740     st->print(" , 64 bit");
1741   }
1742 
1743   st->print(" Build %d", build_number);
1744   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1745   st->cr();
1746 }
1747 
1748 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1749   // Nothing to do for now.
1750 }
1751 
1752 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1753   HKEY key;
1754   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1755                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1756   if (status == ERROR_SUCCESS) {
1757     DWORD size = (DWORD)buflen;
1758     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1759     if (status != ERROR_SUCCESS) {
1760         strncpy(buf, "## __CPU__", buflen);
1761     }
1762     RegCloseKey(key);
1763   } else {
    // Put generic CPU info in the buffer to return.
1765     strncpy(buf, "## __CPU__", buflen);
1766   }
1767 }
1768 
1769 void os::print_memory_info(outputStream* st) {
1770   st->print("Memory:");
1771   st->print(" %dk page", os::vm_page_size()>>10);
1772 
1773   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1774   // value if total memory is larger than 4GB
1775   MEMORYSTATUSEX ms;
1776   ms.dwLength = sizeof(ms);
1777   int r1 = GlobalMemoryStatusEx(&ms);
1778 
1779   if (r1 != 0) {
1780     st->print(", system-wide physical " INT64_FORMAT "M ",
1781              (int64_t) ms.ullTotalPhys >> 20);
1782     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1783 
1784     st->print("TotalPageFile size " INT64_FORMAT "M ",
1785              (int64_t) ms.ullTotalPageFile >> 20);
1786     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1787              (int64_t) ms.ullAvailPageFile >> 20);
1788 
    // On 32-bit systems Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit)
1790 #if defined(_M_IX86)
1791     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1792              (int64_t) ms.ullTotalVirtual >> 20);
1793     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1794 #endif
1795   } else {
1796     st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
1797   }
1798 
1799   // extended memory statistics for a process
1800   PROCESS_MEMORY_COUNTERS_EX pmex;
1801   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1802   pmex.cb = sizeof(pmex);
1803   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1804 
1805   if (r2 != 0) {
1806     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1807              (int64_t) pmex.WorkingSetSize >> 20);
1808     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1809 
1810     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1811              (int64_t) pmex.PrivateUsage >> 20);
1812     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1813   } else {
1814     st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
1815   }
1816 
1817   st->cr();
1818 }
1819 
1820 bool os::signal_sent_by_kill(const void* siginfo) {
1821   // TODO: Is this possible?
1822   return false;
1823 }
1824 
1825 void os::print_siginfo(outputStream *st, const void* siginfo) {
1826   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1827   st->print("siginfo:");
1828 
1829   char tmp[64];
1830   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1831     strcpy(tmp, "EXCEPTION_??");
1832   }
1833   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1834 
1835   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1836        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1837        er->NumberParameters >= 2) {
1838     switch (er->ExceptionInformation[0]) {
1839     case 0: st->print(", reading address"); break;
1840     case 1: st->print(", writing address"); break;
1841     case 8: st->print(", data execution prevention violation at address"); break;
1842     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1843                        er->ExceptionInformation[0]);
1844     }
1845     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1846   } else {
1847     int num = er->NumberParameters;
1848     if (num > 0) {
1849       st->print(", ExceptionInformation=");
1850       for (int i = 0; i < num; i++) {
1851         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1852       }
1853     }
1854   }
1855   st->cr();
1856 }
1857 
1858 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1859   // TODO: Can we kill thread?
1860   return false;
1861 }
1862 
1863 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1864   // do nothing
1865 }
1866 
1867 static char saved_jvm_path[MAX_PATH] = {0};
1868 
1869 // Find the full path to the current module, jvm.dll
1870 void os::jvm_path(char *buf, jint buflen) {
1871   // Error checking.
1872   if (buflen < MAX_PATH) {
1873     assert(false, "must use a large-enough buffer");
1874     buf[0] = '\0';
1875     return;
1876   }
  // Lazily resolve the path to the current module.
1878   if (saved_jvm_path[0] != 0) {
1879     strcpy(buf, saved_jvm_path);
1880     return;
1881   }
1882 
1883   buf[0] = '\0';
1884   if (Arguments::sun_java_launcher_is_altjvm()) {
1885     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1886     // for a JAVA_HOME environment variable and fix up the path so it
1887     // looks like jvm.dll is installed there (append a fake suffix
1888     // hotspot/jvm.dll).
1889     char* java_home_var = ::getenv("JAVA_HOME");
1890     if (java_home_var != NULL && java_home_var[0] != 0 &&
1891         strlen(java_home_var) < (size_t)buflen) {
1892       strncpy(buf, java_home_var, buflen);
1893 
      // Determine whether this is a legacy image or a modules image; a modules
      // image doesn't have a "jre" subdirectory (e.g. with JAVA_HOME=C:\jdk the
      // result is C:\jdk\bin\hotspot\jvm.dll rather than C:\jdk\jre\bin\hotspot\jvm.dll).
1896       size_t len = strlen(buf);
1897       char* jrebin_p = buf + len;
1898       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1899       if (0 != _access(buf, 0)) {
1900         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1901       }
1902       len = strlen(buf);
1903       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1904     }
1905   }
1906 
1907   if (buf[0] == '\0') {
1908     GetModuleFileName(vm_lib_handle, buf, buflen);
1909   }
1910   strncpy(saved_jvm_path, buf, MAX_PATH);
1911   saved_jvm_path[MAX_PATH - 1] = '\0';
1912 }
1913 
1914 
1915 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1916 #ifndef _WIN64
1917   st->print("_");
1918 #endif
1919 }
1920 
1921 
1922 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1923 #ifndef _WIN64
1924   st->print("@%d", args_size  * sizeof(int));
1925 #endif
1926 }
1927 
1928 // This method is a copy of JDK's sysGetLastErrorString
1929 // from src/windows/hpi/src/system_md.c
1930 
1931 size_t os::lasterror(char* buf, size_t len) {
1932   DWORD errval;
1933 
1934   if ((errval = GetLastError()) != 0) {
1935     // DOS error
1936     size_t n = (size_t)FormatMessage(
1937                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1938                                      NULL,
1939                                      errval,
1940                                      0,
1941                                      buf,
1942                                      (DWORD)len,
1943                                      NULL);
1944     if (n > 3) {
1945       // Drop final '.', CR, LF
1946       if (buf[n - 1] == '\n') n--;
1947       if (buf[n - 1] == '\r') n--;
1948       if (buf[n - 1] == '.') n--;
1949       buf[n] = '\0';
1950     }
1951     return n;
1952   }
1953 
1954   if (errno != 0) {
1955     // C runtime error that has no corresponding DOS error code
1956     const char* s = os::strerror(errno);
1957     size_t n = strlen(s);
1958     if (n >= len) n = len - 1;
1959     strncpy(buf, s, n);
1960     buf[n] = '\0';
1961     return n;
1962   }
1963 
1964   return 0;
1965 }
1966 
1967 int os::get_last_error() {
1968   DWORD error = GetLastError();
1969   if (error == 0) {
1970     error = errno;
1971   }
1972   return (int)error;
1973 }
1974 
1975 // sun.misc.Signal
1976 // NOTE that this is a workaround for an apparent kernel bug where if
1977 // a signal handler for SIGBREAK is installed then that signal handler
1978 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1979 // See bug 4416763.
1980 static void (*sigbreakHandler)(int) = NULL;
1981 
1982 static void UserHandler(int sig, void *siginfo, void *context) {
1983   os::signal_notify(sig);
1984   // We need to reinstate the signal handler each time...
1985   os::signal(sig, (void*)UserHandler);
1986 }
1987 
1988 void* os::user_handler() {
1989   return (void*) UserHandler;
1990 }
1991 
1992 void* os::signal(int signal_number, void* handler) {
1993   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1994     void (*oldHandler)(int) = sigbreakHandler;
1995     sigbreakHandler = (void (*)(int)) handler;
1996     return (void*) oldHandler;
1997   } else {
1998     return (void*)::signal(signal_number, (void (*)(int))handler);
1999   }
2000 }
2001 
2002 void os::signal_raise(int signal_number) {
2003   raise(signal_number);
2004 }
2005 
2006 // The Win32 C runtime library maps all console control events other than ^C
2007 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2008 // logoff, and shutdown events.  We therefore install our own console handler
2009 // that raises SIGTERM for the latter cases.
2010 //
2011 static BOOL WINAPI consoleHandler(DWORD event) {
2012   switch (event) {
2013   case CTRL_C_EVENT:
2014     if (VMError::is_error_reported()) {
2015       // Ctrl-C is pressed during error reporting, likely because the error
2016       // handler fails to abort. Let VM die immediately.
2017       os::die();
2018     }
2019 
2020     os::signal_raise(SIGINT);
2021     return TRUE;
2022     break;
2023   case CTRL_BREAK_EVENT:
2024     if (sigbreakHandler != NULL) {
2025       (*sigbreakHandler)(SIGBREAK);
2026     }
2027     return TRUE;
2028     break;
2029   case CTRL_LOGOFF_EVENT: {
2030     // Don't terminate JVM if it is running in a non-interactive session,
2031     // such as a service process.
2032     USEROBJECTFLAGS flags;
2033     HANDLE handle = GetProcessWindowStation();
2034     if (handle != NULL &&
2035         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2036         sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let the next handler deal
      // with it.
2039       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2040         return FALSE;
2041       }
2042     }
2043   }
2044   case CTRL_CLOSE_EVENT:
2045   case CTRL_SHUTDOWN_EVENT:
2046     os::signal_raise(SIGTERM);
2047     return TRUE;
2048     break;
2049   default:
2050     break;
2051   }
2052   return FALSE;
2053 }
2054 
// The following code was moved here from os.cpp to make it platform specific,
// which it is by its very nature.
2057 
2058 // Return maximum OS signal used + 1 for internal use only
2059 // Used as exit signal for signal_thread
2060 int os::sigexitnum_pd() {
2061   return NSIG;
2062 }
2063 
2064 // a counter for each possible signal value, including signal_thread exit signal
2065 static volatile jint pending_signals[NSIG+1] = { 0 };
2066 static Semaphore* sig_sem = NULL;
2067 
2068 static void jdk_misc_signal_init() {
2069   // Initialize signal structures
2070   memset((void*)pending_signals, 0, sizeof(pending_signals));
2071 
2072   // Initialize signal semaphore
2073   sig_sem = new Semaphore();
2074 
2075   // Programs embedding the VM do not want it to attempt to receive
2076   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2077   // shutdown hooks mechanism introduced in 1.3.  For example, when
2078   // the VM is run as part of a Windows NT service (i.e., a servlet
2079   // engine in a web server), the correct behavior is for any console
2080   // control handler to return FALSE, not TRUE, because the OS's
2081   // "final" handler for such events allows the process to continue if
2082   // it is a service (while terminating it if it is not a service).
2083   // To make this behavior uniform and the mechanism simpler, we
2084   // completely disable the VM's usage of these console events if -Xrs
2085   // (=ReduceSignalUsage) is specified.  This means, for example, that
2086   // the CTRL-BREAK thread dump mechanism is also disabled in this
2087   // case.  See bugs 4323062, 4345157, and related bugs.
2088 
2089   // Add a CTRL-C handler
2090   SetConsoleCtrlHandler(consoleHandler, TRUE);
2091 }
2092 
2093 void os::signal_notify(int sig) {
2094   if (sig_sem != NULL) {
2095     Atomic::inc(&pending_signals[sig]);
2096     sig_sem->signal();
2097   } else {
    // With ReduceSignalUsage the signal thread is not created and
    // jdk_misc_signal_init() is not called.
2100     assert(ReduceSignalUsage, "signal semaphore should be created");
2101   }
2102 }
2103 
2104 static int check_pending_signals() {
2105   while (true) {
2106     for (int i = 0; i < NSIG + 1; i++) {
2107       jint n = pending_signals[i];
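      // Atomically claim one pending occurrence of signal i: the cmpxchg only
      // succeeds if the counter still holds the value n we just read, so no two
      // threads can consume the same pending signal.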
2108       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2109         return i;
2110       }
2111     }
2112     JavaThread *thread = JavaThread::current();
2113 
2114     ThreadBlockInVM tbivm(thread);
2115 
2116     bool threadIsSuspended;
2117     do {
2118       thread->set_suspend_equivalent();
2119       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2120       sig_sem->wait();
2121 
2122       // were we externally suspended while we were waiting?
2123       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2124       if (threadIsSuspended) {
2125         // The semaphore has been incremented, but while we were waiting
2126         // another thread suspended us. We don't want to continue running
2127         // while suspended because that would surprise the thread that
2128         // suspended us.
2129         sig_sem->signal();
2130 
2131         thread->java_suspend_self();
2132       }
2133     } while (threadIsSuspended);
2134   }
2135 }
2136 
2137 int os::signal_wait() {
2138   return check_pending_signals();
2139 }
2140 
2141 // Implicit OS exception handling
2142 
2143 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2144                       address handler) {
2145   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2146   // Save pc in thread
2147 #ifdef _M_AMD64
2148   // Do not blow up if no thread info available.
2149   if (thread) {
2150     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2151   }
2152   // Set pc to handler
2153   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2154 #else
2155   // Do not blow up if no thread info available.
2156   if (thread) {
2157     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2158   }
2159   // Set pc to handler
2160   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2161 #endif
2162 
2163   // Continue the execution
2164   return EXCEPTION_CONTINUE_EXECUTION;
2165 }
2166 
2167 
2168 // Used for PostMortemDump
2169 extern "C" void safepoints();
2170 extern "C" void find(int x);
2171 extern "C" void events();
2172 
// According to the Windows API documentation, an illegal instruction sequence
// should generate the 0xC000001C exception code. However, real-world experience
// shows that occasionally the execution of an illegal instruction can generate
// the exception code 0xC000001E. This seems to be an undocumented feature of
// Win NT 4.0 (and probably other Windows systems).
2177 
2178 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2179 
2180 // From "Execution Protection in the Windows Operating System" draft 0.35
2181 // Once a system header becomes available, the "real" define should be
2182 // included or copied here.
2183 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2184 
2185 // Windows Vista/2008 heap corruption check
2186 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2187 
2188 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2189 // C++ compiler contain this error code. Because this is a compiler-generated
2190 // error, the code is not listed in the Win32 API header files.
2191 // The code is actually a cryptic mnemonic device, with the initial "E"
2192 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2193 // ASCII values of "msc".
2194 
2195 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2196 
2197 #define def_excpt(val) { #val, (val) }
2198 
2199 static const struct { const char* name; uint number; } exceptlabels[] = {
2200     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2201     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2202     def_excpt(EXCEPTION_BREAKPOINT),
2203     def_excpt(EXCEPTION_SINGLE_STEP),
2204     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2205     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2206     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2207     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2208     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2209     def_excpt(EXCEPTION_FLT_OVERFLOW),
2210     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2211     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2212     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2213     def_excpt(EXCEPTION_INT_OVERFLOW),
2214     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2215     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2216     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2217     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2218     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2219     def_excpt(EXCEPTION_STACK_OVERFLOW),
2220     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2221     def_excpt(EXCEPTION_GUARD_PAGE),
2222     def_excpt(EXCEPTION_INVALID_HANDLE),
2223     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2224     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2225 };
2226 
2227 #undef def_excpt
2228 
2229 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2230   uint code = static_cast<uint>(exception_code);
2231   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2232     if (exceptlabels[i].number == code) {
2233       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2234       return buf;
2235     }
2236   }
2237 
2238   return NULL;
2239 }
2240 
2241 //-----------------------------------------------------------------------------
2242 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2243   // handle exception caused by idiv; should only happen for -MinInt/-1
2244   // (division by zero is handled explicitly)
2245 #ifdef  _M_AMD64
2246   PCONTEXT ctx = exceptionInfo->ContextRecord;
2247   address pc = (address)ctx->Rip;
2248   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2249   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2250   if (pc[0] == 0xF7) {
2251     // set correct result values and continue after idiv instruction
2252     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2253   } else {
2254     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2255   }
  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation).
  // This is the case because the exception only happens for -MinValue/-1, and -MinValue is always in rax because of the
2258   // idiv opcode (0xF7).
2259   ctx->Rdx = (DWORD)0;             // remainder
2260   // Continue the execution
2261 #else
2262   PCONTEXT ctx = exceptionInfo->ContextRecord;
2263   address pc = (address)ctx->Eip;
2264   assert(pc[0] == 0xF7, "not an idiv opcode");
2265   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2266   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2267   // set correct result values and continue after idiv instruction
2268   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2269   ctx->Eax = (DWORD)min_jint;      // result
2270   ctx->Edx = (DWORD)0;             // remainder
2271   // Continue the execution
2272 #endif
2273   return EXCEPTION_CONTINUE_EXECUTION;
2274 }
2275 
2276 //-----------------------------------------------------------------------------
2277 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2278   PCONTEXT ctx = exceptionInfo->ContextRecord;
2279 #ifndef  _WIN64
2280   // handle exception caused by native method modifying control word
2281   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2282 
2283   switch (exception_code) {
2284   case EXCEPTION_FLT_DENORMAL_OPERAND:
2285   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2286   case EXCEPTION_FLT_INEXACT_RESULT:
2287   case EXCEPTION_FLT_INVALID_OPERATION:
2288   case EXCEPTION_FLT_OVERFLOW:
2289   case EXCEPTION_FLT_STACK_CHECK:
2290   case EXCEPTION_FLT_UNDERFLOW:
2291     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2292     if (fp_control_word != ctx->FloatSave.ControlWord) {
2293       // Restore FPCW and mask out FLT exceptions
2294       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2295       // Mask out pending FLT exceptions
2296       ctx->FloatSave.StatusWord &=  0xffffff00;
2297       return EXCEPTION_CONTINUE_EXECUTION;
2298     }
2299   }
2300 
2301   if (prev_uef_handler != NULL) {
2302     // We didn't handle this exception so pass it to the previous
2303     // UnhandledExceptionFilter.
2304     return (prev_uef_handler)(exceptionInfo);
2305   }
2306 #else // !_WIN64
2307   // On Windows, the mxcsr control bits are non-volatile across calls
2308   // See also CR 6192333
2309   //
2310   jint MxCsr = INITIAL_MXCSR;
2311   // we can't use StubRoutines::addr_mxcsr_std()
2312   // because in Win64 mxcsr is not saved there
2313   if (MxCsr != ctx->MxCsr) {
2314     ctx->MxCsr = MxCsr;
2315     return EXCEPTION_CONTINUE_EXECUTION;
2316   }
2317 #endif // !_WIN64
2318 
2319   return EXCEPTION_CONTINUE_SEARCH;
2320 }
2321 
2322 static inline void report_error(Thread* t, DWORD exception_code,
2323                                 address addr, void* siginfo, void* context) {
2324   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2325 
2326   // If UseOsErrorReporting, this will return here and save the error file
2327   // somewhere where we can find it in the minidump.
2328 }
2329 
2330 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2331         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2332   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2333   address addr = (address) exceptionRecord->ExceptionInformation[1];
2334   if (Interpreter::contains(pc)) {
2335     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2336     if (!fr->is_first_java_frame()) {
2337       // get_frame_at_stack_banging_point() is only called when we
2338       // have well defined stacks so java_sender() calls do not need
2339       // to assert safe_for_sender() first.
2340       *fr = fr->java_sender();
2341     }
2342   } else {
2343     // more complex code with compiled code
2344     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2345     CodeBlob* cb = CodeCache::find_blob(pc);
2346     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2347       // Not sure where the pc points to, fallback to default
2348       // stack overflow handling
2349       return false;
2350     } else {
2351       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2352       // in compiled code, the stack banging is performed just after the return pc
2353       // has been pushed on the stack
2354       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2355       if (!fr->is_java_frame()) {
2356         // See java_sender() comment above.
2357         *fr = fr->java_sender();
2358       }
2359     }
2360   }
2361   assert(fr->is_java_frame(), "Safety check");
2362   return true;
2363 }
2364 
2365 #if INCLUDE_AOT
2366 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2367   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2368   address addr = (address) exceptionRecord->ExceptionInformation[1];
2369   address pc = (address) exceptionInfo->ContextRecord->Rip;
2370 
2371   // Handle the case where we get an implicit exception in AOT generated
  // code.  Loaded AOT DLLs are not registered for structured exceptions.
2373   // If the exception occurred in the codeCache or AOT code, pass control
2374   // to our normal exception handler.
2375   CodeBlob* cb = CodeCache::find_blob(pc);
2376   if (cb != NULL) {
2377     return topLevelExceptionFilter(exceptionInfo);
2378   }
2379 
2380   return EXCEPTION_CONTINUE_SEARCH;
2381 }
2382 #endif
2383 
2384 //-----------------------------------------------------------------------------
2385 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2386   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2387   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2388 #ifdef _M_AMD64
2389   address pc = (address) exceptionInfo->ContextRecord->Rip;
2390 #else
2391   address pc = (address) exceptionInfo->ContextRecord->Eip;
2392 #endif
2393   Thread* t = Thread::current_or_null_safe();
2394 
2395   // Handle SafeFetch32 and SafeFetchN exceptions.
2396   if (StubRoutines::is_safefetch_fault(pc)) {
2397     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2398   }
2399 
2400 #ifndef _WIN64
  // Execution protection violation - win32 VM running on AMD64 only.
  // It is handled first to avoid misdiagnosis as a "normal" access violation;
  // this is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
2405   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2406     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2407     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2408     address addr = (address) exceptionRecord->ExceptionInformation[1];
2409 
2410     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2411       int page_size = os::vm_page_size();
2412 
2413       // Make sure the pc and the faulting address are sane.
2414       //
2415       // If an instruction spans a page boundary, and the page containing
2416       // the beginning of the instruction is executable but the following
2417       // page is not, the pc and the faulting address might be slightly
2418       // different - we still want to unguard the 2nd page in this case.
2419       //
2420       // 15 bytes seems to be a (very) safe value for max instruction size.
2421       bool pc_is_near_addr =
2422         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2423       bool instr_spans_page_boundary =
2424         (align_down((intptr_t) pc ^ (intptr_t) addr,
2425                          (intptr_t) page_size) > 0);
2426 
2427       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2428         static volatile address last_addr =
2429           (address) os::non_memory_address_word();
2430 
2431         // In conservative mode, don't unguard unless the address is in the VM
2432         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2433             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2434 
2435           // Set memory to RWX and retry
2436           address page_start = align_down(addr, page_size);
2437           bool res = os::protect_memory((char*) page_start, page_size,
2438                                         os::MEM_PROT_RWX);
2439 
2440           log_debug(os)("Execution protection violation "
2441                         "at " INTPTR_FORMAT
2442                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2443                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2444 
2445           // Set last_addr so if we fault again at the same address, we don't
2446           // end up in an endless loop.
2447           //
2448           // There are two potential complications here.  Two threads trapping
2449           // at the same address at the same time could cause one of the
2450           // threads to think it already unguarded, and abort the VM.  Likely
2451           // very rare.
2452           //
2453           // The other race involves two threads alternately trapping at
2454           // different addresses and failing to unguard the page, resulting in
2455           // an endless loop.  This condition is probably even more unlikely
2456           // than the first.
2457           //
2458           // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are an unnecessary complication:
2460           // this handler is a best-effort safety net, not a complete solution.
2461           // It is disabled by default and should only be used as a workaround
2462           // in case we missed any no-execute-unsafe VM code.
2463 
2464           last_addr = addr;
2465 
2466           return EXCEPTION_CONTINUE_EXECUTION;
2467         }
2468       }
2469 
2470       // Last unguard failed or not unguarding
2471       tty->print_raw_cr("Execution protection violation");
2472       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2473                    exceptionInfo->ContextRecord);
2474       return EXCEPTION_CONTINUE_SEARCH;
2475     }
2476   }
#endif // !_WIN64
2478 
2479   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2480       VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that the OS saves/restores AVX registers.
2482     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2483   }
2484 
2485   if (t != NULL && t->is_Java_thread()) {
2486     JavaThread* thread = (JavaThread*) t;
2487     bool in_java = thread->thread_state() == _thread_in_Java;
2488 
2489     // Handle potential stack overflows up front.
2490     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2491       if (thread->stack_guards_enabled()) {
2492         if (in_java) {
2493           frame fr;
2494           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2495           address addr = (address) exceptionRecord->ExceptionInformation[1];
2496           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2497             assert(fr.is_java_frame(), "Must be a Java frame");
2498             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2499           }
2500         }
2501         // Yellow zone violation.  The o/s has unprotected the first yellow
        // zone page for us.  Note: we must call disable_stack_yellow_reserved_zone
        // to update the enabled status, even if the zone contains only one page.
2504         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2505         thread->disable_stack_yellow_reserved_zone();
2506         // If not in java code, return and hope for the best.
2507         return in_java
2508             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2509             :  EXCEPTION_CONTINUE_EXECUTION;
2510       } else {
2511         // Fatal red zone violation.
2512         thread->disable_stack_red_zone();
2513         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2514         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2515                       exceptionInfo->ContextRecord);
2516         return EXCEPTION_CONTINUE_SEARCH;
2517       }
2518     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2519       // Either stack overflow or null pointer exception.
2520       if (in_java) {
2521         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2522         address addr = (address) exceptionRecord->ExceptionInformation[1];
2523         address stack_end = thread->stack_end();
2524         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2525           // Stack overflow.
2526           assert(!os::uses_stack_guard_pages(),
2527                  "should be caught by red zone code above.");
2528           return Handle_Exception(exceptionInfo,
2529                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2530         }
        // Check for safepoint polling and implicit null pointer exceptions.
        // We only expect null pointers in the stubs (vtable);
        // the rest are checked explicitly now.
2534         CodeBlob* cb = CodeCache::find_blob(pc);
2535         if (cb != NULL) {
2536           if (os::is_poll_address(addr)) {
2537             address stub = SharedRuntime::get_poll_stub(pc);
2538             return Handle_Exception(exceptionInfo, stub);
2539           }
2540         }
2541         {
2542 #ifdef _WIN64
          // If it's a legal stack address, map the entire region in.
2544           //
2545           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2546           address addr = (address) exceptionRecord->ExceptionInformation[1];
2547           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2548             addr = (address)((uintptr_t)addr &
2549                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2550             os::commit_memory((char *)addr, thread->stack_base() - addr,
2551                               !ExecMem);
2552             return EXCEPTION_CONTINUE_EXECUTION;
2553           } else
2554 #endif
2555           {
2556             // Null pointer exception.
2557             if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2558               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2559               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2560             }
2561             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2562                          exceptionInfo->ContextRecord);
2563             return EXCEPTION_CONTINUE_SEARCH;
2564           }
2565         }
2566       }
2567 
2568 #ifdef _WIN64
2569       // Special care for fast JNI field accessors.
2570       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2571       // in and the heap gets shrunk before the field access.
2572       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2573         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2574         if (addr != (address)-1) {
2575           return Handle_Exception(exceptionInfo, addr);
2576         }
2577       }
2578 #endif
2579 
2580       // Stack overflow or null pointer exception in native code.
2581       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2582                    exceptionInfo->ContextRecord);
2583       return EXCEPTION_CONTINUE_SEARCH;
2584     } // /EXCEPTION_ACCESS_VIOLATION
2585     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2586 
2587     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2588       CompiledMethod* nm = NULL;
2589       JavaThread* thread = (JavaThread*)t;
2590       if (in_java) {
2591         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2592         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2593       }
2594 
2595       bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2596       if (((thread->thread_state() == _thread_in_vm ||
2597            thread->thread_state() == _thread_in_native ||
2598            is_unsafe_arraycopy) &&
2599           thread->doing_unsafe_access()) ||
2600           (nm != NULL && nm->has_unsafe_access())) {
2601         address next_pc =  Assembler::locate_next_instruction(pc);
2602         if (is_unsafe_arraycopy) {
2603           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2604         }
2605         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2606       }
2607     }
2608 
2609     if (in_java) {
2610       switch (exception_code) {
2611       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2612         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2613 
2614       case EXCEPTION_INT_OVERFLOW:
2615         return Handle_IDiv_Exception(exceptionInfo);
2616 
2617       } // switch
2618     }
2619     if (((thread->thread_state() == _thread_in_Java) ||
2620          (thread->thread_state() == _thread_in_native)) &&
2621          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2622       LONG result=Handle_FLT_Exception(exceptionInfo);
2623       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2624     }
2625   }
2626 
2627   if (exception_code != EXCEPTION_BREAKPOINT) {
2628     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2629                  exceptionInfo->ContextRecord);
2630   }
2631   return EXCEPTION_CONTINUE_SEARCH;
2632 }
2633 
2634 #ifndef _WIN64
2635 // Special care for fast JNI accessors.
2636 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2637 // the heap gets shrunk before the field access.
2638 // Need to install our own structured exception handler since native code may
2639 // install its own.
2640 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2641   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2642   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2643     address pc = (address) exceptionInfo->ContextRecord->Eip;
2644     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2645     if (addr != (address)-1) {
2646       return Handle_Exception(exceptionInfo, addr);
2647     }
2648   }
2649   return EXCEPTION_CONTINUE_SEARCH;
2650 }
2651 
2652 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2653   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2654                                                      jobject obj,           \
2655                                                      jfieldID fieldID) {    \
2656     __try {                                                                 \
2657       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2658                                                                  obj,       \
2659                                                                  fieldID);  \
2660     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2661                                               _exception_info())) {         \
2662     }                                                                       \
2663     return 0;                                                               \
2664   }
2665 
2666 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2667 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2668 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2669 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2670 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2671 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2672 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2673 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2674 
2675 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2676   switch (type) {
2677   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2678   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2679   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2680   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2681   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2682   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2683   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2684   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2685   default:        ShouldNotReachHere();
2686   }
2687   return (address)-1;
2688 }
2689 #endif
2690 
2691 // Virtual Memory
2692 
2693 int os::vm_page_size() { return os::win32::vm_page_size(); }
2694 int os::vm_allocation_granularity() {
2695   return os::win32::vm_allocation_granularity();
2696 }
2697 
2698 // Windows large page support is available on Windows 2003. In order to use
2699 // large page memory, the administrator must first assign additional privilege
2700 // to the user:
2701 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2702 //   + select Local Policies -> User Rights Assignment
2703 //   + double click "Lock pages in memory", add users and/or groups
2704 //   + reboot
2705 // Note the above steps are needed for administrator as well, as administrators
2706 // by default do not have the privilege to lock pages in memory.
2707 //
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment that it only uses large pages if the
// entire memory region is reserved and committed in a single VirtualAlloc()
// call. This makes Windows large page support more or less like Solaris ISM,
// in that the entire heap must be committed upfront. This will probably change
// in the future; if so, the code below needs to be revisited.
2715 
2716 #ifndef MEM_LARGE_PAGES
2717   #define MEM_LARGE_PAGES 0x20000000
2718 #endif
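
// For illustration only (not compiled), a minimal sketch of the single-call
// large page allocation described above; 'heap_bytes' is a hypothetical size
// and the real reservation logic follows later in this file:
//
//   size_t size = align_up(heap_bytes, _large_page_size);
//   char* p = (char*) VirtualAlloc(NULL, size,
//                                  MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
//                                  PAGE_READWRITE);
//   // p is NULL if the "Lock pages in memory" privilege is missing or the
//   // request cannot be satisfied with contiguous large pages.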
2719 
2720 static HANDLE    _hProcess;
2721 static HANDLE    _hToken;
2722 
2723 // Container for NUMA node list info
2724 class NUMANodeListHolder {
2725  private:
2726   int *_numa_used_node_list;  // allocated below
2727   int _numa_used_node_count;
2728 
2729   void free_node_list() {
2730     if (_numa_used_node_list != NULL) {
2731       FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2732     }
2733   }
2734 
2735  public:
2736   NUMANodeListHolder() {
2737     _numa_used_node_count = 0;
2738     _numa_used_node_list = NULL;
2739     // do rest of initialization in build routine (after function pointers are set up)
2740   }
2741 
2742   ~NUMANodeListHolder() {
2743     free_node_list();
2744   }
2745 
2746   bool build() {
2747     DWORD_PTR proc_aff_mask;
2748     DWORD_PTR sys_aff_mask;
2749     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2750     ULONG highest_node_number;
2751     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2752     free_node_list();
2753     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2754     for (unsigned int i = 0; i <= highest_node_number; i++) {
2755       ULONGLONG proc_mask_numa_node;
2756       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2757       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2758         _numa_used_node_list[_numa_used_node_count++] = i;
2759       }
2760     }
2761     return (_numa_used_node_count > 1);
2762   }
2763 
2764   int get_count() { return _numa_used_node_count; }
2765   int get_node_list_entry(int n) {
2766     // for indexes out of range, returns -1
2767     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2768   }
2769 
2770 } numa_node_list_holder;
2771 
2772 
2773 
2774 static size_t _large_page_size = 0;
2775 
2776 static bool request_lock_memory_privilege() {
2777   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2778                           os::current_process_id());
2779 
2780   LUID luid;
2781   if (_hProcess != NULL &&
2782       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2783       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2784 
2785     TOKEN_PRIVILEGES tp;
2786     tp.PrivilegeCount = 1;
2787     tp.Privileges[0].Luid = luid;
2788     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2789 
2790     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See the MSDN documentation.
2792     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2793         (GetLastError() == ERROR_SUCCESS)) {
2794       return true;
2795     }
2796   }
2797 
2798   return false;
2799 }
2800 
2801 static void cleanup_after_large_page_init() {
2802   if (_hProcess) CloseHandle(_hProcess);
2803   _hProcess = NULL;
2804   if (_hToken) CloseHandle(_hToken);
2805   _hToken = NULL;
2806 }
2807 
2808 static bool numa_interleaving_init() {
2809   bool success = false;
2810   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2811 
  // Print a warning if the UseNUMAInterleaving flag is specified on the command line.
2813   bool warn_on_failure = use_numa_interleaving_specified;
2814 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2815 
2816   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2817   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2818   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2819 
2820   if (numa_node_list_holder.build()) {
2821     if (log_is_enabled(Debug, os, cpu)) {
2822       Log(os, cpu) log;
2823       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2824       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2825         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2826       }
2827     }
2828     success = true;
2829   } else {
2830     WARN("Process does not cover multiple NUMA nodes.");
2831   }
2832   if (!success) {
2833     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2834   }
2835   return success;
2836 #undef WARN
2837 }
2838 
// This routine is used whenever we need to reserve a contiguous VA range
// but have to make separate VirtualAlloc calls for each piece of the range.
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003,
//    but it is possible to set it otherwise)
//  * UseNUMAInterleaving requires a separate allocation (on its own node) for each piece
2844 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2845                                          DWORD prot,
2846                                          bool should_inject_error = false) {
2847   char * p_buf;
2848   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2849   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2850   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2851 
  // First reserve enough address space in advance, since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits, but WS2003 does not allow reserving large page space.
  // So we just use 4K pages for the reserve; this gives us a legal contiguous
  // address space. Then we deallocate that reservation and re-allocate
  // using large pages.
2858   const size_t size_of_reserve = bytes + chunk_size;
2859   if (bytes > size_of_reserve) {
2860     // Overflowed.
2861     return NULL;
2862   }
2863   p_buf = (char *) VirtualAlloc(addr,
2864                                 size_of_reserve,  // size of Reserve
2865                                 MEM_RESERVE,
2866                                 PAGE_READWRITE);
2867   // If reservation failed, return NULL
2868   if (p_buf == NULL) return NULL;
2869   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2870   os::release_memory(p_buf, bytes + chunk_size);
2871 
  // We still need to round up to a page boundary (in case we are using large pages),
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with
  // the page size); instead we handle this in the bytes_to_rq computation below.
2875   p_buf = align_up(p_buf, page_size);
2876 
2877   // now go through and allocate one chunk at a time until all bytes are
2878   // allocated
2879   size_t  bytes_remaining = bytes;
2880   // An overflow of align_up() would have been caught above
2881   // in the calculation of size_of_reserve.
2882   char * next_alloc_addr = p_buf;
2883   HANDLE hProc = GetCurrentProcess();
2884 
2885 #ifdef ASSERT
2886   // Variable for the failure injection
2887   int ran_num = os::random();
2888   size_t fail_after = ran_num % bytes;
2889 #endif
2890 
2891   int count=0;
2892   while (bytes_remaining) {
2893     // select bytes_to_rq to get to the next chunk_size boundary
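    // Worked example (illustrative numbers): with a 64K chunk_size and
    // next_alloc_addr 16K past a chunk boundary, the first request is for 48K
    // to reach the next boundary; later requests are whole 64K chunks until
    // bytes_remaining drops below a chunk.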
2894 
2895     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Now allocate and commit the chunk.
2897     char * p_new;
2898 
2899 #ifdef ASSERT
2900     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2901 #else
2902     const bool inject_error_now = false;
2903 #endif
2904 
2905     if (inject_error_now) {
2906       p_new = NULL;
2907     } else {
2908       if (!UseNUMAInterleaving) {
2909         p_new = (char *) VirtualAlloc(next_alloc_addr,
2910                                       bytes_to_rq,
2911                                       flags,
2912                                       prot);
2913       } else {
2914         // get the next node to use from the used_node_list
2915         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2916         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2917         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
2918       }
2919     }
2920 
2921     if (p_new == NULL) {
2922       // Free any allocated pages
2923       if (next_alloc_addr > p_buf) {
2924         // Some memory was committed so release it.
2925         size_t bytes_to_release = bytes - bytes_remaining;
2926         // NMT has yet to record any individual blocks, so it
        // needs to create a dummy 'reserve' record to match
2928         // the release.
2929         MemTracker::record_virtual_memory_reserve((address)p_buf,
2930                                                   bytes_to_release, CALLER_PC);
2931         os::release_memory(p_buf, bytes_to_release);
2932       }
2933 #ifdef ASSERT
2934       if (should_inject_error) {
2935         log_develop_debug(pagesize)("Reserving pages individually failed.");
2936       }
2937 #endif
2938       return NULL;
2939     }
2940 
2941     bytes_remaining -= bytes_to_rq;
2942     next_alloc_addr += bytes_to_rq;
2943     count++;
2944   }
2945   // Although the memory is allocated individually, it is returned as one.
2946   // NMT records it as one block.
2947   if ((flags & MEM_COMMIT) != 0) {
2948     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
2949   } else {
2950     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
2951   }
2952 
2953   // made it this far, success
2954   return p_buf;
2955 }
2956 
2957 
2958 
2959 void os::large_page_init() {
2960   if (!UseLargePages) return;
2961 
2962   // print a warning if any large page related flag is specified on command line
2963   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
2964                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2965   bool success = false;
2966 
2967 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2968   if (request_lock_memory_privilege()) {
2969     size_t s = GetLargePageMinimum();
2970     if (s) {
2971 #if defined(IA32) || defined(AMD64)
2972       if (s > 4*M || LargePageSizeInBytes > 4*M) {
2973         WARN("JVM cannot use large pages bigger than 4mb.");
2974       } else {
2975 #endif
2976         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
2977           _large_page_size = LargePageSizeInBytes;
2978         } else {
2979           _large_page_size = s;
2980         }
2981         success = true;
2982 #if defined(IA32) || defined(AMD64)
2983       }
2984 #endif
2985     } else {
2986       WARN("Large page is not supported by the processor.");
2987     }
2988   } else {
2989     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
2990   }
2991 #undef WARN
2992 
2993   const size_t default_page_size = (size_t) vm_page_size();
2994   if (success && _large_page_size > default_page_size) {
2995     _page_sizes[0] = _large_page_size;
2996     _page_sizes[1] = default_page_size;
2997     _page_sizes[2] = 0;
2998   }
2999 
3000   cleanup_after_large_page_init();
3001   UseLargePages = success;
3002 }
3003 
3004 int os::create_file_for_heap(const char* dir) {
3005 
3006   const char name_template[] = "/jvmheap.XXXXXX";
3007 
3008   size_t fullname_len = strlen(dir) + strlen(name_template);
3009   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3010   if (fullname == NULL) {
3011     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3012     return -1;
3013   }
3014   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3015   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3016 
3017   os::native_path(fullname);
3018 
3019   char *path = _mktemp(fullname);
3020   if (path == NULL) {
3021     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3022     os::free(fullname);
3023     return -1;
3024   }
3025 
3026   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3027 
3028   os::free(fullname);
3029   if (fd < 0) {
3030     warning("Problem opening file for heap (%s)", os::strerror(errno));
3031     return -1;
3032   }
3033   return fd;
3034 }
3035 
3036 // If 'base' is not NULL, the function returns NULL if it cannot map the memory at 'base'
3037 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3038   assert(fd != -1, "File descriptor is not valid");
3039 
3040   HANDLE fh = (HANDLE)_get_osfhandle(fd);
3041 #ifdef _LP64
3042   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3043     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3044 #else
3045   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3046     0, (DWORD)size, NULL);
3047 #endif
3048   if (fileMapping == NULL) {
3049     if (GetLastError() == ERROR_DISK_FULL) {
3050       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3051     }
3052     else {
3053       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3054     }
3055 
3056     return NULL;
3057   }
3058 
3059   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3060 
3061   CloseHandle(fileMapping);
3062 
3063   return (char*)addr;
3064 }
3065 
3066 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3067   assert(fd != -1, "File descriptor is not valid");
3068   assert(base != NULL, "Base address cannot be NULL");
3069 
3070   release_memory(base, size);
3071   return map_memory_to_file(base, size, fd);
3072 }
3073 
3074 // On win32, one cannot release just a part of reserved memory, it's an
3075 // all or nothing deal.  When we split a reservation, we must break the
3076 // reservation into two reservations.
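     // For example, with realloc == true and 0 < split < size, the original single
     // reservation [base, base + size) becomes two independent reservations,
     // [base, base + split) and [base + split, base + size), which can then be
     // released separately.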
3077 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3078                                   bool realloc) {
3079   if (size > 0) {
3080     release_memory(base, size);
3081     if (realloc) {
3082       reserve_memory(split, base);
3083     }
3084     if (size != split) {
3085       reserve_memory(size - split, base + split);
3086     }
3087   }
3088 }
3089 
3090 // Multiple threads can race in this code, but it is not possible to unmap small sections of
3091 // virtual space to get the requested alignment the way POSIX-like OSes can.
3092 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
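     // The strategy below is over-reserve-and-retry: reserve size + alignment bytes,
     // compute the aligned base inside that range, release the whole reservation, then
     // re-reserve exactly 'size' bytes at the aligned base. For example, for size = 2M
     // and alignment = 2M we reserve 4M, align the returned base up to the next 2M
     // boundary, release the 4M reservation, and re-reserve 2M at that boundary; if
     // another thread grabs the address first, the loop simply retries.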
3093 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3094   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3095          "Alignment must be a multiple of allocation granularity (page size)");
3096   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3097 
3098   size_t extra_size = size + alignment;
3099   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3100 
3101   char* aligned_base = NULL;
3102 
3103   do {
3104     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3105     if (extra_base == NULL) {
3106       return NULL;
3107     }
3108     // Do manual alignment
3109     aligned_base = align_up(extra_base, alignment);
3110 
3111     if (file_desc != -1) {
3112       os::unmap_memory(extra_base, extra_size);
3113     } else {
3114       os::release_memory(extra_base, extra_size);
3115     }
3116 
3117     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3118 
3119   } while (aligned_base == NULL);
3120 
3121   return aligned_base;
3122 }
3123 
3124 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3125   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3126          "reserve alignment");
3127   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3128   char* res;
3129   // note that if UseLargePages is on, all the areas that require interleaving
3130   // will go thru reserve_memory_special rather than thru here.
3131   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3132   if (!use_individual) {
3133     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3134   } else {
3135     elapsedTimer reserveTimer;
3136     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3137     // in numa interleaving, we have to allocate pages individually
3138     // (well really chunks of NUMAInterleaveGranularity size)
3139     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3140     if (res == NULL) {
3141       warning("NUMA page allocation failed");
3142     }
3143     if (Verbose && PrintMiscellaneous) {
3144       reserveTimer.stop();
3145       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3146                     reserveTimer.milliseconds(), reserveTimer.ticks());
3147     }
3148   }
3149   assert(res == NULL || addr == NULL || addr == res,
3150          "Unexpected address from reserve.");
3151 
3152   return res;
3153 }
3154 
3155 // Reserve memory at an arbitrary address, only if that area is
3156 // available (and not reserved for something else).
3157 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3158   // Windows os::reserve_memory() fails if the requested address range is
3159   // not available.
3160   return reserve_memory(bytes, requested_addr);
3161 }
3162 
3163 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3164   assert(file_desc >= 0, "file_desc is not valid");
3165   return map_memory_to_file(requested_addr, bytes, file_desc);
3166 }
3167 
3168 size_t os::large_page_size() {
3169   return _large_page_size;
3170 }
3171 
3172 bool os::can_commit_large_page_memory() {
3173   // Windows only uses large page memory when the entire region is reserved
3174   // and committed in a single VirtualAlloc() call. This may change in the
3175   // future, but with Windows 2003 it's not possible to commit on demand.
3176   return false;
3177 }
3178 
3179 bool os::can_execute_large_page_memory() {
3180   return true;
3181 }
3182 
3183 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3184                                  bool exec) {
3185   assert(UseLargePages, "only for large pages");
3186 
3187   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3188     return NULL; // Fallback to small pages.
3189   }
3190 
3191   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3192   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3193 
3194   // with large pages, there are two cases where we need to use Individual Allocation
3195   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3196   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3197   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3198     log_debug(pagesize)("Reserving large pages individually.");
3199 
3200     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3201     if (p_buf == NULL) {
3202       // give an appropriate warning message
3203       if (UseNUMAInterleaving) {
3204         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3205       }
3206       if (UseLargePagesIndividualAllocation) {
3207         warning("Individually allocated large pages failed, "
3208                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3209       }
3210       return NULL;
3211     }
3212 
3213     return p_buf;
3214 
3215   } else {
3216     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3217 
3218     // normal policy: just allocate it all at once
3219     char * res = (char *)VirtualAlloc(addr, bytes, flags, prot);
3221     if (res != NULL) {
3222       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3223     }
3224 
3225     return res;
3226   }
3227 }
3228 
3229 bool os::release_memory_special(char* base, size_t bytes) {
3230   assert(base != NULL, "Sanity check");
3231   return release_memory(base, bytes);
3232 }
3233 
3234 void os::print_statistics() {
3235 }
3236 
3237 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3238   int err = os::get_last_error();
3239   char buf[256];
3240   size_t buf_len = os::lasterror(buf, sizeof(buf));
3241   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3242           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3243           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3244 }
3245 
3246 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3247   if (bytes == 0) {
3248     // Don't bother the OS with noops.
3249     return true;
3250   }
3251   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3252   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3253   // Don't attempt to print anything if the OS call fails. We're
3254   // probably low on resources, so the print itself may cause crashes.
3255 
3256   // unless we have NUMAInterleaving enabled, the range of a commit
3257   // is always within a reserve covered by a single VirtualAlloc
3258   // in that case we can just do a single commit for the requested size
3259   if (!UseNUMAInterleaving) {
3260     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3261       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3262       return false;
3263     }
3264     if (exec) {
3265       DWORD oldprot;
3266       // Windows doc says to use VirtualProtect to get execute permissions
3267       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3268         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3269         return false;
3270       }
3271     }
3272     return true;
3273   } else {
3274 
3275     // when NUMAInterleaving is enabled, the commit might cover a range that
3276     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3277     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3278     // returns represents the number of bytes that can be committed in one step.
3279     size_t bytes_remaining = bytes;
3280     char * next_alloc_addr = addr;
3281     while (bytes_remaining > 0) {
3282       MEMORY_BASIC_INFORMATION alloc_info;
3283       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3284       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3285       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3286                        PAGE_READWRITE) == NULL) {
3287         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3288                                             exec);)
3289         return false;
3290       }
3291       if (exec) {
3292         DWORD oldprot;
3293         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3294                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3295           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3296                                               exec);)
3297           return false;
3298         }
3299       }
3300       bytes_remaining -= bytes_to_rq;
3301       next_alloc_addr += bytes_to_rq;
3302     }
3303   }
3304   // if we made it this far, return true
3305   return true;
3306 }
3307 
3308 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3309                           bool exec) {
3310   // alignment_hint is ignored on this OS
3311   return pd_commit_memory(addr, size, exec);
3312 }
3313 
3314 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3315                                   const char* mesg) {
3316   assert(mesg != NULL, "mesg must be specified");
3317   if (!pd_commit_memory(addr, size, exec)) {
3318     warn_fail_commit_memory(addr, size, exec);
3319     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3320   }
3321 }
3322 
3323 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3324                                   size_t alignment_hint, bool exec,
3325                                   const char* mesg) {
3326   // alignment_hint is ignored on this OS
3327   pd_commit_memory_or_exit(addr, size, exec, mesg);
3328 }
3329 
3330 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3331   if (bytes == 0) {
3332     // Don't bother the OS with noops.
3333     return true;
3334   }
3335   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3336   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3337   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3338 }
3339 
3340 bool os::pd_release_memory(char* addr, size_t bytes) {
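       // Note: VirtualFree with MEM_RELEASE requires a size of 0 and frees the
       // entire region originally reserved at 'addr', so 'bytes' is not passed on.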
3341   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3342 }
3343 
3344 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3345   return os::commit_memory(addr, size, !ExecMem);
3346 }
3347 
3348 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3349   return os::uncommit_memory(addr, size);
3350 }
3351 
3352 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3353   uint count = 0;
3354   bool ret = false;
3355   size_t bytes_remaining = bytes;
3356   char * next_protect_addr = addr;
3357 
3358   // Use VirtualQuery() to get the chunk size.
3359   while (bytes_remaining) {
3360     MEMORY_BASIC_INFORMATION alloc_info;
3361     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3362       return false;
3363     }
3364 
3365     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3366     // We used a different API at allocate_pages_individually() based on UseNUMAInterleaving,
3367     // but we don't distinguish here as both cases are protected by the same API.
3368     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3369     if (!ret) {
3370       warning("Failed protecting pages individually for chunk #%u", count);
3371       return false;
3372     }
3373 
3374     bytes_remaining -= bytes_to_protect;
3375     next_protect_addr += bytes_to_protect;
3376     count++;
3377   }
3378   return ret;
3379 }
3380 
3381 // Set protections specified
3382 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3383                         bool is_committed) {
3384   unsigned int p = 0;
3385   switch (prot) {
3386   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3387   case MEM_PROT_READ: p = PAGE_READONLY; break;
3388   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3389   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3390   default:
3391     ShouldNotReachHere();
3392   }
3393 
3394   DWORD old_status;
3395 
3396   // Strangely enough, on Win32 one can change protection only for committed
3397   // memory; not a big deal anyway, as 'bytes' is less than or equal to 64K here
3398   if (!is_committed) {
3399     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3400                           "cannot commit protection page");
3401   }
3402   // One cannot use os::guard_memory() here, as on Win32 guard pages
3403   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3404   //
3405   // Pages in the region become guard pages. Any attempt to access a guard page
3406   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3407   // the guard page status. Guard pages thus act as a one-time access alarm.
3408   bool ret;
3409   if (UseNUMAInterleaving) {
3410     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3411     // so we must protect the chunks individually.
3412     ret = protect_pages_individually(addr, bytes, p, &old_status);
3413   } else {
3414     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3415   }
3416 #ifdef ASSERT
3417   if (!ret) {
3418     int err = os::get_last_error();
3419     char buf[256];
3420     size_t buf_len = os::lasterror(buf, sizeof(buf));
3421     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3422           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3423           buf_len != 0 ? buf : "<no_error_string>", err);
3424   }
3425 #endif
3426   return ret;
3427 }
3428 
3429 bool os::guard_memory(char* addr, size_t bytes) {
3430   DWORD old_status;
3431   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3432 }
3433 
3434 bool os::unguard_memory(char* addr, size_t bytes) {
3435   DWORD old_status;
3436   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3437 }
3438 
3439 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3440 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3441 void os::numa_make_global(char *addr, size_t bytes)    { }
3442 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3443 bool os::numa_topology_changed()                       { return false; }
3444 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3445 int os::numa_get_group_id()                            { return 0; }
3446 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3447   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3448     // Provide an answer for UMA systems
3449     ids[0] = 0;
3450     return 1;
3451   } else {
3452     // check for size bigger than actual groups_num
3453     size = MIN2(size, numa_get_groups_num());
3454     for (int i = 0; i < (int)size; i++) {
3455       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3456     }
3457     return size;
3458   }
3459 }
3460 
3461 bool os::get_page_info(char *start, page_info* info) {
3462   return false;
3463 }
3464 
3465 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3466                      page_info* page_found) {
3467   return end;
3468 }
3469 
3470 char* os::non_memory_address_word() {
3471   // Must never look like an address returned by reserve_memory,
3472   // even in its subfields (as defined by the CPU immediate fields,
3473   // if the CPU splits constants across multiple instructions).
3474   return (char*)-1;
3475 }
3476 
3477 #define MAX_ERROR_COUNT 100
3478 #define SYS_THREAD_ERROR 0xffffffffUL
3479 
3480 void os::pd_start_thread(Thread* thread) {
3481   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3482   // Returns previous suspend state:
3483   // 0:  Thread was not suspended
3484   // 1:  Thread is running now
3485   // >1: Thread is still suspended.
3486   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3487 }
3488 
3489 
3490 
3491 // Short sleep, direct OS call.
3492 //
3493 // ms = 0, means allow others (if any) to run.
3494 //
3495 void os::naked_short_sleep(jlong ms) {
3496   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3497   Sleep(ms);
3498 }
3499 
3500 // Windows does not provide sleep functionality with nanosecond resolution, so we
3501 // try to approximate this with spinning combined with yielding if another thread
3502 // is ready to run on the current processor.
3503 void os::naked_short_nanosleep(jlong ns) {
3504   assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3505 
3506   int64_t start = os::javaTimeNanos();
3507   do {
3508     if (SwitchToThread() == 0) {
3509       // Nothing else is ready to run on this cpu, spin a little
3510       SpinPause();
3511     }
3512   } while (os::javaTimeNanos() - start < ns);
3513 }
3514 
3515 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3516 void os::infinite_sleep() {
3517   while (true) {    // sleep forever ...
3518     Sleep(100000);  // ... 100 seconds at a time
3519   }
3520 }
3521 
3522 typedef BOOL (WINAPI * STTSignature)(void);
3523 
3524 void os::naked_yield() {
3525   // Consider passing back the return value from SwitchToThread().
3526   SwitchToThread();
3527 }
3528 
3529 // Win32 only gives you access to seven real priorities at a time,
3530 // so we compress Java's ten down to seven.  It would be better
3531 // if we dynamically adjusted relative priorities.
3532 
3533 int os::java_to_os_priority[CriticalPriority + 1] = {
3534   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3535   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3536   THREAD_PRIORITY_LOWEST,                       // 2
3537   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3538   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3539   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3540   THREAD_PRIORITY_NORMAL,                       // 6
3541   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3542   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3543   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3544   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3545   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3546 };
3547 
3548 int prio_policy1[CriticalPriority + 1] = {
3549   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3550   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3551   THREAD_PRIORITY_LOWEST,                       // 2
3552   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3553   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3554   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3555   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3556   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3557   THREAD_PRIORITY_HIGHEST,                      // 8
3558   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3559   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3560   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3561 };
3562 
3563 static int prio_init() {
3564   // If ThreadPriorityPolicy is 1, switch tables
3565   if (ThreadPriorityPolicy == 1) {
3566     int i;
3567     for (i = 0; i < CriticalPriority + 1; i++) {
3568       os::java_to_os_priority[i] = prio_policy1[i];
3569     }
3570   }
3571   if (UseCriticalJavaThreadPriority) {
3572     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3573   }
3574   return 0;
3575 }
3576 
3577 OSReturn os::set_native_priority(Thread* thread, int priority) {
3578   if (!UseThreadPriorities) return OS_OK;
3579   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3580   return ret ? OS_OK : OS_ERR;
3581 }
3582 
3583 OSReturn os::get_native_priority(const Thread* const thread,
3584                                  int* priority_ptr) {
3585   if (!UseThreadPriorities) {
3586     *priority_ptr = java_to_os_priority[NormPriority];
3587     return OS_OK;
3588   }
3589   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3590   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3591     assert(false, "GetThreadPriority failed");
3592     return OS_ERR;
3593   }
3594   *priority_ptr = os_prio;
3595   return OS_OK;
3596 }
3597 
3598 void os::interrupt(Thread* thread) {
3599   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
3600 
3601   OSThread* osthread = thread->osthread();
3602   osthread->set_interrupted(true);
3603   // More than one thread can get here with the same value of osthread,
3604   // resulting in multiple notifications.  We do, however, want the store
3605   // to interrupted() to be visible to other threads before we post
3606   // the interrupt event.
3607   OrderAccess::release();
3608   SetEvent(osthread->interrupt_event());
3609   // For JSR166:  unpark after setting status
3610   if (thread->is_Java_thread()) {
3611     ((JavaThread*)thread)->parker()->unpark();
3612   }
3613 
3614   ParkEvent * ev = thread->_ParkEvent;
3615   if (ev != NULL) ev->unpark();
3616 
3617   ev = thread->_SleepEvent;
3618   if (ev != NULL) ev->unpark();
3619 }
3620 
3621 
3622 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3623   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
3624 
3625   OSThread* osthread = thread->osthread();
3626   // There is no synchronization between the setting of the interrupt
3627   // and it being cleared here. It is critical - see 6535709 - that
3628   // we only clear the interrupt state, and reset the interrupt event,
3629   // if we are going to report that we were indeed interrupted - else
3630   // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3631   // depending on the timing. Checking the thread's interrupt event verifies
3632   // that a real interrupt occurred, which prevents spurious wakeups.
3633   bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3634   if (interrupted && clear_interrupted) {
3635     osthread->set_interrupted(false);
3636     ResetEvent(osthread->interrupt_event());
3637   } // Otherwise leave the interrupted state alone
3638 
3639   return interrupted;
3640 }
3641 
3642 // GetCurrentThreadId() returns DWORD
3643 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3644 
3645 static int _initial_pid = 0;
3646 
3647 int os::current_process_id() {
3648   return (_initial_pid ? _initial_pid : _getpid());
3649 }
3650 
3651 int    os::win32::_vm_page_size              = 0;
3652 int    os::win32::_vm_allocation_granularity = 0;
3653 int    os::win32::_processor_type            = 0;
3654 // Processor level is not available on non-NT systems, use vm_version instead
3655 int    os::win32::_processor_level           = 0;
3656 julong os::win32::_physical_memory           = 0;
3657 size_t os::win32::_default_stack_size        = 0;
3658 
3659 intx          os::win32::_os_thread_limit    = 0;
3660 volatile intx os::win32::_os_thread_count    = 0;
3661 
3662 bool   os::win32::_is_windows_server         = false;
3663 
3664 // 6573254
3665 // Currently, the bug is observed across all the supported Windows releases,
3666 // including the latest one (as of this writing - Windows Server 2012 R2)
3667 bool   os::win32::_has_exit_bug              = true;
3668 
3669 void os::win32::initialize_system_info() {
3670   SYSTEM_INFO si;
3671   GetSystemInfo(&si);
3672   _vm_page_size    = si.dwPageSize;
3673   _vm_allocation_granularity = si.dwAllocationGranularity;
3674   _processor_type  = si.dwProcessorType;
3675   _processor_level = si.wProcessorLevel;
3676   set_processor_count(si.dwNumberOfProcessors);
3677 
3678   MEMORYSTATUSEX ms;
3679   ms.dwLength = sizeof(ms);
3680 
3681   // GlobalMemoryStatusEx also returns ullAvailPhys (free physical memory bytes),
3682   // ullTotalVirtual, ullAvailVirtual, and dwMemoryLoad (% of memory in use)
3683   GlobalMemoryStatusEx(&ms);
3684   _physical_memory = ms.ullTotalPhys;
3685 
3686   if (FLAG_IS_DEFAULT(MaxRAM)) {
3687     // Adjust MaxRAM according to the maximum virtual address space available.
3688     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3689   }
3690 
3691   OSVERSIONINFOEX oi;
3692   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3693   GetVersionEx((OSVERSIONINFO*)&oi);
3694   switch (oi.dwPlatformId) {
3695   case VER_PLATFORM_WIN32_NT:
3696     {
3697       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3698       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3699           oi.wProductType == VER_NT_SERVER) {
3700         _is_windows_server = true;
3701       }
3702     }
3703     break;
3704   default: fatal("Unknown platform");
3705   }
3706 
3707   _default_stack_size = os::current_stack_size();
3708   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3709   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3710          "stack size not a multiple of page size");
3711 
3712   initialize_performance_counter();
3713 }
3714 
3715 
3716 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3717                                       int ebuflen) {
3718   char path[MAX_PATH];
3719   DWORD size;
3720   DWORD pathLen = (DWORD)sizeof(path);
3721   HINSTANCE result = NULL;
3722 
3723   // only allow library name without path component
3724   assert(strchr(name, '\\') == NULL, "path not allowed");
3725   assert(strchr(name, ':') == NULL, "path not allowed");
3726   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3727     jio_snprintf(ebuf, ebuflen,
3728                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3729     return NULL;
3730   }
3731 
3732   // search system directory
3733   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3734     if (size >= pathLen) {
3735       return NULL; // truncated
3736     }
3737     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3738       return NULL; // truncated
3739     }
3740     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3741       return result;
3742     }
3743   }
3744 
3745   // try Windows directory
3746   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3747     if (size >= pathLen) {
3748       return NULL; // truncated
3749     }
3750     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3751       return NULL; // truncated
3752     }
3753     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3754       return result;
3755     }
3756   }
3757 
3758   jio_snprintf(ebuf, ebuflen,
3759                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3760   return NULL;
3761 }
3762 
3763 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3764 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3765 
3766 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3767   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3768   return TRUE;
3769 }
3770 
3771 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3772   // Basic approach:
3773   //  - Each exiting thread registers its intent to exit and then does so.
3774   //  - A thread trying to terminate the process must wait for all
3775   //    threads currently exiting to complete their exit.
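       //  - A thread that arrives after the process-exiting flag has been set is
       //    parked in an endless SuspendThread() loop so it cannot race with the
       //    exiting thread.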
3776 
3777   if (os::win32::has_exit_bug()) {
3778     // The array holds handles of the threads that have started exiting by calling
3779     // _endthreadex().
3780     // Should be large enough to avoid blocking the exiting thread due to lack of
3781     // a free slot.
3782     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3783     static int handle_count = 0;
3784 
3785     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3786     static CRITICAL_SECTION crit_sect;
3787     static volatile DWORD process_exiting = 0;
3788     int i, j;
3789     DWORD res;
3790     HANDLE hproc, hthr;
3791 
3792     // We only attempt to register threads until a process exiting
3793     // thread manages to set the process_exiting flag. Any threads
3794     // that come through here after the process_exiting flag is set
3795     // are unregistered and will be caught in the SuspendThread()
3796     // infinite loop below.
3797     bool registered = false;
3798 
3799     // The first thread that reached this point, initializes the critical section.
3800     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3801       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3802     } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3803       if (what != EPT_THREAD) {
3804         // Atomically set process_exiting before the critical section
3805         // to increase the visibility between racing threads.
3806         Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
3807       }
3808       EnterCriticalSection(&crit_sect);
3809 
3810       if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3811         // Remove from the array those handles of the threads that have completed exiting.
3812         for (i = 0, j = 0; i < handle_count; ++i) {
3813           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3814           if (res == WAIT_TIMEOUT) {
3815             handles[j++] = handles[i];
3816           } else {
3817             if (res == WAIT_FAILED) {
3818               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3819                       GetLastError(), __FILE__, __LINE__);
3820             }
3821             // Don't keep the handle, if we failed waiting for it.
3822             CloseHandle(handles[i]);
3823           }
3824         }
3825 
3826         // If there's no free slot in the array of the kept handles, we'll have to
3827         // wait until at least one thread completes exiting.
3828         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3829           // Raise the priority of the oldest exiting thread to increase its chances
3830           // to complete sooner.
3831           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3832           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3833           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3834             i = (res - WAIT_OBJECT_0);
3835             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3836             for (; i < handle_count; ++i) {
3837               handles[i] = handles[i + 1];
3838             }
3839           } else {
3840             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3841                     (res == WAIT_FAILED ? "failed" : "timed out"),
3842                     GetLastError(), __FILE__, __LINE__);
3843             // Don't keep handles, if we failed waiting for them.
3844             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3845               CloseHandle(handles[i]);
3846             }
3847             handle_count = 0;
3848           }
3849         }
3850 
3851         // Store a duplicate of the current thread handle in the array of handles.
3852         hproc = GetCurrentProcess();
3853         hthr = GetCurrentThread();
3854         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3855                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3856           warning("DuplicateHandle failed (%u) in %s: %d\n",
3857                   GetLastError(), __FILE__, __LINE__);
3858 
3859           // We can't register this thread (no more handles) so this thread
3860           // may be racing with a thread that is calling exit(). If the thread
3861           // that is calling exit() has managed to set the process_exiting
3862           // flag, then this thread will be caught in the SuspendThread()
3863           // infinite loop below which closes that race. A small timing
3864           // window remains before the process_exiting flag is set, but it
3865           // is only exposed when we are out of handles.
3866         } else {
3867           ++handle_count;
3868           registered = true;
3869 
3870           // The current exiting thread has stored its handle in the array, and now
3871           // should leave the critical section before calling _endthreadex().
3872         }
3873 
3874       } else if (what != EPT_THREAD && handle_count > 0) {
3875         jlong start_time, finish_time, timeout_left;
3876         // Before ending the process, make sure all the threads that had called
3877         // _endthreadex() completed.
3878 
3879         // Set the priority level of the current thread to the same value as
3880         // the priority level of exiting threads.
3881         // This is to ensure it will be given a fair chance to execute if
3882         // the timeout expires.
3883         hthr = GetCurrentThread();
3884         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3885         start_time = os::javaTimeNanos();
3886         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3887         for (i = 0; ; ) {
3888           int portion_count = handle_count - i;
3889           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3890             portion_count = MAXIMUM_WAIT_OBJECTS;
3891           }
3892           for (j = 0; j < portion_count; ++j) {
3893             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3894           }
3895           timeout_left = (finish_time - start_time) / 1000000L;
3896           if (timeout_left < 0) {
3897             timeout_left = 0;
3898           }
3899           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
3900           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
3901             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3902                     (res == WAIT_FAILED ? "failed" : "timed out"),
3903                     GetLastError(), __FILE__, __LINE__);
3904             // Reset portion_count so we close the remaining
3905             // handles due to this error.
3906             portion_count = handle_count - i;
3907           }
3908           for (j = 0; j < portion_count; ++j) {
3909             CloseHandle(handles[i + j]);
3910           }
3911           if ((i += portion_count) >= handle_count) {
3912             break;
3913           }
3914           start_time = os::javaTimeNanos();
3915         }
3916         handle_count = 0;
3917       }
3918 
3919       LeaveCriticalSection(&crit_sect);
3920     }
3921 
3922     if (!registered &&
3923         OrderAccess::load_acquire(&process_exiting) != 0 &&
3924         process_exiting != GetCurrentThreadId()) {
3925       // Some other thread is about to call exit(), so we don't let
3926       // the current unregistered thread proceed to exit() or _endthreadex()
3927       while (true) {
3928         SuspendThread(GetCurrentThread());
3929         // Avoid busy-wait loop, if SuspendThread() failed.
3930         Sleep(EXIT_TIMEOUT);
3931       }
3932     }
3933   }
3934 
3935   // We are here if either
3936   // - there's no 'race at exit' bug on this OS release;
3937   // - initialization of the critical section failed (unlikely);
3938   // - the current thread has registered itself and left the critical section;
3939   // - the process-exiting thread has raised the flag and left the critical section.
3940   if (what == EPT_THREAD) {
3941     _endthreadex((unsigned)exit_code);
3942   } else if (what == EPT_PROCESS) {
3943     ::exit(exit_code);
3944   } else {
3945     _exit(exit_code);
3946   }
3947 
3948   // Should not reach here
3949   return exit_code;
3950 }
3951 
3952 #undef EXIT_TIMEOUT
3953 
3954 void os::win32::setmode_streams() {
3955   _setmode(_fileno(stdin), _O_BINARY);
3956   _setmode(_fileno(stdout), _O_BINARY);
3957   _setmode(_fileno(stderr), _O_BINARY);
3958 }
3959 
3960 
3961 bool os::is_debugger_attached() {
3962   return IsDebuggerPresent() ? true : false;
3963 }
3964 
3965 
3966 void os::wait_for_keypress_at_exit(void) {
3967   if (PauseAtExit) {
3968     fprintf(stderr, "Press any key to continue...\n");
3969     fgetc(stdin);
3970   }
3971 }
3972 
3973 
3974 bool os::message_box(const char* title, const char* message) {
3975   int result = MessageBox(NULL, message, title,
3976                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3977   return result == IDYES;
3978 }
3979 
3980 #ifndef PRODUCT
3981 #ifndef _WIN64
3982 // Helpers to check whether NX protection is enabled
3983 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
3984   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
3985       pex->ExceptionRecord->NumberParameters > 0 &&
3986       pex->ExceptionRecord->ExceptionInformation[0] ==
3987       EXCEPTION_INFO_EXEC_VIOLATION) {
3988     return EXCEPTION_EXECUTE_HANDLER;
3989   }
3990   return EXCEPTION_CONTINUE_SEARCH;
3991 }
3992 
3993 void nx_check_protection() {
3994   // If NX is enabled we'll get an exception calling into code on the stack
3995   char code[] = { (char)0xC3 }; // ret
3996   void *code_ptr = (void *)code;
3997   __try {
3998     __asm call code_ptr
3999   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4000     tty->print_raw_cr("NX protection detected.");
4001   }
4002 }
4003 #endif // _WIN64
4004 #endif // PRODUCT
4005 
4006 // This is called _before_ the global arguments have been parsed
4007 void os::init(void) {
4008   _initial_pid = _getpid();
4009 
4010   init_random(1234567);
4011 
4012   win32::initialize_system_info();
4013   win32::setmode_streams();
4014   init_page_sizes((size_t) win32::vm_page_size());
4015 
4016   // This may be overridden later when argument processing is done.
4017   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
4018 
4019   // Initialize main_process and main_thread
4020   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4021   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4022                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4023     fatal("DuplicateHandle failed\n");
4024   }
4025   main_thread_id = (int) GetCurrentThreadId();
4026 
4027   // initialize fast thread access - only used for 32-bit
4028   win32::initialize_thread_ptr_offset();
4029 }
4030 
4031 // To install functions for atexit processing
4032 extern "C" {
4033   static void perfMemory_exit_helper() {
4034     perfMemory_exit();
4035   }
4036 }
4037 
4038 static jint initSock();
4039 
4040 // this is called _after_ the global arguments have been parsed
4041 jint os::init_2(void) {
4042 
4043   // This could be set at any time, but all platforms
4044   // have to set it the same way, so we mirror Solaris.
4045   DEBUG_ONLY(os::set_mutex_init_done();)
4046 
4047   // Setup Windows Exceptions
4048 
4049 #if INCLUDE_AOT
4050   // If AOT is enabled we need to install a vectored exception handler
4051   // in order to forward implicit exceptions from code in AOT
4052   // generated DLLs.  This is necessary since these DLLs are not
4053   // registered for structured exceptions like codecache methods are.
4054   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4055     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4056   }
4057 #endif
4058 
4059   // for debugging float code generation bugs
4060   if (ForceFloatExceptions) {
4061 #ifndef  _WIN64
4062     static long fp_control_word = 0;
4063     __asm { fstcw fp_control_word }
4064     // see Intel PPro Manual, Vol. 2, p 7-16
4065     const long precision = 0x20;
4066     const long underflow = 0x10;
4067     const long overflow  = 0x08;
4068     const long zero_div  = 0x04;
4069     const long denorm    = 0x02;
4070     const long invalid   = 0x01;
4071     fp_control_word |= invalid;
4072     __asm { fldcw fp_control_word }
4073 #endif
4074   }
4075 
4076   // If stack_commit_size is 0, Windows will reserve the default size,
4077   // but only commit a small portion of it.
4078   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4079   size_t default_reserve_size = os::win32::default_stack_size();
4080   size_t actual_reserve_size = stack_commit_size;
4081   if (stack_commit_size < default_reserve_size) {
4082     // If stack_commit_size == 0, we want this too
4083     actual_reserve_size = default_reserve_size;
4084   }
4085 
4086   // Check minimum allowable stack size for thread creation and to initialize
4087   // the java system classes, including StackOverflowError - depends on page
4088   // size.  Add two 4K pages for compiler2 recursion in main thread.
4089   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4090   // class initialization depending on 32 or 64 bit VM.
4091   size_t min_stack_allowed =
4092             (size_t)(JavaThread::stack_guard_zone_size() +
4093                      JavaThread::stack_shadow_zone_size() +
4094                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
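       // For example, on a 64-bit VM built with COMPILER2 the last term is
       // (4 * 8 + 2) * 4K = 136K, added on top of the guard and shadow zones.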
4095 
4096   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4097 
4098   if (actual_reserve_size < min_stack_allowed) {
4099     tty->print_cr("\nThe Java thread stack size specified is too small. "
4100                   "Specify at least %dk",
4101                   min_stack_allowed / K);
4102     return JNI_ERR;
4103   }
4104 
4105   JavaThread::set_stack_size_at_create(stack_commit_size);
4106 
4107   // Calculate the theoretical maximum number of threads, to guard against artificial
4108   // out-of-memory situations where all available address space has been
4109   // reserved by thread stacks.
4110   assert(actual_reserve_size != 0, "Must have a stack");
4111 
4112   // Calculate the thread limit at which we should start doing Virtual Memory
4113   // banging. Currently this is when the threads will have used all but 200MB of space.
4114   //
4115   // TODO: consider performing a similar calculation for commit size instead
4116   // as reserve size, since on a 64-bit platform we'll run into that more
4117   // often than running out of virtual memory space.  We can use the
4118   // lower value of the two calculations as the os_thread_limit.
4119   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4120   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
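       // For example, on a 32-bit VM with a 1M stack reserve size the limit works
       // out to roughly (2G - 200M) / 1M, i.e. about 1800 threads.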
4121 
4122   // at exit methods are called in the reverse order of their registration.
4123   // there is no limit to the number of functions registered. atexit does
4124   // not set errno.
4125 
4126   if (PerfAllowAtExitRegistration) {
4127     // only register atexit functions if PerfAllowAtExitRegistration is set.
4128     // atexit functions can be delayed until process exit time, which
4129     // can be problematic for embedded VM situations. Embedded VMs should
4130     // call DestroyJavaVM() to assure that VM resources are released.
4131 
4132     // note: perfMemory_exit_helper atexit function may be removed in
4133     // the future if the appropriate cleanup code can be added to the
4134     // VM_Exit VMOperation's doit method.
4135     if (atexit(perfMemory_exit_helper) != 0) {
4136       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4137     }
4138   }
4139 
4140 #ifndef _WIN64
4141   // Print something if NX is enabled (win32 on AMD64)
4142   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4143 #endif
4144 
4145   // initialize thread priority policy
4146   prio_init();
4147 
4148   if (UseNUMA && !ForceNUMA) {
4149     UseNUMA = false; // We don't fully support this yet
4150   }
4151 
4152   if (UseNUMAInterleaving) {
4153     // first check whether this Windows OS supports VirtualAllocExNuma; if not, ignore this flag
4154     bool success = numa_interleaving_init();
4155     if (!success) UseNUMAInterleaving = false;
4156   }
4157 
4158   if (initSock() != JNI_OK) {
4159     return JNI_ERR;
4160   }
4161 
4162   SymbolEngine::recalc_search_path();
4163 
4164   // Initialize data for jdk.internal.misc.Signal
4165   if (!ReduceSignalUsage) {
4166     jdk_misc_signal_init();
4167   }
4168 
4169   return JNI_OK;
4170 }
4171 
4172 // Mark the polling page as unreadable
4173 void os::make_polling_page_unreadable(void) {
4174   DWORD old_status;
4175   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4176                       PAGE_NOACCESS, &old_status)) {
4177     fatal("Could not disable polling page");
4178   }
4179 }
4180 
4181 // Mark the polling page as readable
4182 void os::make_polling_page_readable(void) {
4183   DWORD old_status;
4184   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4185                       PAGE_READONLY, &old_status)) {
4186     fatal("Could not enable polling page");
4187   }
4188 }
4189 
4190 // combine the high and low DWORD into a ULONGLONG
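     // e.g. make_double_word(0x00000001, 0x00000002) yields 0x0000000100000002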
4191 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4192   ULONGLONG value = high_word;
4193   value <<= sizeof(high_word) * 8;
4194   value |= low_word;
4195   return value;
4196 }
4197 
4198 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4199 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4200   ::memset((void*)sbuf, 0, sizeof(struct stat));
4201   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4202   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4203                                   file_data.ftLastWriteTime.dwLowDateTime);
4204   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4205                                   file_data.ftCreationTime.dwLowDateTime);
4206   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4207                                   file_data.ftLastAccessTime.dwLowDateTime);
4208   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4209     sbuf->st_mode |= S_IFDIR;
4210   } else {
4211     sbuf->st_mode |= S_IFREG;
4212   }
4213 }
4214 
4215 // The following function is adapted from java.base/windows/native/libjava/canonicalize_md.c
4216 // Creates a UNC path from a single-byte path. The returned buffer is
4217 // allocated in the C heap and needs to be freed by the caller.
4218 // Returns NULL on error.
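     // For example, "D:\somedir\file" becomes L"\\?\D:\somedir\file"; paths that
     // already start with "\\" get the "\\?\UNC" prefix instead.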
4219 static wchar_t* create_unc_path(const char* path, errno_t &err) {
4220   wchar_t* wpath = NULL;
4221   size_t converted_chars = 0;
4222   size_t path_len = strlen(path) + 1; // includes the terminating NULL
4223   if (path[0] == '\\' && path[1] == '\\') {
4224     if (path[2] == '?' && path[3] == '\\'){
4225       // if it already has a \\?\ don't do the prefix
4226       wpath = (wchar_t*)os::malloc(path_len * sizeof(wchar_t), mtInternal);
4227       if (wpath != NULL) {
4228         err = ::mbstowcs_s(&converted_chars, wpath, path_len, path, path_len);
4229       } else {
4230         err = ENOMEM;
4231       }
4232     } else {
4233       // only UNC pathname includes double slashes here
4234       wpath = (wchar_t*)os::malloc((path_len + 7) * sizeof(wchar_t), mtInternal);
4235       if (wpath != NULL) {
4236         ::wcscpy(wpath, L"\\\\?\\UNC\0");
4237         err = ::mbstowcs_s(&converted_chars, &wpath[7], path_len, path, path_len);
4238       } else {
4239         err = ENOMEM;
4240       }
4241     }
4242   } else {
4243     wpath = (wchar_t*)os::malloc((path_len + 4) * sizeof(wchar_t), mtInternal);
4244     if (wpath != NULL) {
4245       ::wcscpy(wpath, L"\\\\?\\\0");
4246       err = ::mbstowcs_s(&converted_chars, &wpath[4], path_len, path, path_len);
4247     } else {
4248       err = ENOMEM;
4249     }
4250   }
4251   return wpath;
4252 }
4253 
4254 static void destroy_unc_path(wchar_t* wpath) {
4255   os::free(wpath);
4256 }
4257 
4258 int os::stat(const char *path, struct stat *sbuf) {
4259   char* pathbuf = (char*)os::strdup(path, mtInternal);
4260   if (pathbuf == NULL) {
4261     errno = ENOMEM;
4262     return -1;
4263   }
4264   os::native_path(pathbuf);
4265   int ret;
4266   WIN32_FILE_ATTRIBUTE_DATA file_data;
4267   // Not using stat() to avoid the problem described in JDK-6539723
4268   if (strlen(path) < MAX_PATH) {
4269     BOOL bret = ::GetFileAttributesExA(pathbuf, GetFileExInfoStandard, &file_data);
4270     if (!bret) {
4271       errno = ::GetLastError();
4272       ret = -1;
4273     }
4274     else {
4275       file_attribute_data_to_stat(sbuf, file_data);
4276       ret = 0;
4277     }
4278   } else {
4279     errno_t err = ERROR_SUCCESS;
4280     wchar_t* wpath = create_unc_path(pathbuf, err);
4281     if (err != ERROR_SUCCESS) {
4282       if (wpath != NULL) {
4283         destroy_unc_path(wpath);
4284       }
4285       os::free(pathbuf);
4286       errno = err;
4287       return -1;
4288     }
4289     BOOL bret = ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &file_data);
4290     if (!bret) {
4291       errno = ::GetLastError();
4292       ret = -1;
4293     } else {
4294       file_attribute_data_to_stat(sbuf, file_data);
4295       ret = 0;
4296     }
4297     destroy_unc_path(wpath);
4298   }
4299   os::free(pathbuf);
4300   return ret;
4301 }
4302 
4303 static HANDLE create_read_only_file_handle(const char* file) {
4304   if (file == NULL) {
4305     return INVALID_HANDLE_VALUE;
4306   }
4307 
4308   char* nativepath = (char*)os::strdup(file, mtInternal);
4309   if (nativepath == NULL) {
4310     errno = ENOMEM;
4311     return INVALID_HANDLE_VALUE;
4312   }
4313   os::native_path(nativepath);
4314 
4315   size_t len = strlen(nativepath);
4316   HANDLE handle = INVALID_HANDLE_VALUE;
4317 
4318   if (len < MAX_PATH) {
4319     handle = ::CreateFile(nativepath, 0, FILE_SHARE_READ,
4320                           NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4321   } else {
4322     errno_t err = ERROR_SUCCESS;
4323     wchar_t* wfile = create_unc_path(nativepath, err);
4324     if (err != ERROR_SUCCESS) {
4325       if (wfile != NULL) {
4326         destroy_unc_path(wfile);
4327       }
4328       os::free(nativepath);
4329       return INVALID_HANDLE_VALUE;
4330     }
4331     handle = ::CreateFileW(wfile, 0, FILE_SHARE_READ,
4332                            NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4333     destroy_unc_path(wfile);
4334   }
4335 
4336   os::free(nativepath);
4337   return handle;
4338 }
4339 
4340 bool os::same_files(const char* file1, const char* file2) {
4341 
4342   if (file1 == NULL && file2 == NULL) {
4343     return true;
4344   }
4345 
4346   if (file1 == NULL || file2 == NULL) {
4347     return false;
4348   }
4349 
4350   if (strcmp(file1, file2) == 0) {
4351     return true;
4352   }
4353 
4354   HANDLE handle1 = create_read_only_file_handle(file1);
4355   HANDLE handle2 = create_read_only_file_handle(file2);
4356   bool result = false;
4357 
4358   // if we could open both paths...
4359   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4360     BY_HANDLE_FILE_INFORMATION fileInfo1;
4361     BY_HANDLE_FILE_INFORMATION fileInfo2;
4362     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4363       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4364       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4365       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4366         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4367         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4368         result = true;
4369       }
4370     }
4371   }
4372 
4373   //free the handles
4374   if (handle1 != INVALID_HANDLE_VALUE) {
4375     ::CloseHandle(handle1);
4376   }
4377 
4378   if (handle2 != INVALID_HANDLE_VALUE) {
4379     ::CloseHandle(handle2);
4380   }
4381 
4382   return result;
4383 }
4384 
4385 
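     // A FILETIME is a 64-bit count of 100-nanosecond intervals split into two
     // 32-bit halves; FT2INT64 reassembles the halves into a single signed jlong.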
4386 #define FT2INT64(ft) \
4387   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4388 
4389 
4390 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4391 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4392 // of a thread.
4393 //
4394 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4395 // the fast estimate available on the platform.
4396 
4397 // current_thread_cpu_time() is not optimized for Windows yet
4398 jlong os::current_thread_cpu_time() {
4399   // return user + sys since the cost is the same
4400   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4401 }
4402 
4403 jlong os::thread_cpu_time(Thread* thread) {
4404   // consistent with what current_thread_cpu_time() returns.
4405   return os::thread_cpu_time(thread, true /* user+sys */);
4406 }
4407 
4408 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4409   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4410 }
4411 
4412 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4413   // This code is copied from the classic VM -> hpi::sysThreadCPUTime
4414   // If this function changes, os::is_thread_cpu_time_supported() should too
4415   FILETIME CreationTime;
4416   FILETIME ExitTime;
4417   FILETIME KernelTime;
4418   FILETIME UserTime;
4419 
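       // GetThreadTimes() reports kernel and user CPU time as FILETIMEs in
       // 100-nanosecond units; multiplying by 100 converts the tick counts to
       // the nanoseconds expected by callers.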
4420   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4421                       &ExitTime, &KernelTime, &UserTime) == 0) {
4422     return -1;
4423   } else if (user_sys_cpu_time) {
4424     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4425   } else {
4426     return FT2INT64(UserTime) * 100;
4427   }
4428 }
4429 
4430 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4431   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4432   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4433   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4434   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4435 }
4436 
4437 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4438   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4439   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4440   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4441   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4442 }
4443 
4444 bool os::is_thread_cpu_time_supported() {
4445   // see os::thread_cpu_time
4446   FILETIME CreationTime;
4447   FILETIME ExitTime;
4448   FILETIME KernelTime;
4449   FILETIME UserTime;
4450 
4451   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4452                       &KernelTime, &UserTime) == 0) {
4453     return false;
4454   } else {
4455     return true;
4456   }
4457 }
4458 
4459 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4460 // It does have primitives (PDH API) to get CPU usage and run queue length.
4461 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4462 // If we wanted to implement loadavg on Windows, we have a few options:
4463 //
4464 // a) Query CPU usage and run queue length and "fake" an answer by
4465 //    returning the CPU usage if it's under 100%, and the run queue
4466 //    length otherwise.  It turns out that querying is pretty slow
4467 //    on Windows, on the order of 200 microseconds on a fast machine.
4468 //    Note that on Windows the CPU usage value is the % usage
4469 //    since the last time the API was called (and the first call
4470 //    returns 100%), so we'd have to deal with that as well.
4471 //
4472 // b) Sample the "fake" answer using a sampling thread and store
4473 //    the answer in a global variable.  The call to loadavg would
4474 //    just return the value of the global, avoiding the slow query.
4475 //
4476 // c) Sample a better answer using exponential decay to smooth the
4477 //    value.  This is basically the algorithm used by UNIX kernels.
4478 //
4479 // Note that sampling thread starvation could affect both (b) and (c).
4480 int os::loadavg(double loadavg[], int nelem) {
4481   return -1;
4482 }
4483 
4484 
4485 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4486 bool os::dont_yield() {
4487   return DontYieldALot;
4488 }
4489 
4490 // This method is a slightly reworked copy of JDK's sysOpen
4491 // from src/windows/hpi/src/sys_api_md.c
4492 
4493 int os::open(const char *path, int oflag, int mode) {
4494   char* pathbuf = (char*)os::strdup(path, mtInternal);
4495   if (pathbuf == NULL) {
4496     errno = ENOMEM;
4497     return -1;
4498   }
4499   os::native_path(pathbuf);
4500   int ret;
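       // O_BINARY disables CRLF translation and O_NOINHERIT keeps the descriptor
       // from being inherited by child processes.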
4501   if (strlen(path) < MAX_PATH) {
4502     ret = ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4503   } else {
4504     errno_t err = ERROR_SUCCESS;
4505     wchar_t* wpath = create_unc_path(pathbuf, err);
4506     if (err != ERROR_SUCCESS) {
4507       if (wpath != NULL) {
4508         destroy_unc_path(wpath);
4509       }
4510       os::free(pathbuf);
4511       errno = err;
4512       return -1;
4513     }
4514     ret = ::_wopen(wpath, oflag | O_BINARY | O_NOINHERIT, mode);
4515     if (ret == -1) {
4516       errno = ::GetLastError();
4517     }
4518     destroy_unc_path(wpath);
4519   }
4520   os::free(pathbuf);
4521   return ret;
4522 }
4523 
4524 FILE* os::open(int fd, const char* mode) {
4525   return ::_fdopen(fd, mode);
4526 }
4527 
4528 // Is a (classpath) directory empty?
4529 bool os::dir_is_empty(const char* path) {
4530   char* search_path = (char*)os::malloc(strlen(path) + 3, mtInternal);
4531   if (search_path == NULL) {
4532     errno = ENOMEM;
4533     return false;
4534   }
4535   strcpy(search_path, path);
4536   os::native_path(search_path);
4537   // Append "*", or possibly "\\*", to path
4538   if (search_path[1] == ':' &&
4539        (search_path[2] == '\0' ||
4540          (search_path[2] == '\\' && search_path[3] == '\0'))) {
4541     // No '\\' needed for cases like "Z:" or "Z:\"
4542     strcat(search_path, "*");
4543   }
4544   else {
4545     strcat(search_path, "\\*");
4546   }
4547   errno_t err = ERROR_SUCCESS;
4548   wchar_t* wpath = create_unc_path(search_path, err);
4549   if (err != ERROR_SUCCESS) {
4550     if (wpath != NULL) {
4551       destroy_unc_path(wpath);
4552     }
4553     os::free(search_path);
4554     errno = err;
4555     return false;
4556   }
4557   WIN32_FIND_DATAW fd;
4558   HANDLE f = ::FindFirstFileW(wpath, &fd);
4559   destroy_unc_path(wpath);
4560   bool is_empty = true;
4561   if (f != INVALID_HANDLE_VALUE) {
4562     while (is_empty && ::FindNextFileW(f, &fd)) {
4563       // An empty directory contains only the current directory entry (".")
4564       // and the parent directory entry ("..").
4565       if ((wcscmp(fd.cFileName, L".") != 0) &&
4566           (wcscmp(fd.cFileName, L"..") != 0)) {
4567         is_empty = false;
4568       }
4569     }
4570     FindClose(f);
4571   }
4572   os::free(search_path);
4573   return is_empty;
4574 }
4575 
4576 // create binary file, rewriting existing file if required
4577 int os::create_binary_file(const char* path, bool rewrite_existing) {
4578   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4579   if (!rewrite_existing) {
4580     oflags |= _O_EXCL;
4581   }
4582   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4583 }
4584 
4585 // return current position of file pointer
4586 jlong os::current_file_offset(int fd) {
4587   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4588 }
4589 
4590 // move file pointer to the specified offset
4591 jlong os::seek_to_file_offset(int fd, jlong offset) {
4592   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4593 }
4594 
4595 
4596 jlong os::lseek(int fd, jlong offset, int whence) {
4597   return (jlong) ::_lseeki64(fd, offset, whence);
4598 }
4599 
4600 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4601   OVERLAPPED ov;
4602   DWORD nread;
4603   BOOL result;
4604 
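       // The 64-bit read offset is supplied through an OVERLAPPED structure; the
       // handle was not opened for overlapped I/O, so ReadFile completes
       // synchronously at that offset.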
4605   ZeroMemory(&ov, sizeof(ov));
4606   ov.Offset = (DWORD)offset;
4607   ov.OffsetHigh = (DWORD)(offset >> 32);
4608 
4609   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4610 
4611   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4612 
4613   return result ? nread : 0;
4614 }
4615 
4616 
4617 // This method is a slightly reworked copy of JDK's sysNativePath
4618 // from src/windows/hpi/src/path_md.c
4619 
4620 // Convert a pathname to native format.  On win32, this involves forcing all
4621 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4622 // sometimes rejects '/') and removing redundant separators.  The input path is
4623 // assumed to have been converted into the character encoding used by the local
4624 // system.  Because this might be a double-byte encoding, care is taken to
4625 // treat double-byte lead characters correctly.
4626 //
4627 // This procedure modifies the given path in place, as the result is never
4628 // longer than the original.  There is no error return; this operation always
4629 // succeeds.
4630 char * os::native_path(char *path) {
4631   char *src = path, *dst = path, *end = path;
4632   char *colon = NULL;  // If a drive specifier is found, this will
4633                        // point to the colon following the drive letter
4634 
4635   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4636   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4637           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4638 
4639   // Check for leading separators
4640 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4641   while (isfilesep(*src)) {
4642     src++;
4643   }
4644 
4645   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4646     // Remove leading separators if followed by drive specifier.  This
4647     // hack is necessary to support file URLs containing drive
4648     // specifiers (e.g., "file://c:/path").  As a side effect,
4649     // "/c:/path" can be used as an alternative to "c:/path".
4650     *dst++ = *src++;
4651     colon = dst;
4652     *dst++ = ':';
4653     src++;
4654   } else {
4655     src = path;
4656     if (isfilesep(src[0]) && isfilesep(src[1])) {
4657       // UNC pathname: Retain first separator; leave src pointed at
4658       // second separator so that further separators will be collapsed
4659       // into the second separator.  The result will be a pathname
4660       // beginning with "\\\\" followed (most likely) by a host name.
4661       src = dst = path + 1;
4662       path[0] = '\\';     // Force first separator to '\\'
4663     }
4664   }
4665 
4666   end = dst;
4667 
4668   // Remove redundant separators from remainder of path, forcing all
4669   // separators to be '\\' rather than '/'. Also, single byte space
4670   // characters are removed from the end of the path because those
4671   // are not legal ending characters on this operating system.
4672   //
4673   while (*src != '\0') {
4674     if (isfilesep(*src)) {
4675       *dst++ = '\\'; src++;
4676       while (isfilesep(*src)) src++;
4677       if (*src == '\0') {
4678         // Check for trailing separator
4679         end = dst;
4680         if (colon == dst - 2) break;  // "z:\\"
4681         if (dst == path + 1) break;   // "\\"
4682         if (dst == path + 2 && isfilesep(path[0])) {
4683           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4684           // beginning of a UNC pathname.  Even though it is not, by
4685           // itself, a valid UNC pathname, we leave it as is in order
4686           // to be consistent with the path canonicalizer as well
4687           // as the win32 APIs, which treat this case as an invalid
4688           // UNC pathname rather than as an alias for the root
4689           // directory of the current drive.
4690           break;
4691         }
4692         end = --dst;  // Path does not denote a root directory, so
4693                       // remove trailing separator
4694         break;
4695       }
4696       end = dst;
4697     } else {
4698       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4699         *dst++ = *src++;
4700         if (*src) *dst++ = *src++;
4701         end = dst;
4702       } else {  // Copy a single-byte character
4703         char c = *src++;
4704         *dst++ = c;
4705         // Space is not a legal ending character
4706         if (c != ' ') end = dst;
4707       }
4708     }
4709   }
4710 
4711   *end = '\0';
4712 
4713   // For "z:", add "." to work around a bug in the C runtime library
4714   if (colon == dst - 1) {
4715     path[2] = '.';
4716     path[3] = '\0';
4717   }
4718 
4719   return path;
4720 }
4721 
4722 // This code is a copy of JDK's sysSetLength
4723 // from src/windows/hpi/src/sys_api_md.c
4724 
4725 int os::ftruncate(int fd, jlong length) {
4726   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4727   long high = (long)(length >> 32);
4728   DWORD ret;
4729 
4730   if (h == (HANDLE)(-1)) {
4731     return -1;
4732   }
4733 
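       // SetFilePointer takes the 64-bit length split into a low 32-bit half
       // (passed directly) and a high 32-bit half (passed through 'high').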
4734   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4735   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4736     return -1;
4737   }
4738 
4739   if (::SetEndOfFile(h) == FALSE) {
4740     return -1;
4741   }
4742 
4743   return 0;
4744 }
4745 
4746 int os::get_fileno(FILE* fp) {
4747   return _fileno(fp);
4748 }
4749 
4750 // This code is a copy of JDK's sysSync
4751 // from src/windows/hpi/src/sys_api_md.c
4752 // except for the legacy workaround for a bug in Win 98
4753 
4754 int os::fsync(int fd) {
4755   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4756 
4757   if ((!::FlushFileBuffers(handle)) &&
4758       (GetLastError() != ERROR_ACCESS_DENIED)) {
4759     // from winerror.h
4760     return -1;
4761   }
4762   return 0;
4763 }
4764 
4765 static int nonSeekAvailable(int, long *);
4766 static int stdinAvailable(int, long *);
4767 
4768 // This code is a copy of JDK's sysAvailable
4769 // from src/windows/hpi/src/sys_api_md.c
4770 
4771 int os::available(int fd, jlong *bytes) {
4772   jlong cur, end;
4773   struct _stati64 stbuf64;
4774 
4775   if (::_fstati64(fd, &stbuf64) >= 0) {
4776     int mode = stbuf64.st_mode;
4777     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4778       int ret;
4779       long lpbytes;
4780       if (fd == 0) {
4781         ret = stdinAvailable(fd, &lpbytes);
4782       } else {
4783         ret = nonSeekAvailable(fd, &lpbytes);
4784       }
4785       (*bytes) = (jlong)(lpbytes);
4786       return ret;
4787     }
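         // For seekable files the number of available bytes is the file size minus
         // the current offset; the original file position is restored afterwards.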
4788     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4789       return FALSE;
4790     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4791       return FALSE;
4792     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4793       return FALSE;
4794     }
4795     *bytes = end - cur;
4796     return TRUE;
4797   } else {
4798     return FALSE;
4799   }
4800 }
4801 
4802 void os::flockfile(FILE* fp) {
4803   _lock_file(fp);
4804 }
4805 
4806 void os::funlockfile(FILE* fp) {
4807   _unlock_file(fp);
4808 }
4809 
4810 // This code is a copy of JDK's nonSeekAvailable
4811 // from src/windows/hpi/src/sys_api_md.c
4812 
4813 static int nonSeekAvailable(int fd, long *pbytes) {
4814   // This is used for available on non-seekable devices
4815   // (like both named and anonymous pipes, such as pipes
4816   //  connected to an exec'd process).
4817   // Standard Input is a special case.
4818   HANDLE han;
4819 
4820   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4821     return FALSE;
4822   }
4823 
4824   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4825     // PeekNamedPipe fails when at EOF.  In that case we
4826     // simply make *pbytes = 0 which is consistent with the
4827     // behavior we get on Solaris when an fd is at EOF.
4828     // The only alternative is to raise an Exception,
4829     // which isn't really warranted.
4830     //
4831     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4832       return FALSE;
4833     }
4834     *pbytes = 0;
4835   }
4836   return TRUE;
4837 }
4838 
4839 #define MAX_INPUT_EVENTS 2000
4840 
4841 // This code is a copy of JDK's stdinAvailable
4842 // from src/windows/hpi/src/sys_api_md.c
4843 
4844 static int stdinAvailable(int fd, long *pbytes) {
4845   HANDLE han;
4846   DWORD numEventsRead = 0;  // Number of events read from buffer
4847   DWORD numEvents = 0;      // Number of events in buffer
4848   DWORD i = 0;              // Loop index
4849   DWORD curLength = 0;      // Position marker
4850   DWORD actualLength = 0;   // Number of bytes readable
4851   BOOL error = FALSE;       // Error holder
4852   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4853 
4854   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4855     return FALSE;
4856   }
4857 
4858   // Construct an array of input records in the console buffer
4859   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4860   if (error == 0) {
4861     return nonSeekAvailable(fd, pbytes);
4862   }
4863 
4864   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4865   if (numEvents > MAX_INPUT_EVENTS) {
4866     numEvents = MAX_INPUT_EVENTS;
4867   }
4868 
4869   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4870   if (lpBuffer == NULL) {
4871     return FALSE;
4872   }
4873 
4874   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4875   if (error == 0) {
4876     os::free(lpBuffer);
4877     return FALSE;
4878   }
4879 
4880   // Examine input records for the number of bytes available
4881   for (i=0; i<numEventsRead; i++) {
4882     if (lpBuffer[i].EventType == KEY_EVENT) {
4883 
4884       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4885                                       &(lpBuffer[i].Event);
4886       if (keyRecord->bKeyDown == TRUE) {
4887         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4888         curLength++;
4889         if (*keyPressed == '\r') {
4890           actualLength = curLength;
4891         }
4892       }
4893     }
4894   }
4895 
4896   if (lpBuffer != NULL) {
4897     os::free(lpBuffer);
4898   }
4899 
4900   *pbytes = (long) actualLength;
4901   return TRUE;
4902 }
4903 
4904 // Map a block of memory.
4905 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4906                         char *addr, size_t bytes, bool read_only,
4907                         bool allow_exec) {
4908   HANDLE hFile;
4909   char* base;
4910 
4911   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4912                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4913   if (hFile == INVALID_HANDLE_VALUE) {
4914     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4915     return NULL;
4916   }
4917 
4918   if (allow_exec) {
4919     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4920     // unless it comes from a PE image (which the shared archive is not.)
4921     // Even VirtualProtect refuses to give execute access to mapped memory
4922     // that was not previously executable.
4923     //
4924     // Instead, stick the executable region in anonymous memory.  Yuck.
4925     // Penalty is that ~4 pages will not be shareable - in the future
4926     // we might consider DLLizing the shared archive with a proper PE
4927     // header so that mapping executable + sharing is possible.
4928 
4929     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4930                                 PAGE_READWRITE);
4931     if (base == NULL) {
4932       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4933       CloseHandle(hFile);
4934       return NULL;
4935     }
4936 
4937     // Record virtual memory allocation
4938     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
4939 
4940     DWORD bytes_read;
4941     OVERLAPPED overlapped;
4942     overlapped.Offset = (DWORD)file_offset;
4943     overlapped.OffsetHigh = 0;
4944     overlapped.hEvent = NULL;
4945     // ReadFile guarantees that if the return value is true, the requested
4946     // number of bytes were read before returning.
4947     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4948     if (!res) {
4949       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4950       release_memory(base, bytes);
4951       CloseHandle(hFile);
4952       return NULL;
4953     }
4954   } else {
4955     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4956                                     NULL /* file_name */);
4957     if (hMap == NULL) {
4958       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4959       CloseHandle(hFile);
4960       return NULL;
4961     }
4962 
4963     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4964     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4965                                   (DWORD)bytes, addr);
4966     if (base == NULL) {
4967       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4968       CloseHandle(hMap);
4969       CloseHandle(hFile);
4970       return NULL;
4971     }
4972 
4973     if (CloseHandle(hMap) == 0) {
4974       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4975       CloseHandle(hFile);
4976       return base;
4977     }
4978   }
4979 
4980   if (allow_exec) {
4981     DWORD old_protect;
4982     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4983     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4984 
4985     if (!res) {
4986       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4987       // Don't consider this a hard error, on IA32 even if the
4988       // VirtualProtect fails, we should still be able to execute
4989       CloseHandle(hFile);
4990       return base;
4991     }
4992   }
4993 
4994   if (CloseHandle(hFile) == 0) {
4995     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4996     return base;
4997   }
4998 
4999   return base;
5000 }
5001 
5002 
5003 // Remap a block of memory.
5004 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5005                           char *addr, size_t bytes, bool read_only,
5006                           bool allow_exec) {
5007   // This OS does not allow existing memory maps to be remapped so we
5008   // would have to unmap the memory before we remap it.
5009 
5010   // Because there is a small window between unmapping memory and mapping
5011   // it in again with different protections, CDS archives are mapped RW
5012   // on windows, so this function isn't called.
5013   ShouldNotReachHere();
5014   return NULL;
5015 }
5016 
5017 
5018 // Unmap a block of memory.
5019 // Returns true=success, otherwise false.
5020 
5021 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5022   MEMORY_BASIC_INFORMATION mem_info;
5023   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5024     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5025     return false;
5026   }
5027 
5028   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5029   // Instead, executable region was allocated using VirtualAlloc(). See
5030   // pd_map_memory() above.
5031   //
5032   // The following flags should match the 'exec_access' flags used for
5033   // VirtualProtect() in pd_map_memory().
5034   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5035       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5036     return pd_release_memory(addr, bytes);
5037   }
5038 
5039   BOOL result = UnmapViewOfFile(addr);
5040   if (result == 0) {
5041     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
5042     return false;
5043   }
5044   return true;
5045 }
5046 
5047 void os::pause() {
5048   char filename[MAX_PATH];
5049   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5050     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5051   } else {
5052     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5053   }
5054 
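       // Create the pause file, then spin until an operator deletes it;
       // a failing stat() is the signal to continue.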
5055   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5056   if (fd != -1) {
5057     struct stat buf;
5058     ::close(fd);
5059     while (::stat(filename, &buf) == 0) {
5060       Sleep(100);
5061     }
5062   } else {
5063     jio_fprintf(stderr,
5064                 "Could not open pause file '%s', continuing immediately.\n", filename);
5065   }
5066 }
5067 
5068 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5069 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5070 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
5071 
5072 os::ThreadCrashProtection::ThreadCrashProtection() {
5073 }
5074 
5075 // See the caveats for this class in os_windows.hpp
5076 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5077 // into this method, which then returns false. If no OS EXCEPTION was raised,
5078 // this method returns true.
5079 // The callback is supposed to provide the method that should be protected.
5080 //
5081 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5082 
5083   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5084 
5085   _protected_thread = Thread::current_or_null();
5086   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5087 
5088   bool success = true;
5089   __try {
5090     _crash_protection = this;
5091     cb.call();
5092   } __except(EXCEPTION_EXECUTE_HANDLER) {
5093     // only for protection, nothing to do
5094     success = false;
5095   }
5096   _crash_protection = NULL;
5097   _protected_thread = NULL;
5098   Thread::muxRelease(&_crash_mux);
5099   return success;
5100 }
5101 
5102 
5103 class HighResolutionInterval : public CHeapObj<mtThread> {
5104   // The default timer resolution seems to be 10 milliseconds.
5105   // (Where is this written down?)
5106   // If someone wants to sleep for only a fraction of the default,
5107   // then we set the timer resolution down to 1 millisecond for
5108   // the duration of their interval.
5109   // We carefully set the resolution back, since otherwise we
5110   // seem to incur an overhead (3%?) that we don't need.
5111   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
5112   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5113   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5114   // timeBeginPeriod() if the relative error exceeded some threshold.
5115   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5116   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5117   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5118   // resolution timers running.
5119  private:
5120   jlong resolution;
5121  public:
5122   HighResolutionInterval(jlong ms) {
5123     resolution = ms % 10L;
5124     if (resolution != 0) {
5125       MMRESULT result = timeBeginPeriod(1L);
5126     }
5127   }
5128   ~HighResolutionInterval() {
5129     if (resolution != 0) {
5130       MMRESULT result = timeEndPeriod(1L);
5131     }
5132     resolution = 0L;
5133   }
5134 };
5135 
5136 // An Event wraps a win32 "CreateEvent" kernel handle.
5137 //
5138 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5139 //
5140 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5141 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5142 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5143 //     In addition, an unpark() operation might fetch the handle field, but the
5144 //     event could recycle between the fetch and the SetEvent() operation.
5145 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5146 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5147 //     on a stale but recycled handle would be harmless, but in practice this might
5148 //     confuse other non-Sun code, so it's not a viable approach.
5149 //
5150 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5151 //     with the Event.  The event handle is never closed.  This could be construed
5152 //     as handle leakage, but only up to the maximum # of threads that have been extant
5153 //     at any one time.  This shouldn't be an issue, as Windows platforms typically
5154 //     permit a process to have hundreds of thousands of open handles.
5155 //
5156 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5157 //     and release unused handles.
5158 //
5159 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5160 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5161 //
5162 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5163 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5164 //
5165 // We use (2).
5166 //
5167 // TODO-FIXME:
5168 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5169 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5170 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5171 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5172 //     into a single win32 CreateEvent() handle.
5173 //
5174 // Assumption:
5175 //    Only one parker can exist on an event, which is why we allocate
5176 //    them per-thread. Multiple unparkers can coexist.
5177 //
5178 // _Event transitions in park()
5179 //   -1 => -1 : illegal
5180 //    1 =>  0 : pass - return immediately
5181 //    0 => -1 : block; then set _Event to 0 before returning
5182 //
5183 // _Event transitions in unpark()
5184 //    0 => 1 : just return
5185 //    1 => 1 : just return
5186 //   -1 => either 0 or 1; must signal target thread
5187 //         That is, we can safely transition _Event from -1 to either
5188 //         0 or 1.
5189 //
5190 // _Event serves as a restricted-range semaphore.
5191 //   -1 : thread is blocked, i.e. there is a waiter
5192 //    0 : neutral: thread is running or ready,
5193 //        could have been signaled after a wait started
5194 //    1 : signaled - thread is running or ready
5195 //
5196 // Another possible encoding of _Event would be with
5197 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5198 //
5199 
5200 int os::PlatformEvent::park(jlong Millis) {
5201   // Transitions for _Event:
5202   //   -1 => -1 : illegal
5203   //    1 =>  0 : pass - return immediately
5204   //    0 => -1 : block; then set _Event to 0 before returning
5205 
5206   guarantee(_ParkHandle != NULL , "Invariant");
5207   guarantee(Millis > 0          , "Invariant");
5208 
5209   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5210   // the initial park() operation.
5211   // Consider: use atomic decrement instead of CAS-loop
5212 
5213   int v;
5214   for (;;) {
5215     v = _Event;
5216     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5217   }
5218   guarantee((v == 0) || (v == 1), "invariant");
5219   if (v != 0) return OS_OK;
5220 
5221   // Do this the hard way by blocking ...
5222   // TODO: consider a brief spin here, gated on the success of recent
5223   // spin attempts by this thread.
5224   //
5225   // We decompose long timeouts into series of shorter timed waits.
5226   // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5227   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5228   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5229   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5230   // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend
5231   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5232   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5233   // for the already waited time.  This policy does not admit any new outcomes.
5234   // In the future, however, we might want to track the accumulated wait time and
5235   // adjust Millis accordingly if we encounter a spurious wakeup.
5236 
5237   const int MAXTIMEOUT = 0x10000000;
5238   DWORD rv = WAIT_TIMEOUT;
5239   while (_Event < 0 && Millis > 0) {
5240     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5241     if (Millis > MAXTIMEOUT) {
5242       prd = MAXTIMEOUT;
5243     }
5244     HighResolutionInterval *phri = NULL;
5245     if (!ForceTimeHighResolution) {
5246       phri = new HighResolutionInterval(prd);
5247     }
5248     rv = ::WaitForSingleObject(_ParkHandle, prd);
5249     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5250     if (rv == WAIT_TIMEOUT) {
5251       Millis -= prd;
5252     }
5253     delete phri; // if it is NULL, harmless
5254   }
5255   v = _Event;
5256   _Event = 0;
5257   // see comment at end of os::PlatformEvent::park() below:
5258   OrderAccess::fence();
5259   // If we encounter a nearly simultaneous timeout expiry and unpark()
5260   // we return OS_OK indicating we awoke via unpark().
5261   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5262   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5263 }
5264 
5265 void os::PlatformEvent::park() {
5266   // Transitions for _Event:
5267   //   -1 => -1 : illegal
5268   //    1 =>  0 : pass - return immediately
5269   //    0 => -1 : block; then set _Event to 0 before returning
5270 
5271   guarantee(_ParkHandle != NULL, "Invariant");
5272   // Invariant: Only the thread associated with the Event/PlatformEvent
5273   // may call park().
5274   // Consider: use atomic decrement instead of CAS-loop
5275   int v;
5276   for (;;) {
5277     v = _Event;
5278     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5279   }
5280   guarantee((v == 0) || (v == 1), "invariant");
5281   if (v != 0) return;
5282 
5283   // Do this the hard way by blocking ...
5284   // TODO: consider a brief spin here, gated on the success of recent
5285   // spin attempts by this thread.
5286   while (_Event < 0) {
5287     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5288     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5289   }
5290 
5291   // Usually we'll find _Event == 0 at this point, but as
5292   // an optional optimization we clear it, just in case multiple
5293   // unpark() operations drove _Event up to 1.
5294   _Event = 0;
5295   OrderAccess::fence();
5296   guarantee(_Event >= 0, "invariant");
5297 }
5298 
5299 void os::PlatformEvent::unpark() {
5300   guarantee(_ParkHandle != NULL, "Invariant");
5301 
5302   // Transitions for _Event:
5303   //    0 => 1 : just return
5304   //    1 => 1 : just return
5305   //   -1 => either 0 or 1; must signal target thread
5306   //         That is, we can safely transition _Event from -1 to either
5307   //         0 or 1.
5308   // See also: "Semaphores in Plan 9" by Mullender & Cox
5309   //
5310   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5311   // that it will take two back-to-back park() calls for the owning
5312   // thread to block. This has the benefit of forcing a spurious return
5313   // from the first park() call after an unpark() call which will help
5314   // shake out uses of park() and unpark() without condition variables.
5315 
5316   if (Atomic::xchg(1, &_Event) >= 0) return;
5317 
5318   ::SetEvent(_ParkHandle);
5319 }
5320 
5321 
5322 // JSR166
5323 // -------------------------------------------------------
5324 
5325 // The Windows implementation of Park is very straightforward: Basic
5326 // operations on Win32 Events turn out to have the right semantics to
5327 // use them directly. We opportunistically reuse the event inherited
5328 // from Monitor.
5329 
5330 void Parker::park(bool isAbsolute, jlong time) {
5331   guarantee(_ParkEvent != NULL, "invariant");
5332   // First, demultiplex/decode time arguments
5333   if (time < 0) { // don't wait
5334     return;
5335   } else if (time == 0 && !isAbsolute) {
5336     time = INFINITE;
5337   } else if (isAbsolute) {
5338     time -= os::javaTimeMillis(); // convert to relative time
5339     if (time <= 0) {  // already elapsed
5340       return;
5341     }
5342   } else { // relative
5343     time /= 1000000;  // Must coarsen from nanos to millis
5344     if (time == 0) {  // Wait for the minimal time unit if zero
5345       time = 1;
5346     }
5347   }
5348 
5349   JavaThread* thread = JavaThread::current();
5350 
5351   // Don't wait if interrupted or already triggered
5352   if (Thread::is_interrupted(thread, false) ||
5353       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5354     ResetEvent(_ParkEvent);
5355     return;
5356   } else {
5357     ThreadBlockInVM tbivm(thread);
5358     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5359     thread->set_suspend_equivalent();
5360 
5361     WaitForSingleObject(_ParkEvent, time);
5362     ResetEvent(_ParkEvent);
5363 
5364     // If externally suspended while waiting, re-suspend
5365     if (thread->handle_special_suspend_equivalent_condition()) {
5366       thread->java_suspend_self();
5367     }
5368   }
5369 }
5370 
5371 void Parker::unpark() {
5372   guarantee(_ParkEvent != NULL, "invariant");
5373   SetEvent(_ParkEvent);
5374 }
5375 
5376 // Platform Monitor implementation
5377 
5378 // Must already be locked
5379 int os::PlatformMonitor::wait(jlong millis) {
5380   assert(millis >= 0, "negative timeout");
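       // A 'millis' value of 0 means wait with no timeout; otherwise wait at
       // most 'millis' milliseconds.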
5381   int ret = OS_TIMEOUT;
5382   int status = SleepConditionVariableCS(&_cond, &_mutex,
5383                                         millis == 0 ? INFINITE : millis);
5384   if (status != 0) {
5385     ret = OS_OK;
5386   }
5387   #ifndef PRODUCT
5388   else {
5389     DWORD err = GetLastError();
5390     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5391   }
5392   #endif
5393   return ret;
5394 }
5395 
5396 // Run the specified command in a separate process. Return its exit value,
5397 // or -1 on failure (e.g. can't create a new process).
5398 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5399   STARTUPINFO si;
5400   PROCESS_INFORMATION pi;
5401   DWORD exit_code;
5402 
5403   char * cmd_string;
5404   const char * cmd_prefix = "cmd /C ";
5405   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5406   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5407   if (cmd_string == NULL) {
5408     return -1;
5409   }
5410   cmd_string[0] = '\0';
5411   strcat(cmd_string, cmd_prefix);
5412   strcat(cmd_string, cmd);
5413 
5414   // Now replace every '\n' with '&'; cmd.exe uses '&' to chain commands on one line.
5415   char * substring = cmd_string;
5416   while ((substring = strchr(substring, '\n')) != NULL) {
5417     substring[0] = '&';
5418     substring++;
5419   }
5420   memset(&si, 0, sizeof(si));
5421   si.cb = sizeof(si);
5422   memset(&pi, 0, sizeof(pi));
5423   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5424                             cmd_string,    // command line
5425                             NULL,   // process security attribute
5426                             NULL,   // thread security attribute
5427                             TRUE,   // inherits system handles
5428                             0,      // no creation flags
5429                             NULL,   // use parent's environment block
5430                             NULL,   // use parent's starting directory
5431                             &si,    // (in) startup information
5432                             &pi);   // (out) process information
5433 
5434   if (rslt) {
5435     // Wait until child process exits.
5436     WaitForSingleObject(pi.hProcess, INFINITE);
5437 
5438     GetExitCodeProcess(pi.hProcess, &exit_code);
5439 
5440     // Close process and thread handles.
5441     CloseHandle(pi.hProcess);
5442     CloseHandle(pi.hThread);
5443   } else {
5444     exit_code = -1;
5445   }
5446 
5447   FREE_C_HEAP_ARRAY(char, cmd_string);
5448   return (int)exit_code;
5449 }
5450 
5451 bool os::find(address addr, outputStream* st) {
5452   int offset = -1;
5453   bool result = false;
5454   char buf[256];
5455   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5456     st->print(PTR_FORMAT " ", addr);
5457     if (strlen(buf) < sizeof(buf) - 1) {
5458       char* p = strrchr(buf, '\\');
5459       if (p) {
5460         st->print("%s", p + 1);
5461       } else {
5462         st->print("%s", buf);
5463       }
5464     } else {
5465         // The library name is probably truncated. Let's omit the library name.
5466         // See also JDK-8147512.
5467     }
5468     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5469       st->print("::%s + 0x%x", buf, offset);
5470     }
5471     st->cr();
5472     result = true;
5473   }
5474   return result;
5475 }
5476 
5477 static jint initSock() {
5478   WSADATA wsadata;
5479 
5480   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5481     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5482                 ::GetLastError());
5483     return JNI_ERR;
5484   }
5485   return JNI_OK;
5486 }
5487 
5488 struct hostent* os::get_host_by_name(char* name) {
5489   return (struct hostent*)gethostbyname(name);
5490 }
5491 
5492 int os::socket_close(int fd) {
5493   return ::closesocket(fd);
5494 }
5495 
5496 int os::socket(int domain, int type, int protocol) {
5497   return ::socket(domain, type, protocol);
5498 }
5499 
5500 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5501   return ::connect(fd, him, len);
5502 }
5503 
5504 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5505   return ::recv(fd, buf, (int)nBytes, flags);
5506 }
5507 
5508 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5509   return ::send(fd, buf, (int)nBytes, flags);
5510 }
5511 
5512 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5513   return ::send(fd, buf, (int)nBytes, flags);
5514 }
5515 
5516 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
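     // CONTEXT_EXTENDED_REGISTERS (the FXSAVE x87/SSE state) only exists in the
     // 32-bit x86 CONTEXT structure, so it is requested on IA32 only.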
5517 #if defined(IA32)
5518   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5519 #elif defined (AMD64)
5520   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5521 #endif
5522 
5523 // returns true if thread could be suspended,
5524 // false otherwise
5525 static bool do_suspend(HANDLE* h) {
5526   if (h != NULL) {
5527     if (SuspendThread(*h) != ~0) {
5528       return true;
5529     }
5530   }
5531   return false;
5532 }
5533 
5534 // resume the thread
5535 // calling resume on an active thread is a no-op
5536 static void do_resume(HANDLE* h) {
5537   if (h != NULL) {
5538     ResumeThread(*h);
5539   }
5540 }
5541 
5542 // retrieve a suspend/resume context capable handle
5543 // from the tid. Caller validates handle return value.
5544 void get_thread_handle_for_extended_context(HANDLE* h,
5545                                             OSThread::thread_id_t tid) {
5546   if (h != NULL) {
5547     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5548   }
5549 }
5550 
5551 // Thread sampling implementation
5552 //
5553 void os::SuspendedThreadTask::internal_do_task() {
5554   CONTEXT    ctxt;
5555   HANDLE     h = NULL;
5556 
5557   // get context capable handle for thread
5558   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5559 
5560   // sanity
5561   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5562     return;
5563   }
5564 
5565   // suspend the thread
5566   if (do_suspend(&h)) {
5567     ctxt.ContextFlags = sampling_context_flags;
5568     // get thread context
5569     GetThreadContext(h, &ctxt);
5570     SuspendedThreadTaskContext context(_thread, &ctxt);
5571     // pass context to Thread Sampling impl
5572     do_task(context);
5573     // resume thread
5574     do_resume(&h);
5575   }
5576 
5577   // close handle
5578   CloseHandle(h);
5579 }
5580 
5581 bool os::start_debugging(char *buf, int buflen) {
5582   int len = (int)strlen(buf);
5583   char *p = &buf[len];
5584 
5585   jio_snprintf(p, buflen-len,
5586              "\n\n"
5587              "Do you want to debug the problem?\n\n"
5588              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5589              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5590              "Otherwise, select 'No' to abort...",
5591              os::current_process_id(), os::current_thread_id());
5592 
5593   bool yes = os::message_box("Unexpected Error", buf);
5594 
5595   if (yes) {
5596     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5597     // exception. If VM is running inside a debugger, the debugger will
5598     // catch the exception. Otherwise, the breakpoint exception will reach
5599     // the default windows exception handler, which can spawn a debugger and
5600     // automatically attach to the dying VM.
5601     os::breakpoint();
5602     yes = false;
5603   }
5604   return yes;
5605 }
5606 
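     // GetModuleHandle(NULL) returns the module handle of the executable that
     // started the current process, which serves as the default lookup handle.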
5607 void* os::get_default_process_handle() {
5608   return (void*)GetModuleHandle(NULL);
5609 }
5610 
5611 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5612 // which is used to find statically linked in agents.
5613 // Additionally for windows, takes into account __stdcall names.
5614 // Parameters:
5615 //            sym_name: Symbol in library we are looking for
5616 //            lib_name: Name of library to look in, NULL for shared libs.
5617 //            is_absolute_path == true if lib_name is absolute path to agent
5618 //                                     such as "C:/a/b/L.dll"
5619 //            == false if only the base name of the library is passed in
5620 //               such as "L"
5621 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5622                                     bool is_absolute_path) {
5623   char *agent_entry_name;
5624   size_t len;
5625   size_t name_len;
5626   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5627   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5628   const char *start;
5629 
5630   if (lib_name != NULL) {
5631     len = name_len = strlen(lib_name);
5632     if (is_absolute_path) {
5633       // Need to strip path, prefix and suffix
5634       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5635         lib_name = ++start;
5636       } else {
5637         // Need to check for drive prefix
5638         if ((start = strchr(lib_name, ':')) != NULL) {
5639           lib_name = ++start;
5640         }
5641       }
5642       if (len <= (prefix_len + suffix_len)) {
5643         return NULL;
5644       }
5645       lib_name += prefix_len;
5646       name_len = strlen(lib_name) - suffix_len;
5647     }
5648   }
5649   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5650   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5651   if (agent_entry_name == NULL) {
5652     return NULL;
5653   }
5654   if (lib_name != NULL) {
5655     const char *p = strrchr(sym_name, '@');
5656     if (p != NULL && p != sym_name) {
5657       // sym_name == _Agent_OnLoad@XX
5658       strncpy(agent_entry_name, sym_name, (p - sym_name));
5659       agent_entry_name[(p-sym_name)] = '\0';
5660       // agent_entry_name == _Agent_OnLoad
5661       strcat(agent_entry_name, "_");
5662       strncat(agent_entry_name, lib_name, name_len);
5663       strcat(agent_entry_name, p);
5664       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5665     } else {
5666       strcpy(agent_entry_name, sym_name);
5667       strcat(agent_entry_name, "_");
5668       strncat(agent_entry_name, lib_name, name_len);
5669     }
5670   } else {
5671     strcpy(agent_entry_name, sym_name);
5672   }
5673   return agent_entry_name;
5674 }
5675 
5676 #ifndef PRODUCT
5677 
5678 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5679 // contiguous memory block at a particular address.
5680 // The test first tries to find a good approximate address to allocate at by using the same
5681 // method to allocate some memory at any address. The test then tries to allocate memory in
5682 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5683 // This is of course only a rough assumption; there is no guarantee that the vicinity of
5684 // the previously allocated memory is available for allocation. The only actual failure
5685 // that is reported is when the test tries to allocate at a particular location but gets a
5686 // different valid one. A NULL return value at this point is not considered an error but may
5687 // be legitimate.
5688 void TestReserveMemorySpecial_test() {
5689   if (!UseLargePages) {
5690     return;
5691   }
5692   // save current value of globals
5693   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5694   bool old_use_numa_interleaving = UseNUMAInterleaving;
5695 
5696   // set globals to make sure we hit the correct code path
5697   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5698 
5699   // do an allocation at an address selected by the OS to get a good one.
5700   const size_t large_allocation_size = os::large_page_size() * 4;
5701   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5702   if (result == NULL) {
5703   } else {
5704     os::release_memory_special(result, large_allocation_size);
5705 
5706     // allocate another page within the recently allocated memory area, which seems to be a good location; at least
5707     // we managed to get it once.
5708     const size_t expected_allocation_size = os::large_page_size();
5709     char* expected_location = result + os::large_page_size();
5710     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5711     if (actual_location == NULL) {
5712     } else {
5713       // release memory
5714       os::release_memory_special(actual_location, expected_allocation_size);
5715       // only now check, after releasing any memory to avoid any leaks.
5716       assert(actual_location == expected_location,
5717              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5718              expected_location, expected_allocation_size, actual_location);
5719     }
5720   }
5721 
5722   // restore globals
5723   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5724   UseNUMAInterleaving = old_use_numa_interleaving;
5725 }
5726 #endif // PRODUCT
5727 
5728 /*
5729   All the defined signal names for Windows.
5730 
5731   NOTE that not all of these names are accepted by FindSignal!
5732 
5733   For various reasons some of these may be rejected at runtime.
5734 
5735   Here are the names currently accepted by a user of sun.misc.Signal with
5736   1.4.1 (ignoring potential interaction with use of chaining, etc):
5737 
5738      (LIST TBD)
5739 
5740 */
5741 int os::get_signal_number(const char* name) {
5742   static const struct {
5743     const char* name;
5744     int         number;
5745   } siglabels [] =
5746     // derived from version 6.0 VC98/include/signal.h
5747   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5748   "FPE",        SIGFPE,         // floating point exception
5749   "SEGV",       SIGSEGV,        // segment violation
5750   "INT",        SIGINT,         // interrupt
5751   "TERM",       SIGTERM,        // software term signal from kill
5752   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5753   "ILL",        SIGILL};        // illegal instruction
5754   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5755     if (strcmp(name, siglabels[i].name) == 0) {
5756       return siglabels[i].number;
5757     }
5758   }
5759   return -1;
5760 }
5761 
5762 // Fast current thread access
5763 
5764 int os::win32::_thread_ptr_offset = 0;
5765 
5766 static void call_wrapper_dummy() {}
5767 
5768 // We need to call the os_exception_wrapper once so that it sets
5769 // up the offset from FS of the thread pointer.
5770 void os::win32::initialize_thread_ptr_offset() {
5771   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5772                            NULL, NULL, NULL, NULL);
5773 }
5774 
5775 bool os::supports_map_sync() {
5776   return false;
5777 }