1 /*
   2  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "logging/log.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "memory/allocation.inline.hpp"
  41 #include "memory/filemap.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/globals.hpp"
  50 #include "runtime/interfaceSupport.inline.hpp"
  51 #include "runtime/java.hpp"
  52 #include "runtime/javaCalls.hpp"
  53 #include "runtime/mutexLocker.hpp"
  54 #include "runtime/objectMonitor.hpp"
  55 #include "runtime/orderAccess.hpp"
  56 #include "runtime/osThread.hpp"
  57 #include "runtime/perfMemory.hpp"
  58 #include "runtime/safepointMechanism.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "services/attachListener.hpp"
  67 #include "services/memTracker.hpp"
  68 #include "services/runtimeService.hpp"
  69 #include "utilities/align.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/macros.hpp"
  74 #include "utilities/vmError.hpp"
  75 #include "symbolengine.hpp"
  76 #include "windbghelp.hpp"
  77 
  78 #ifdef _DEBUG
  79 #include <crtdbg.h>
  80 #endif
  81 
  82 #include <windows.h>
  83 #include <sys/types.h>
  84 #include <sys/stat.h>
  85 #include <sys/timeb.h>
  86 #include <objidl.h>
  87 #include <shlobj.h>
  88 
  89 #include <malloc.h>
  90 #include <signal.h>
  91 #include <direct.h>
  92 #include <errno.h>
  93 #include <fcntl.h>
  94 #include <io.h>
  95 #include <process.h>              // For _beginthreadex(), _endthreadex()
  96 #include <imagehlp.h>             // For os::dll_address_to_function_name
  97 // for enumerating dll libraries
  98 #include <vdmdbg.h>
  99 #include <psapi.h>
 100 #include <mmsystem.h>
 101 #include <winsock2.h>
 102 
 103 // For timer info max values, which include all bits.
 104 #define ALL_64_BITS CONST64(-1)
 105 
 106 // For DLL loading/load error detection
 107 // Values of PE COFF
 108 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 109 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 110 
 111 static HANDLE main_process;
 112 static HANDLE main_thread;
 113 static int    main_thread_id;
 114 
 115 static FILETIME process_creation_time;
 116 static FILETIME process_exit_time;
 117 static FILETIME process_user_time;
 118 static FILETIME process_kernel_time;
 119 
 120 #ifdef _M_AMD64
 121   #define __CPU__ amd64
 122 #else
 123   #define __CPU__ i486
 124 #endif
 125 
 126 #if INCLUDE_AOT
 127 PVOID  topLevelVectoredExceptionHandler = NULL;
 128 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 129 #endif
 130 
 131 // save DLL module handle, used by GetModuleFileName
 132 
 133 HINSTANCE vm_lib_handle;
 134 
 135 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 136   switch (reason) {
 137   case DLL_PROCESS_ATTACH:
 138     vm_lib_handle = hinst;
 139     if (ForceTimeHighResolution) {
 140       timeBeginPeriod(1L);
 141     }
 142     WindowsDbgHelp::pre_initialize();
 143     SymbolEngine::pre_initialize();
 144     break;
 145   case DLL_PROCESS_DETACH:
 146     if (ForceTimeHighResolution) {
 147       timeEndPeriod(1L);
 148     }
 149 #if INCLUDE_AOT
 150     if (topLevelVectoredExceptionHandler != NULL) {
 151       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 152       topLevelVectoredExceptionHandler = NULL;
 153     }
 154 #endif
 155     break;
 156   default:
 157     break;
 158   }
 159   return true;
 160 }
 161 
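     // Convert a FILETIME (a 64-bit count of 100-nanosecond units split into
     // two 32-bit halves) to seconds as a double: dwLowDateTime / 1e7 plus
     // dwHighDateTime scaled by roughly 2^32 / 1e7.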
 162 static inline double fileTimeAsDouble(FILETIME* time) {
 163   const double high  = (double) ((unsigned int) ~0);
 164   const double split = 10000000.0;
 165   double result = (time->dwLowDateTime / split) +
 166                    time->dwHighDateTime * (high/split);
 167   return result;
 168 }
 169 
 170 // Implementation of os
 171 
 172 bool os::unsetenv(const char* name) {
 173   assert(name != NULL, "Null pointer");
 174   return (SetEnvironmentVariable(name, NULL) == TRUE);
 175 }
 176 
 177 // No setuid programs under Windows.
 178 bool os::have_special_privileges() {
 179   return false;
 180 }
 181 
 182 
 183 // This method is a periodic task to check for misbehaving JNI applications
 184 // under CheckJNI; we can add any periodic checks here.
 185 // On Windows it currently does nothing.
 186 void os::run_periodic_checks() {
 187   return;
 188 }
 189 
 190 // previous UnhandledExceptionFilter, if there is one
 191 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 192 
 193 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 194 
 195 void os::init_system_properties_values() {
 196   // sysclasspath, java_home, dll_dir
 197   {
 198     char *home_path;
 199     char *dll_path;
 200     char *pslash;
 201     const char *bin = "\\bin";
 202     char home_dir[MAX_PATH + 1];
 203     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 204 
 205     if (alt_home_dir != NULL)  {
 206       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 207       home_dir[MAX_PATH] = '\0';
 208     } else {
 209       os::jvm_path(home_dir, sizeof(home_dir));
 210       // Found the full path to jvm.dll.
 211       // Now cut the path to <java_home>/jre if we can.
 212       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 213       pslash = strrchr(home_dir, '\\');
 214       if (pslash != NULL) {
 215         *pslash = '\0';                   // get rid of \{client|server}
 216         pslash = strrchr(home_dir, '\\');
 217         if (pslash != NULL) {
 218           *pslash = '\0';                 // get rid of \bin
 219         }
 220       }
 221     }
 222 
 223     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 224     strcpy(home_path, home_dir);
 225     Arguments::set_java_home(home_path);
 226     FREE_C_HEAP_ARRAY(char, home_path);
 227 
 228     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 229                                 mtInternal);
 230     strcpy(dll_path, home_dir);
 231     strcat(dll_path, bin);
 232     Arguments::set_dll_dir(dll_path);
 233     FREE_C_HEAP_ARRAY(char, dll_path);
 234 
 235     if (!set_boot_path('\\', ';')) {
 236       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 237     }
 238   }
 239 
 240 // library_path
 241 #define EXT_DIR "\\lib\\ext"
 242 #define BIN_DIR "\\bin"
 243 #define PACKAGE_DIR "\\Sun\\Java"
 244   {
 245     // Win32 library search order (See the documentation for LoadLibrary):
 246     //
 247     // 1. The directory from which application is loaded.
 248     // 2. The system wide Java Extensions directory (Java only)
 249     // 3. System directory (GetSystemDirectory)
 250     // 4. Windows directory (GetWindowsDirectory)
 251     // 5. The PATH environment variable
 252     // 6. The current directory
 253 
 254     char *library_path;
 255     char tmp[MAX_PATH];
 256     char *path_str = ::getenv("PATH");
 257 
 258     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 259                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 260 
 261     library_path[0] = '\0';
 262 
 263     GetModuleFileName(NULL, tmp, sizeof(tmp));
 264     *(strrchr(tmp, '\\')) = '\0';
 265     strcat(library_path, tmp);
 266 
 267     GetWindowsDirectory(tmp, sizeof(tmp));
 268     strcat(library_path, ";");
 269     strcat(library_path, tmp);
 270     strcat(library_path, PACKAGE_DIR BIN_DIR);
 271 
 272     GetSystemDirectory(tmp, sizeof(tmp));
 273     strcat(library_path, ";");
 274     strcat(library_path, tmp);
 275 
 276     GetWindowsDirectory(tmp, sizeof(tmp));
 277     strcat(library_path, ";");
 278     strcat(library_path, tmp);
 279 
 280     if (path_str) {
 281       strcat(library_path, ";");
 282       strcat(library_path, path_str);
 283     }
 284 
 285     strcat(library_path, ";.");
 286 
 287     Arguments::set_library_path(library_path);
 288     FREE_C_HEAP_ARRAY(char, library_path);
 289   }
 290 
 291   // Default extensions directory
 292   {
 293     char path[MAX_PATH];
 294     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 295     GetWindowsDirectory(path, MAX_PATH);
 296     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 297             path, PACKAGE_DIR, EXT_DIR);
 298     Arguments::set_ext_dirs(buf);
 299   }
 300   #undef EXT_DIR
 301   #undef BIN_DIR
 302   #undef PACKAGE_DIR
 303 
 304 #ifndef _WIN64
 305   // set our UnhandledExceptionFilter and save any previous one
 306   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 307 #endif
 308 
 309   // Done
 310   return;
 311 }
 312 
 313 void os::breakpoint() {
 314   DebugBreak();
 315 }
 316 
 317 // Invoked from the BREAKPOINT Macro
 318 extern "C" void breakpoint() {
 319   os::breakpoint();
 320 }
 321 
 322 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 323 // So far, this method is only used by Native Memory Tracking, which is
 324 // only supported on Windows XP or later.
 325 //
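     // The first argument passed to RtlCaptureStackBackTrace skips this frame in
     // addition to the caller-requested frames; any slots in the output array
     // beyond the captured count are cleared to NULL.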
 326 int os::get_native_stack(address* stack, int frames, int toSkip) {
 327   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 328   for (int index = captured; index < frames; index ++) {
 329     stack[index] = NULL;
 330   }
 331   return captured;
 332 }
 333 
 334 
 335 // os::current_stack_base()
 336 //
 337 //   Returns the base of the stack, which is the stack's
 338 //   starting address.  This function must be called
 339 //   while running on the stack of the thread being queried.
 340 
 341 address os::current_stack_base() {
 342   MEMORY_BASIC_INFORMATION minfo;
 343   address stack_bottom;
 344   size_t stack_size;
 345 
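       // Query the memory region containing the local variable 'minfo' itself;
       // since 'minfo' lives on this thread's stack, AllocationBase is the low
       // end of the stack's reservation (stack_bottom below).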
 346   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 347   stack_bottom =  (address)minfo.AllocationBase;
 348   stack_size = minfo.RegionSize;
 349 
 350   // Add up the sizes of all the regions with the same
 351   // AllocationBase.
 352   while (1) {
 353     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 354     if (stack_bottom == (address)minfo.AllocationBase) {
 355       stack_size += minfo.RegionSize;
 356     } else {
 357       break;
 358     }
 359   }
 360   return stack_bottom + stack_size;
 361 }
 362 
 363 size_t os::current_stack_size() {
 364   size_t sz;
 365   MEMORY_BASIC_INFORMATION minfo;
 366   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 367   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 368   return sz;
 369 }
 370 
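     // Scan [start, start + size) with VirtualQuery and report the first
     // contiguous run of committed pages: committed_start is its starting
     // address and committed_size its length, trimmed to the queried range.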
 371 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 372   MEMORY_BASIC_INFORMATION minfo;
 373   committed_start = NULL;
 374   committed_size = 0;
 375   address top = start + size;
 376   const address start_addr = start;
 377   while (start < top) {
 378     VirtualQuery(start, &minfo, sizeof(minfo));
 379     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 380       if (committed_start != NULL) {
 381         break;
 382       }
 383     } else {  // committed
 384       if (committed_start == NULL) {
 385         committed_start = start;
 386       }
 387       size_t offset = start - (address)minfo.BaseAddress;
 388       committed_size += minfo.RegionSize - offset;
 389     }
 390     start = (address)minfo.BaseAddress + minfo.RegionSize;
 391   }
 392 
 393   if (committed_start == NULL) {
 394     assert(committed_size == 0, "Sanity");
 395     return false;
 396   } else {
 397     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 398     // current region may go beyond the limit, trim to the limit
 399     committed_size = MIN2(committed_size, size_t(top - committed_start));
 400     return true;
 401   }
 402 }
 403 
 404 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 405   const struct tm* time_struct_ptr = localtime(clock);
 406   if (time_struct_ptr != NULL) {
 407     *res = *time_struct_ptr;
 408     return res;
 409   }
 410   return NULL;
 411 }
 412 
 413 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 414   const struct tm* time_struct_ptr = gmtime(clock);
 415   if (time_struct_ptr != NULL) {
 416     *res = *time_struct_ptr;
 417     return res;
 418   }
 419   return NULL;
 420 }
 421 
 422 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 423 
 424 // Thread start routine for all newly created threads
 425 static unsigned __stdcall thread_native_entry(Thread* thread) {
 426 
 427   thread->record_stack_base_and_size();
 428 
 429   // Try to randomize the cache line index of hot stack frames.
 430   // This helps when threads with the same stack traces evict each other's
 431   // cache lines. The threads can be either from the same JVM instance, or
 432   // from different JVM instances. The benefit is especially noticeable on
 433   // processors with hyperthreading technology.
 434   static int counter = 0;
 435   int pid = os::current_process_id();
 436   _alloca(((pid ^ counter++) & 7) * 128);
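       // The _alloca above offsets this thread's stack pointer by a multiple of
       // 128 bytes in the range 0..896, derived from the pid and a running
       // counter, so identical frames start at different cache line offsets.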
 437 
 438   thread->initialize_thread_current();
 439 
 440   OSThread* osthr = thread->osthread();
 441   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 442 
 443   if (UseNUMA) {
 444     int lgrp_id = os::numa_get_group_id();
 445     if (lgrp_id != -1) {
 446       thread->set_lgrp_id(lgrp_id);
 447     }
 448   }
 449 
 450   // Diagnostic code to investigate JDK-6573254
 451   int res = 30115;  // non-java thread
 452   if (thread->is_Java_thread()) {
 453     res = 20115;    // java thread
 454   }
 455 
 456   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 457 
 458   // Install a win32 structured exception handler around every thread created
 459   // by the VM, so the VM can generate an error dump when an exception occurs
 460   // in a non-Java thread (e.g. the VM thread).
 461   __try {
 462     thread->call_run();
 463   } __except(topLevelExceptionFilter(
 464                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 465     // Nothing to do.
 466   }
 467 
 468   // Note: at this point the thread object may already have deleted itself.
 469   // Do not dereference it from here on out.
 470 
 471   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 472 
 473   // One less thread is executing
 474   // When the VMThread gets here, the main thread may have already exited
 475   // which frees the CodeHeap containing the Atomic::add code
 476   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 477     Atomic::dec(&os::win32::_os_thread_count);
 478   }
 479 
 480   // Thread must not return from exit_process_or_thread(), but if it does,
 481   // let it proceed to exit normally
 482   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 483 }
 484 
 485 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 486                                   int thread_id) {
 487   // Allocate the OSThread object
 488   OSThread* osthread = new OSThread(NULL, NULL);
 489   if (osthread == NULL) return NULL;
 490 
 491   // Initialize the JDK library's interrupt event.
 492   // This should really be done when OSThread is constructed,
 493   // but there is no way for a constructor to report failure to
 494   // allocate the event.
 495   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 496   if (interrupt_event == NULL) {
 497     delete osthread;
 498     return NULL;
 499   }
 500   osthread->set_interrupt_event(interrupt_event);
 501 
 502   // Store info on the Win32 thread into the OSThread
 503   osthread->set_thread_handle(thread_handle);
 504   osthread->set_thread_id(thread_id);
 505 
 506   if (UseNUMA) {
 507     int lgrp_id = os::numa_get_group_id();
 508     if (lgrp_id != -1) {
 509       thread->set_lgrp_id(lgrp_id);
 510     }
 511   }
 512 
 513   // Initial thread state is INITIALIZED, not SUSPENDED
 514   osthread->set_state(INITIALIZED);
 515 
 516   return osthread;
 517 }
 518 
 519 
 520 bool os::create_attached_thread(JavaThread* thread) {
 521 #ifdef ASSERT
 522   thread->verify_not_published();
 523 #endif
 524   HANDLE thread_h;
 525   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 526                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 527     fatal("DuplicateHandle failed\n");
 528   }
 529   OSThread* osthread = create_os_thread(thread, thread_h,
 530                                         (int)current_thread_id());
 531   if (osthread == NULL) {
 532     return false;
 533   }
 534 
 535   // Initial thread state is RUNNABLE
 536   osthread->set_state(RUNNABLE);
 537 
 538   thread->set_osthread(osthread);
 539 
 540   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 541     os::current_thread_id());
 542 
 543   return true;
 544 }
 545 
 546 bool os::create_main_thread(JavaThread* thread) {
 547 #ifdef ASSERT
 548   thread->verify_not_published();
 549 #endif
 550   if (_starting_thread == NULL) {
 551     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 552     if (_starting_thread == NULL) {
 553       return false;
 554     }
 555   }
 556 
 557   // The primordial thread is runnable from the start.
 558   _starting_thread->set_state(RUNNABLE);
 559 
 560   thread->set_osthread(_starting_thread);
 561   return true;
 562 }
 563 
 564 // Helper function to trace _beginthreadex attributes,
 565 //  similar to os::Posix::describe_pthread_attr()
 566 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 567                                                size_t stacksize, unsigned initflag) {
 568   stringStream ss(buf, buflen);
 569   if (stacksize == 0) {
 570     ss.print("stacksize: default, ");
 571   } else {
 572     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 573   }
 574   ss.print("flags: ");
 575   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 576   #define ALL(X) \
 577     X(CREATE_SUSPENDED) \
 578     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 579   ALL(PRINT_FLAG)
 580   #undef ALL
 581   #undef PRINT_FLAG
 582   return buf;
 583 }
 584 
 585 // Allocate and initialize a new OSThread
 586 bool os::create_thread(Thread* thread, ThreadType thr_type,
 587                        size_t stack_size) {
 588   unsigned thread_id;
 589 
 590   // Allocate the OSThread object
 591   OSThread* osthread = new OSThread(NULL, NULL);
 592   if (osthread == NULL) {
 593     return false;
 594   }
 595 
 596   // Initialize the JDK library's interrupt event.
 597   // This should really be done when OSThread is constructed,
 598   // but there is no way for a constructor to report failure to
 599   // allocate the event.
 600   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 601   if (interrupt_event == NULL) {
 602     delete osthread;
 603     return false;
 604   }
 605   osthread->set_interrupt_event(interrupt_event);
 606   // We don't call set_interrupted(false) as it will trip the assert in there
 607   // as we are not operating on the current thread. We don't need to call it
 608   // because the initial state is already correct.
 609 
 610   thread->set_osthread(osthread);
 611 
 612   if (stack_size == 0) {
 613     switch (thr_type) {
 614     case os::java_thread:
 615       // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
 616       if (JavaThread::stack_size_at_create() > 0) {
 617         stack_size = JavaThread::stack_size_at_create();
 618       }
 619       break;
 620     case os::compiler_thread:
 621       if (CompilerThreadStackSize > 0) {
 622         stack_size = (size_t)(CompilerThreadStackSize * K);
 623         break;
 624       } // else fall through:
 625         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 626     case os::vm_thread:
 627     case os::pgc_thread:
 628     case os::cgc_thread:
 629     case os::watcher_thread:
 630       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 631       break;
 632     }
 633   }
 634 
 635   // Create the Win32 thread
 636   //
 637   // Contrary to what the MSDN documentation says, "stack_size" in
 638   // _beginthreadex() does not specify the stack size. Instead, it specifies
 639   // the size of initially committed space. The stack size is determined by
 640   // the PE header in the executable. If the committed "stack_size" is larger
 641   // than the default value in the PE header, the stack is rounded up to the
 642   // nearest multiple of 1MB. For example, if the launcher has a default
 643   // stack size of 320k, specifying any size less than 320k does not
 644   // affect the actual stack size at all; it only affects the initial
 645   // commitment. On the other hand, specifying a 'stack_size' larger than the
 646   // default value may cause a significant increase in memory usage, because
 647   // not only is the stack space rounded up to a multiple of 1MB, but the
 648   // entire space is also committed upfront.
 649   //
 650   // Finally, Windows XP added a new flag, 'STACK_SIZE_PARAM_IS_A_RESERVATION',
 651   // for CreateThread() that makes 'stack_size' be treated as the stack size.
 652   // However, we are not supposed to call CreateThread() directly, according
 653   // to the MSDN documentation, because the JVM uses the C runtime library.
 654   // The good news is that the flag appears to work with _beginthreadex() as well.
 655 
 656   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 657   HANDLE thread_handle =
 658     (HANDLE)_beginthreadex(NULL,
 659                            (unsigned)stack_size,
 660                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 661                            thread,
 662                            initflag,
 663                            &thread_id);
 664 
 665   char buf[64];
 666   if (thread_handle != NULL) {
 667     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 668       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 669   } else {
 670     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 671       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 672     // Log some OS information which might explain why creating the thread failed.
 673     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 674     LogStream st(Log(os, thread)::info());
 675     os::print_memory_info(&st);
 676   }
 677 
 678   if (thread_handle == NULL) {
 679     // Need to clean up stuff we've allocated so far
 680     thread->set_osthread(NULL);
 681     delete osthread;
 682     return false;
 683   }
 684 
 685   Atomic::inc(&os::win32::_os_thread_count);
 686 
 687   // Store info on the Win32 thread into the OSThread
 688   osthread->set_thread_handle(thread_handle);
 689   osthread->set_thread_id(thread_id);
 690 
 691   // Initial thread state is INITIALIZED, not SUSPENDED
 692   osthread->set_state(INITIALIZED);
 693 
 694   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 695   return true;
 696 }
 697 
 698 
 699 // Free Win32 resources related to the OSThread
 700 void os::free_thread(OSThread* osthread) {
 701   assert(osthread != NULL, "osthread not set");
 702 
 703   // We are told to free resources of the argument thread,
 704   // but we can only really operate on the current thread.
 705   assert(Thread::current()->osthread() == osthread,
 706          "os::free_thread but not current thread");
 707 
 708   CloseHandle(osthread->thread_handle());
 709   delete osthread;
 710 }
 711 
 712 static jlong first_filetime;
 713 static jlong initial_performance_count;
 714 static jlong performance_frequency;
 715 
 716 
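     // Combine the two 32-bit halves of a LARGE_INTEGER into a single jlong.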
 717 jlong as_long(LARGE_INTEGER x) {
 718   jlong result = 0; // initialization to avoid warning
 719   set_high(&result, x.HighPart);
 720   set_low(&result, x.LowPart);
 721   return result;
 722 }
 723 
 724 
 725 jlong os::elapsed_counter() {
 726   LARGE_INTEGER count;
 727   QueryPerformanceCounter(&count);
 728   return as_long(count) - initial_performance_count;
 729 }
 730 
 731 
 732 jlong os::elapsed_frequency() {
 733   return performance_frequency;
 734 }
 735 
 736 
 737 julong os::available_memory() {
 738   return win32::available_memory();
 739 }
 740 
 741 julong os::win32::available_memory() {
 742   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
 743   // incorrect value if total memory is larger than 4GB.
 744   MEMORYSTATUSEX ms;
 745   ms.dwLength = sizeof(ms);
 746   GlobalMemoryStatusEx(&ms);
 747 
 748   return (julong)ms.ullAvailPhys;
 749 }
 750 
 751 julong os::physical_memory() {
 752   return win32::physical_memory();
 753 }
 754 
 755 bool os::has_allocatable_memory_limit(julong* limit) {
 756   MEMORYSTATUSEX ms;
 757   ms.dwLength = sizeof(ms);
 758   GlobalMemoryStatusEx(&ms);
 759 #ifdef _LP64
 760   *limit = (julong)ms.ullAvailVirtual;
 761   return true;
 762 #else
 763   // Limit to 1400m because of the 2gb address space wall
 764   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 765   return true;
 766 #endif
 767 }
 768 
 769 int os::active_processor_count() {
 770   // User has overridden the number of active processors
 771   if (ActiveProcessorCount > 0) {
 772     log_trace(os)("active_processor_count: "
 773                   "active processor count set by user : %d",
 774                   ActiveProcessorCount);
 775     return ActiveProcessorCount;
 776   }
 777 
 778   DWORD_PTR lpProcessAffinityMask = 0;
 779   DWORD_PTR lpSystemAffinityMask = 0;
 780   int proc_count = processor_count();
 781   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 782       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 783     // The number of active processors is the number of set bits in the process affinity mask
 784     int bitcount = 0;
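         // Count the set bits by repeatedly clearing the lowest set bit
         // (Kernighan's method); each set bit is one usable processor.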
 785     while (lpProcessAffinityMask != 0) {
 786       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 787       bitcount++;
 788     }
 789     return bitcount;
 790   } else {
 791     return proc_count;
 792   }
 793 }
 794 
 795 uint os::processor_id() {
 796   return (uint)GetCurrentProcessorNumber();
 797 }
 798 
 799 void os::set_native_thread_name(const char *name) {
 800 
 801   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 802   //
 803   // Note that unfortunately this only works if the process
 804   // is already attached to a debugger; the debugger must observe
 805   // the exception below to show the correct name.
 806 
 807   // If there is no debugger attached skip raising the exception
 808   if (!IsDebuggerPresent()) {
 809     return;
 810   }
 811 
 812   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 813   struct {
 814     DWORD dwType;     // must be 0x1000
 815     LPCSTR szName;    // pointer to name (in user addr space)
 816     DWORD dwThreadID; // thread ID (-1=caller thread)
 817     DWORD dwFlags;    // reserved for future use, must be zero
 818   } info;
 819 
 820   info.dwType = 0x1000;
 821   info.szName = name;
 822   info.dwThreadID = -1;
 823   info.dwFlags = 0;
 824 
 825   __try {
 826     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 827   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 828 }
 829 
 830 bool os::bind_to_processor(uint processor_id) {
 831   // Not yet implemented.
 832   return false;
 833 }
 834 
 835 void os::win32::initialize_performance_counter() {
 836   LARGE_INTEGER count;
 837   QueryPerformanceFrequency(&count);
 838   performance_frequency = as_long(count);
 839   QueryPerformanceCounter(&count);
 840   initial_performance_count = as_long(count);
 841 }
 842 
 843 
 844 double os::elapsedTime() {
 845   return (double) elapsed_counter() / (double) elapsed_frequency();
 846 }
 847 
 848 
 849 // Windows format:
 850 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 851 // Java format:
 852 //   Java standards require the number of milliseconds since 1/1/1970
 853 
 854 // Constant offset - calculated using offset()
 855 static jlong  _offset   = 116444736000000000;
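     // (116444736000000000 is the number of 100-nanosecond intervals between
     // 1601-01-01, the FILETIME epoch, and 1970-01-01, the Java epoch:
     // 11644473600 seconds * 10^7.)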
 856 // Fake time counter for reproducible results when debugging
 857 static jlong  fake_time = 0;
 858 
 859 #ifdef ASSERT
 860 // Just to be safe, recalculate the offset in debug mode
 861 static jlong _calculated_offset = 0;
 862 static int   _has_calculated_offset = 0;
 863 
 864 jlong offset() {
 865   if (_has_calculated_offset) return _calculated_offset;
 866   SYSTEMTIME java_origin;
 867   java_origin.wYear          = 1970;
 868   java_origin.wMonth         = 1;
 869   java_origin.wDayOfWeek     = 0; // ignored
 870   java_origin.wDay           = 1;
 871   java_origin.wHour          = 0;
 872   java_origin.wMinute        = 0;
 873   java_origin.wSecond        = 0;
 874   java_origin.wMilliseconds  = 0;
 875   FILETIME jot;
 876   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 877     fatal("Error = %d\nWindows error", GetLastError());
 878   }
 879   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 880   _has_calculated_offset = 1;
 881   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 882   return _calculated_offset;
 883 }
 884 #else
 885 jlong offset() {
 886   return _offset;
 887 }
 888 #endif
 889 
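     // Convert a FILETIME to Java milliseconds: rebase from the 1601 epoch to
     // the 1970 epoch, then divide the 100-nanosecond ticks by 10000.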
 890 jlong windows_to_java_time(FILETIME wt) {
 891   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 892   return (a - offset()) / 10000;
 893 }
 894 
 895 // Returns time ticks in tenths of microseconds (100-nanosecond units)
 896 jlong windows_to_time_ticks(FILETIME wt) {
 897   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 898   return (a - offset());
 899 }
 900 
 901 FILETIME java_to_windows_time(jlong l) {
 902   jlong a = (l * 10000) + offset();
 903   FILETIME result;
 904   result.dwHighDateTime = high(a);
 905   result.dwLowDateTime  = low(a);
 906   return result;
 907 }
 908 
 909 bool os::supports_vtime() { return true; }
 910 
 911 double os::elapsedVTime() {
 912   FILETIME created;
 913   FILETIME exited;
 914   FILETIME kernel;
 915   FILETIME user;
 916   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 917     // the resolution of windows_to_java_time() should be sufficient (ms)
 918     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 919   } else {
 920     return elapsedTime();
 921   }
 922 }
 923 
 924 jlong os::javaTimeMillis() {
 925   FILETIME wt;
 926   GetSystemTimeAsFileTime(&wt);
 927   return windows_to_java_time(wt);
 928 }
 929 
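     // Split the current wall-clock time into whole seconds and the remaining
     // nanoseconds, computed from 100-nanosecond ticks since the Java epoch.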
 930 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 931   FILETIME wt;
 932   GetSystemTimeAsFileTime(&wt);
 933   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 934   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 935   seconds = secs;
 936   nanos = jlong(ticks - (secs*10000000)) * 100;
 937 }
 938 
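     // Monotonic time: scale the QueryPerformanceCounter reading to nanoseconds
     // (ticks / frequency gives seconds, then multiply by NANOSECS_PER_SEC).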
 939 jlong os::javaTimeNanos() {
 940     LARGE_INTEGER current_count;
 941     QueryPerformanceCounter(&current_count);
 942     double current = as_long(current_count);
 943     double freq = performance_frequency;
 944     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 945     return time;
 946 }
 947 
 948 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 949   jlong freq = performance_frequency;
 950   if (freq < NANOSECS_PER_SEC) {
 951     // the performance counter is 64 bits and we will
 952     // be multiplying it -- so no wrap in 64 bits
 953     info_ptr->max_value = ALL_64_BITS;
 954   } else if (freq > NANOSECS_PER_SEC) {
 955     // use the max value the counter can reach to
 956     // determine the max value which could be returned
 957     julong max_counter = (julong)ALL_64_BITS;
 958     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 959   } else {
 960     // the performance counter is 64 bits and we will
 961     // be using it directly -- so no wrap in 64 bits
 962     info_ptr->max_value = ALL_64_BITS;
 963   }
 964 
 965   // using a counter, so no skipping
 966   info_ptr->may_skip_backward = false;
 967   info_ptr->may_skip_forward = false;
 968 
 969   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 970 }
 971 
 972 char* os::local_time_string(char *buf, size_t buflen) {
 973   SYSTEMTIME st;
 974   GetLocalTime(&st);
 975   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 976                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 977   return buf;
 978 }
 979 
 980 bool os::getTimesSecs(double* process_real_time,
 981                       double* process_user_time,
 982                       double* process_system_time) {
 983   HANDLE h_process = GetCurrentProcess();
 984   FILETIME create_time, exit_time, kernel_time, user_time;
 985   BOOL result = GetProcessTimes(h_process,
 986                                 &create_time,
 987                                 &exit_time,
 988                                 &kernel_time,
 989                                 &user_time);
 990   if (result != 0) {
 991     FILETIME wt;
 992     GetSystemTimeAsFileTime(&wt);
 993     jlong rtc_millis = windows_to_java_time(wt);
 994     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 995     *process_user_time =
 996       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 997     *process_system_time =
 998       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
 999     return true;
1000   } else {
1001     return false;
1002   }
1003 }
1004 
1005 void os::shutdown() {
1006   // allow PerfMemory to attempt cleanup of any persistent resources
1007   perfMemory_exit();
1008 
1009   // flush buffered output, finish log files
1010   ostream_abort();
1011 
1012   // Check for abort hook
1013   abort_hook_t abort_hook = Arguments::abort_hook();
1014   if (abort_hook != NULL) {
1015     abort_hook();
1016   }
1017 }
1018 
1019 
1020 static HANDLE dumpFile = NULL;
1021 
1022 // Check if dump file can be created.
1023 void os::check_dump_limit(char* buffer, size_t buffsz) {
1024   bool status = true;
1025   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1026     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1027     status = false;
1028   }
1029 
1030 #ifndef ASSERT
1031   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1032     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1033     status = false;
1034   }
1035 #endif
1036 
1037   if (status) {
1038     const char* cwd = get_current_directory(NULL, 0);
1039     int pid = current_process_id();
1040     if (cwd != NULL) {
1041       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1042     } else {
1043       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1044     }
1045 
1046     if (dumpFile == NULL &&
1047        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1048                  == INVALID_HANDLE_VALUE) {
1049       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1050       status = false;
1051     }
1052   }
1053   VMError::record_coredump_status(buffer, status);
1054 }
1055 
1056 void os::abort(bool dump_core, void* siginfo, const void* context) {
1057   EXCEPTION_POINTERS ep;
1058   MINIDUMP_EXCEPTION_INFORMATION mei;
1059   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1060 
1061   HANDLE hProcess = GetCurrentProcess();
1062   DWORD processId = GetCurrentProcessId();
1063   MINIDUMP_TYPE dumpType;
1064 
1065   shutdown();
1066   if (!dump_core || dumpFile == NULL) {
1067     if (dumpFile != NULL) {
1068       CloseHandle(dumpFile);
1069     }
1070     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1071   }
1072 
1073   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1074     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1075 
1076   if (siginfo != NULL && context != NULL) {
1077     ep.ContextRecord = (PCONTEXT) context;
1078     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1079 
1080     mei.ThreadId = GetCurrentThreadId();
1081     mei.ExceptionPointers = &ep;
1082     pmei = &mei;
1083   } else {
1084     pmei = NULL;
1085   }
1086 
1087   // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
1088   // the dump types we really want. If the first call fails, let's fall back to just using MiniDumpWithFullMemory.
1089   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1090       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1091     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1092   }
1093   CloseHandle(dumpFile);
1094   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1095 }
1096 
1097 // Die immediately, no exit hook, no abort hook, no cleanup.
1098 void os::die() {
1099   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1100 }
1101 
1102 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1103 //  * dirent_md.c       1.15 00/02/02
1104 //
1105 // The declarations for DIR and struct dirent are in jvm_win32.h.
1106 
1107 // Caller must have already run dirname through JVM_NativePath, which removes
1108 // duplicate slashes and converts all instances of '/' into '\\'.
1109 
1110 DIR * os::opendir(const char *dirname) {
1111   assert(dirname != NULL, "just checking");   // hotspot change
1112   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1113   DWORD fattr;                                // hotspot change
1114   char alt_dirname[4] = { 0, 0, 0, 0 };
1115 
1116   if (dirp == 0) {
1117     errno = ENOMEM;
1118     return 0;
1119   }
1120 
1121   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1122   // as a directory in FindFirstFile().  We detect this case here and
1123   // prepend the current drive name.
1124   //
1125   if (dirname[1] == '\0' && dirname[0] == '\\') {
1126     alt_dirname[0] = _getdrive() + 'A' - 1;
1127     alt_dirname[1] = ':';
1128     alt_dirname[2] = '\\';
1129     alt_dirname[3] = '\0';
1130     dirname = alt_dirname;
1131   }
1132 
1133   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1134   if (dirp->path == 0) {
1135     free(dirp);
1136     errno = ENOMEM;
1137     return 0;
1138   }
1139   strcpy(dirp->path, dirname);
1140 
1141   fattr = GetFileAttributes(dirp->path);
1142   if (fattr == 0xffffffff) {
1143     free(dirp->path);
1144     free(dirp);
1145     errno = ENOENT;
1146     return 0;
1147   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1148     free(dirp->path);
1149     free(dirp);
1150     errno = ENOTDIR;
1151     return 0;
1152   }
1153 
1154   // Append "*.*", or possibly "\\*.*", to path
1155   if (dirp->path[1] == ':' &&
1156       (dirp->path[2] == '\0' ||
1157       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1158     // No '\\' needed for cases like "Z:" or "Z:\"
1159     strcat(dirp->path, "*.*");
1160   } else {
1161     strcat(dirp->path, "\\*.*");
1162   }
1163 
1164   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1165   if (dirp->handle == INVALID_HANDLE_VALUE) {
1166     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1167       free(dirp->path);
1168       free(dirp);
1169       errno = EACCES;
1170       return 0;
1171     }
1172   }
1173   return dirp;
1174 }
1175 
1176 struct dirent * os::readdir(DIR *dirp) {
1177   assert(dirp != NULL, "just checking");      // hotspot change
1178   if (dirp->handle == INVALID_HANDLE_VALUE) {
1179     return NULL;
1180   }
1181 
1182   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1183 
1184   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1185     if (GetLastError() == ERROR_INVALID_HANDLE) {
1186       errno = EBADF;
1187       return NULL;
1188     }
1189     FindClose(dirp->handle);
1190     dirp->handle = INVALID_HANDLE_VALUE;
1191   }
1192 
1193   return &dirp->dirent;
1194 }
1195 
1196 int os::closedir(DIR *dirp) {
1197   assert(dirp != NULL, "just checking");      // hotspot change
1198   if (dirp->handle != INVALID_HANDLE_VALUE) {
1199     if (!FindClose(dirp->handle)) {
1200       errno = EBADF;
1201       return -1;
1202     }
1203     dirp->handle = INVALID_HANDLE_VALUE;
1204   }
1205   free(dirp->path);
1206   free(dirp);
1207   return 0;
1208 }
1209 
1210 // This must be hard-coded because it's the system's temporary
1211 // directory, not the Java application's temp directory, a la java.io.tmpdir.
1212 const char* os::get_temp_directory() {
1213   static char path_buf[MAX_PATH];
1214   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1215     return path_buf;
1216   } else {
1217     path_buf[0] = '\0';
1218     return path_buf;
1219   }
1220 }
1221 
1222 // Needs to be in an OS-specific directory because Windows requires the
1223 // additional header file <direct.h>.
1224 const char* os::get_current_directory(char *buf, size_t buflen) {
1225   int n = static_cast<int>(buflen);
1226   if (buflen > INT_MAX)  n = INT_MAX;
1227   return _getcwd(buf, n);
1228 }
1229 
1230 //-----------------------------------------------------------
1231 // Helper functions for fatal error handler
1232 #ifdef _WIN64
1233 // Helper routine which returns true if the address is
1234 // within the NTDLL address space.
1235 //
1236 static bool _addr_in_ntdll(address addr) {
1237   HMODULE hmod;
1238   MODULEINFO minfo;
1239 
1240   hmod = GetModuleHandle("NTDLL.DLL");
1241   if (hmod == NULL) return false;
1242   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1243                                           &minfo, sizeof(MODULEINFO))) {
1244     return false;
1245   }
1246 
1247   if ((addr >= minfo.lpBaseOfDll) &&
1248       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1249     return true;
1250   } else {
1251     return false;
1252   }
1253 }
1254 #endif
1255 
1256 struct _modinfo {
1257   address addr;
1258   char*   full_path;   // point to a char buffer
1259   int     buflen;      // size of the buffer
1260   address base_addr;
1261 };
1262 
1263 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1264                                   address top_address, void * param) {
1265   struct _modinfo *pmod = (struct _modinfo *)param;
1266   if (!pmod) return -1;
1267 
1268   if (base_addr   <= pmod->addr &&
1269       top_address > pmod->addr) {
1270     // if a buffer is provided, copy path name to the buffer
1271     if (pmod->full_path) {
1272       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1273     }
1274     pmod->base_addr = base_addr;
1275     return 1;
1276   }
1277   return 0;
1278 }
1279 
1280 bool os::dll_address_to_library_name(address addr, char* buf,
1281                                      int buflen, int* offset) {
1282   // buf is not optional, but offset is optional
1283   assert(buf != NULL, "sanity check");
1284 
1285 // NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
1286 //       return the full path to the DLL file; sometimes it returns the path
1287 //       to the corresponding PDB file (debug info), and sometimes it only
1288 //       returns a partial path, which makes life painful.
1289 
1290   struct _modinfo mi;
1291   mi.addr      = addr;
1292   mi.full_path = buf;
1293   mi.buflen    = buflen;
1294   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1295     // buf already contains path name
1296     if (offset) *offset = addr - mi.base_addr;
1297     return true;
1298   }
1299 
1300   buf[0] = '\0';
1301   if (offset) *offset = -1;
1302   return false;
1303 }
1304 
1305 bool os::dll_address_to_function_name(address addr, char *buf,
1306                                       int buflen, int *offset,
1307                                       bool demangle) {
1308   // buf is not optional, but offset is optional
1309   assert(buf != NULL, "sanity check");
1310 
1311   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1312     return true;
1313   }
1314   if (offset != NULL)  *offset  = -1;
1315   buf[0] = '\0';
1316   return false;
1317 }
1318 
1319 // save the start and end address of jvm.dll into param[0] and param[1]
1320 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1321                            address top_address, void * param) {
1322   if (!param) return -1;
1323 
1324   if (base_addr   <= (address)_locate_jvm_dll &&
1325       top_address > (address)_locate_jvm_dll) {
1326     ((address*)param)[0] = base_addr;
1327     ((address*)param)[1] = top_address;
1328     return 1;
1329   }
1330   return 0;
1331 }
1332 
1333 address vm_lib_location[2];    // start and end address of jvm.dll
1334 
1335 // check if addr is inside jvm.dll
1336 bool os::address_is_in_vm(address addr) {
1337   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1338     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1339       assert(false, "Can't find jvm module.");
1340       return false;
1341     }
1342   }
1343 
1344   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1345 }
1346 
1347 // print module info; param is outputStream*
1348 static int _print_module(const char* fname, address base_address,
1349                          address top_address, void* param) {
1350   if (!param) return -1;
1351 
1352   outputStream* st = (outputStream*)param;
1353 
1354   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1355   return 0;
1356 }
1357 
1358 // Loads a .dll/.so and,
1359 // in case of error, checks whether the .dll/.so was built for the
1360 // same architecture that Hotspot is running on.
1361 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1362   log_info(os)("attempting shared library load of %s", name);
1363 
1364   void * result = LoadLibrary(name);
1365   if (result != NULL) {
1366     Events::log(NULL, "Loaded shared library %s", name);
1367     // Recalculate pdb search path if a DLL was loaded successfully.
1368     SymbolEngine::recalc_search_path();
1369     log_info(os)("shared library load of %s was successful", name);
1370     return result;
1371   }
1372   DWORD errcode = GetLastError();
1373   // Read system error message into ebuf
1374   // It may or may not be overwritten below (in the for loop and just above)
1375   lasterror(ebuf, (size_t) ebuflen);
1376   ebuf[ebuflen - 1] = '\0';
1377   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1378   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1379 
1380   if (errcode == ERROR_MOD_NOT_FOUND) {
1381     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1382     ebuf[ebuflen - 1] = '\0';
1383     return NULL;
1384   }
1385 
1386   // Parsing the dll below:
1387   // if we can read the dll-info and find that the dll was built
1388   // for an architecture other than the one Hotspot is running on,
1389   // then print "DLL was built for a different architecture" to the buffer;
1390   // else call os::lasterror to obtain the system error message.
1391   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1392   if (fd < 0) {
1393     return NULL;
1394   }
1395 
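       // Parse the PE headers directly: the 32-bit value at file offset 0x3c
       // gives the position of the "PE\0\0" signature, and the 2-byte Machine
       // field of the COFF header immediately follows that signature.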
1396   uint32_t signature_offset;
1397   uint16_t lib_arch = 0;
1398   bool failed_to_get_lib_arch =
1399     ( // Go to position 3c in the dll
1400      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1401      ||
1402      // Read location of signature
1403      (sizeof(signature_offset) !=
1404      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1405      ||
1406      // Go to COFF File Header in dll
1407      // that is located after "signature" (4 bytes long)
1408      (os::seek_to_file_offset(fd,
1409      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1410      ||
1411      // Read field that contains code of architecture
1412      // that dll was built for
1413      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1414     );
1415 
1416   ::close(fd);
1417   if (failed_to_get_lib_arch) {
1418     // file i/o error - report os::lasterror(...) msg
1419     return NULL;
1420   }
1421 
1422   typedef struct {
1423     uint16_t arch_code;
1424     char* arch_name;
1425   } arch_t;
1426 
1427   static const arch_t arch_array[] = {
1428     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1429     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
1430   };
1431 #if (defined _M_AMD64)
1432   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1433 #elif (defined _M_IX86)
1434   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1435 #else
1436   #error Method os::dll_load requires that one of following \
1437          is defined :_M_AMD64 or _M_IX86
1438 #endif
1439 
1440 
1441   // Obtain strings for the printf operation:
1442   // lib_arch_str shall contain the platform this .dll was built for,
1443   // running_arch_str shall contain the platform Hotspot was built for.
1444   char *running_arch_str = NULL, *lib_arch_str = NULL;
1445   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1446     if (lib_arch == arch_array[i].arch_code) {
1447       lib_arch_str = arch_array[i].arch_name;
1448     }
1449     if (running_arch == arch_array[i].arch_code) {
1450       running_arch_str = arch_array[i].arch_name;
1451     }
1452   }
1453 
1454   assert(running_arch_str,
1455          "Didn't find running architecture code in arch_array");
1456 
1457   // If the architecture is right
1458   // but some other error took place - report os::lasterror(...) msg
1459   if (lib_arch == running_arch) {
1460     return NULL;
1461   }
1462 
1463   if (lib_arch_str != NULL) {
1464     ::_snprintf(ebuf, ebuflen - 1,
1465                 "Can't load %s-bit .dll on a %s-bit platform",
1466                 lib_arch_str, running_arch_str);
1467   } else {
1468     // don't know what architecture this dll was built for
1469     ::_snprintf(ebuf, ebuflen - 1,
1470                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1471                 lib_arch, running_arch_str);
1472   }
1473 
1474   return NULL;
1475 }
1476 
1477 void os::print_dll_info(outputStream *st) {
1478   st->print_cr("Dynamic libraries:");
1479   get_loaded_modules_info(_print_module, (void *)st);
1480 }
1481 
1482 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1483   HANDLE   hProcess;
1484 
1485 # define MAX_NUM_MODULES 128
1486   HMODULE     modules[MAX_NUM_MODULES];
1487   static char filename[MAX_PATH];
1488   int         result = 0;
1489 
1490   int pid = os::current_process_id();
1491   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1492                          FALSE, pid);
1493   if (hProcess == NULL) return 0;
1494 
1495   DWORD size_needed;
1496   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1497     CloseHandle(hProcess);
1498     return 0;
1499   }
1500 
1501   // number of modules that are currently loaded
1502   int num_modules = size_needed / sizeof(HMODULE);
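       // EnumProcessModules reports the byte count needed for all loaded
       // modules, which can exceed the fixed-size array, so the loop below
       // clamps the iteration count to MAX_NUM_MODULES.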
1503 
1504   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1505     // Get Full pathname:
1506     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1507       filename[0] = '\0';
1508     }
1509 
1510     MODULEINFO modinfo;
1511     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1512       modinfo.lpBaseOfDll = NULL;
1513       modinfo.SizeOfImage = 0;
1514     }
1515 
1516     // Invoke callback function
1517     result = callback(filename, (address)modinfo.lpBaseOfDll,
1518                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1519     if (result) break;
1520   }
1521 
1522   CloseHandle(hProcess);
1523   return result;
1524 }
1525 
1526 bool os::get_host_name(char* buf, size_t buflen) {
1527   DWORD size = (DWORD)buflen;
1528   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1529 }
1530 
1531 void os::get_summary_os_info(char* buf, size_t buflen) {
1532   stringStream sst(buf, buflen);
1533   os::win32::print_windows_version(&sst);
1534   // chop off newline character
1535   char* nl = strchr(buf, '\n');
1536   if (nl != NULL) *nl = '\0';
1537 }
1538 
1539 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1540 #if _MSC_VER >= 1900
1541   // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1542   int result = ::vsnprintf(buf, len, fmt, args);
1543   // If an encoding error occurred (result < 0) then it's not clear
1544   // whether the buffer is NUL terminated, so ensure it is.
1545   if ((result < 0) && (len > 0)) {
1546     buf[len - 1] = '\0';
1547   }
1548   return result;
1549 #else
1550   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1551   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1552   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1553   // go straight to _vscprintf.  The output is going to be truncated in
1554   // that case, except in the unusual case of empty output.  More
1555   // importantly, the documentation for various versions of Visual Studio
1556   // is inconsistent about the behavior of _vsnprintf when len == 0,
1557   // including it possibly being an error.
1558   int result = -1;
1559   if (len > 0) {
1560     result = _vsnprintf(buf, len, fmt, args);
1561     // If output (including NUL terminator) is truncated, the buffer
1562     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1563     if ((result < 0) || ((size_t)result >= len)) {
1564       buf[len - 1] = '\0';
1565     }
1566   }
1567   if (result < 0) {
1568     result = _vscprintf(fmt, args);
1569   }
1570   return result;
1571 #endif // _MSC_VER dispatch
1572 }
1573 
1574 static inline time_t get_mtime(const char* filename) {
1575   struct stat st;
1576   int ret = os::stat(filename, &st);
1577   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1578   return st.st_mtime;
1579 }
1580 
1581 int os::compare_file_modified_times(const char* file1, const char* file2) {
1582   time_t t1 = get_mtime(file1);
1583   time_t t2 = get_mtime(file2);
1584   return t1 - t2;
1585 }
1586 
1587 void os::print_os_info_brief(outputStream* st) {
1588   os::print_os_info(st);
1589 }
1590 
1591 void os::win32::print_uptime_info(outputStream* st) {
1592   unsigned long long ticks = GetTickCount64();
1593   os::print_dhm(st, "OS uptime:", ticks/1000);
1594 }
1595 
1596 void os::print_os_info(outputStream* st) {
1597 #ifdef ASSERT
1598   char buffer[1024];
1599   st->print("HostName: ");
1600   if (get_host_name(buffer, sizeof(buffer))) {
1601     st->print("%s ", buffer);
1602   } else {
1603     st->print("N/A ");
1604   }
1605 #endif
1606   st->print_cr("OS:");
1607   os::win32::print_windows_version(st);
1608 
1609   os::win32::print_uptime_info(st);
1610 
1611 #ifdef _LP64
1612   VM_Version::print_platform_virtualization_info(st);
1613 #endif
1614 }
1615 
1616 void os::win32::print_windows_version(outputStream* st) {
1617   OSVERSIONINFOEX osvi;
1618   VS_FIXEDFILEINFO *file_info;
1619   TCHAR kernel32_path[MAX_PATH];
1620   UINT len, ret;
1621 
1622   // Use the GetVersionEx information to see if we're on a server or
1623   // workstation edition of Windows. Starting with Windows 8.1 we can't
1624   // trust the OS version information returned by this API.
1625   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1626   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1627   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1628     st->print_cr("Call to GetVersionEx failed");
1629     return;
1630   }
1631   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1632 
1633   // Get the full path to \Windows\System32\kernel32.dll and use that for
1634   // determining what version of Windows we're running on.
1635   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1636   ret = GetSystemDirectory(kernel32_path, len);
1637   if (ret == 0 || ret > len) {
1638     st->print_cr("Call to GetSystemDirectory failed");
1639     return;
1640   }
1641   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1642 
1643   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1644   if (version_size == 0) {
1645     st->print_cr("Call to GetFileVersionInfoSize failed");
1646     return;
1647   }
1648 
1649   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1650   if (version_info == NULL) {
1651     st->print_cr("Failed to allocate version_info");
1652     return;
1653   }
1654 
1655   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1656     os::free(version_info);
1657     st->print_cr("Call to GetFileVersionInfo failed");
1658     return;
1659   }
1660 
1661   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1662     os::free(version_info);
1663     st->print_cr("Call to VerQueryValue failed");
1664     return;
1665   }
1666 
1667   int major_version = HIWORD(file_info->dwProductVersionMS);
1668   int minor_version = LOWORD(file_info->dwProductVersionMS);
1669   int build_number = HIWORD(file_info->dwProductVersionLS);
1670   int build_minor = LOWORD(file_info->dwProductVersionLS);
1671   int os_vers = major_version * 1000 + minor_version;
1672   os::free(version_info);
1673 
1674   st->print(" Windows ");
1675   switch (os_vers) {
1676 
1677   case 6000:
1678     if (is_workstation) {
1679       st->print("Vista");
1680     } else {
1681       st->print("Server 2008");
1682     }
1683     break;
1684 
1685   case 6001:
1686     if (is_workstation) {
1687       st->print("7");
1688     } else {
1689       st->print("Server 2008 R2");
1690     }
1691     break;
1692 
1693   case 6002:
1694     if (is_workstation) {
1695       st->print("8");
1696     } else {
1697       st->print("Server 2012");
1698     }
1699     break;
1700 
1701   case 6003:
1702     if (is_workstation) {
1703       st->print("8.1");
1704     } else {
1705       st->print("Server 2012 R2");
1706     }
1707     break;
1708 
1709   case 10000:
1710     if (is_workstation) {
1711       st->print("10");
1712     } else {
1713       // Distinguish Windows Server 2016 from Server 2019 by build number:
1714       // the Windows Server 2019 GA (10/2018) build number is 17763.
1715       if (build_number > 17762) {
1716         st->print("Server 2019");
1717       } else {
1718         st->print("Server 2016");
1719       }
1720     }
1721     break;
1722 
1723   default:
1724     // Unrecognized Windows version; print out its major and minor versions
1725     st->print("%d.%d", major_version, minor_version);
1726     break;
1727   }
1728 
1729   // Retrieve SYSTEM_INFO from a GetNativeSystemInfo call so that we can
1730   // find out whether we are running on a 64-bit processor or not.
1731   SYSTEM_INFO si;
1732   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1733   GetNativeSystemInfo(&si);
1734   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1735     st->print(" , 64 bit");
1736   }
1737 
1738   st->print(" Build %d", build_number);
1739   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1740   st->cr();
1741 }
1742 
1743 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1744   // Nothing to do for now.
1745 }
1746 
1747 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1748   HKEY key;
1749   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1750                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1751   if (status == ERROR_SUCCESS) {
1752     DWORD size = (DWORD)buflen;
1753     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1754     if (status != ERROR_SUCCESS) {
1755         strncpy(buf, "## __CPU__", buflen);
1756     }
1757     RegCloseKey(key);
1758   } else {
1759     // Fall back to generic CPU info
1760     strncpy(buf, "## __CPU__", buflen);
1761   }
1762 }
1763 
1764 void os::print_memory_info(outputStream* st) {
1765   st->print("Memory:");
1766   st->print(" %dk page", os::vm_page_size()>>10);
1767 
1768   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
1769   // incorrect value if total memory is larger than 4GB.
1770   MEMORYSTATUSEX ms;
1771   ms.dwLength = sizeof(ms);
1772   int r1 = GlobalMemoryStatusEx(&ms);
1773 
1774   if (r1 != 0) {
1775     st->print(", system-wide physical " INT64_FORMAT "M ",
1776              (int64_t) ms.ullTotalPhys >> 20);
1777     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1778 
1779     st->print("TotalPageFile size " INT64_FORMAT "M ",
1780              (int64_t) ms.ullTotalPageFile >> 20);
1781     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1782              (int64_t) ms.ullAvailPageFile >> 20);
1783 
1784     // On 32-bit, Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit)
1785 #if defined(_M_IX86)
1786     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1787              (int64_t) ms.ullTotalVirtual >> 20);
1788     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1789 #endif
1790   } else {
1791     st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
1792   }
1793 
1794   // extended memory statistics for a process
1795   PROCESS_MEMORY_COUNTERS_EX pmex;
1796   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1797   pmex.cb = sizeof(pmex);
1798   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1799 
1800   if (r2 != 0) {
1801     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1802              (int64_t) pmex.WorkingSetSize >> 20);
1803     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1804 
1805     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1806              (int64_t) pmex.PrivateUsage >> 20);
1807     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1808   } else {
1809     st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
1810   }
1811 
1812   st->cr();
1813 }
1814 
1815 bool os::signal_sent_by_kill(const void* siginfo) {
1816   // TODO: Is this possible?
1817   return false;
1818 }
1819 
1820 void os::print_siginfo(outputStream *st, const void* siginfo) {
1821   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1822   st->print("siginfo:");
1823 
1824   char tmp[64];
1825   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1826     strcpy(tmp, "EXCEPTION_??");
1827   }
1828   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1829 
1830   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1831        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1832        er->NumberParameters >= 2) {
1833     switch (er->ExceptionInformation[0]) {
1834     case 0: st->print(", reading address"); break;
1835     case 1: st->print(", writing address"); break;
1836     case 8: st->print(", data execution prevention violation at address"); break;
1837     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1838                        er->ExceptionInformation[0]);
1839     }
1840     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1841   } else {
1842     int num = er->NumberParameters;
1843     if (num > 0) {
1844       st->print(", ExceptionInformation=");
1845       for (int i = 0; i < num; i++) {
1846         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1847       }
1848     }
1849   }
1850   st->cr();
1851 }
1852 
1853 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1854   // TODO: Can we kill thread?
1855   return false;
1856 }
1857 
1858 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1859   // do nothing
1860 }
1861 
1862 static char saved_jvm_path[MAX_PATH] = {0};
1863 
1864 // Find the full path to the current module, jvm.dll
1865 void os::jvm_path(char *buf, jint buflen) {
1866   // Error checking.
1867   if (buflen < MAX_PATH) {
1868     assert(false, "must use a large-enough buffer");
1869     buf[0] = '\0';
1870     return;
1871   }
1872   // Lazy resolve the path to current module.
1873   if (saved_jvm_path[0] != 0) {
1874     strcpy(buf, saved_jvm_path);
1875     return;
1876   }
1877 
1878   buf[0] = '\0';
1879   if (Arguments::sun_java_launcher_is_altjvm()) {
1880     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1881     // for a JAVA_HOME environment variable and fix up the path so it
1882     // looks like jvm.dll is installed there (append a fake suffix
1883     // hotspot/jvm.dll).
1884     char* java_home_var = ::getenv("JAVA_HOME");
1885     if (java_home_var != NULL && java_home_var[0] != 0 &&
1886         strlen(java_home_var) < (size_t)buflen) {
1887       strncpy(buf, java_home_var, buflen);
1888 
1889       // Determine if this is a legacy image or a modules image;
1890       // a modules image doesn't have a "jre" subdirectory.
1891       size_t len = strlen(buf);
1892       char* jrebin_p = buf + len;
1893       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1894       if (0 != _access(buf, 0)) {
1895         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1896       }
1897       len = strlen(buf);
1898       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1899     }
1900   }
1901 
1902   if (buf[0] == '\0') {
1903     GetModuleFileName(vm_lib_handle, buf, buflen);
1904   }
1905   strncpy(saved_jvm_path, buf, MAX_PATH);
1906   saved_jvm_path[MAX_PATH - 1] = '\0';
1907 }
1908 
1909 
1910 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1911 #ifndef _WIN64
1912   st->print("_");
1913 #endif
1914 }
1915 
1916 
1917 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1918 #ifndef _WIN64
1919   st->print("@%d", args_size  * sizeof(int));
1920 #endif
1921 }
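
// Worked example for the two helpers above (the method name is hypothetical):
// on 32-bit Windows, a JNI implementation function whose arguments occupy
// 3 stack words (args_size == 3) is looked up using the __stdcall-decorated
// name "_Java_pkg_Cls_name@12", i.e. the "_" prefix plus an
// "@" + (3 * sizeof(int)) == "@12" suffix.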
1922 
1923 // This method is a copy of JDK's sysGetLastErrorString
1924 // from src/windows/hpi/src/system_md.c
1925 
1926 size_t os::lasterror(char* buf, size_t len) {
1927   DWORD errval;
1928 
1929   if ((errval = GetLastError()) != 0) {
1930     // DOS error
1931     size_t n = (size_t)FormatMessage(
1932                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1933                                      NULL,
1934                                      errval,
1935                                      0,
1936                                      buf,
1937                                      (DWORD)len,
1938                                      NULL);
1939     if (n > 3) {
1940       // Drop final '.', CR, LF
1941       if (buf[n - 1] == '\n') n--;
1942       if (buf[n - 1] == '\r') n--;
1943       if (buf[n - 1] == '.') n--;
1944       buf[n] = '\0';
1945     }
1946     return n;
1947   }
1948 
1949   if (errno != 0) {
1950     // C runtime error that has no corresponding DOS error code
1951     const char* s = os::strerror(errno);
1952     size_t n = strlen(s);
1953     if (n >= len) n = len - 1;
1954     strncpy(buf, s, n);
1955     buf[n] = '\0';
1956     return n;
1957   }
1958 
1959   return 0;
1960 }
1961 
1962 int os::get_last_error() {
1963   DWORD error = GetLastError();
1964   if (error == 0) {
1965     error = errno;
1966   }
1967   return (int)error;
1968 }
1969 
1970 // sun.misc.Signal
1971 // NOTE that this is a workaround for an apparent kernel bug where if
1972 // a signal handler for SIGBREAK is installed then that signal handler
1973 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1974 // See bug 4416763.
1975 static void (*sigbreakHandler)(int) = NULL;
1976 
1977 static void UserHandler(int sig, void *siginfo, void *context) {
1978   os::signal_notify(sig);
1979   // We need to reinstate the signal handler each time...
1980   os::signal(sig, (void*)UserHandler);
1981 }
1982 
1983 void* os::user_handler() {
1984   return (void*) UserHandler;
1985 }
1986 
1987 void* os::signal(int signal_number, void* handler) {
1988   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1989     void (*oldHandler)(int) = sigbreakHandler;
1990     sigbreakHandler = (void (*)(int)) handler;
1991     return (void*) oldHandler;
1992   } else {
1993     return (void*)::signal(signal_number, (void (*)(int))handler);
1994   }
1995 }
1996 
1997 void os::signal_raise(int signal_number) {
1998   raise(signal_number);
1999 }
2000 
2001 // The Win32 C runtime library maps all console control events other than ^C
2002 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2003 // logoff, and shutdown events.  We therefore install our own console handler
2004 // that raises SIGTERM for the latter cases.
2005 //
2006 static BOOL WINAPI consoleHandler(DWORD event) {
2007   switch (event) {
2008   case CTRL_C_EVENT:
2009     if (VMError::is_error_reported()) {
2010       // Ctrl-C is pressed during error reporting, likely because the error
2011       // handler fails to abort. Let VM die immediately.
2012       os::die();
2013     }
2014 
2015     os::signal_raise(SIGINT);
2016     return TRUE;
2017     break;
2018   case CTRL_BREAK_EVENT:
2019     if (sigbreakHandler != NULL) {
2020       (*sigbreakHandler)(SIGBREAK);
2021     }
2022     return TRUE;
2023     break;
2024   case CTRL_LOGOFF_EVENT: {
2025     // Don't terminate JVM if it is running in a non-interactive session,
2026     // such as a service process.
2027     USEROBJECTFLAGS flags;
2028     HANDLE handle = GetProcessWindowStation();
2029     if (handle != NULL &&
2030         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2031         sizeof(USEROBJECTFLAGS), NULL)) {
2032       // If it is a non-interactive session, let the next handler deal
2033       // with it.
2034       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2035         return FALSE;
2036       }
2037     }
2038   }
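    // Fall through: an interactive logoff is treated like a close/shutdown event.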
2039   case CTRL_CLOSE_EVENT:
2040   case CTRL_SHUTDOWN_EVENT:
2041     os::signal_raise(SIGTERM);
2042     return TRUE;
2043     break;
2044   default:
2045     break;
2046   }
2047   return FALSE;
2048 }
2049 
2050 // The following code was moved from os.cpp to make this
2051 // code platform specific, which it is by its very nature.
2052 
2053 // Return maximum OS signal used + 1 for internal use only
2054 // Used as exit signal for signal_thread
2055 int os::sigexitnum_pd() {
2056   return NSIG;
2057 }
2058 
2059 // a counter for each possible signal value, including signal_thread exit signal
2060 static volatile jint pending_signals[NSIG+1] = { 0 };
2061 static Semaphore* sig_sem = NULL;
2062 
2063 static void jdk_misc_signal_init() {
2064   // Initialize signal structures
2065   memset((void*)pending_signals, 0, sizeof(pending_signals));
2066 
2067   // Initialize signal semaphore
2068   sig_sem = new Semaphore();
2069 
2070   // Programs embedding the VM do not want it to attempt to receive
2071   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2072   // shutdown hooks mechanism introduced in 1.3.  For example, when
2073   // the VM is run as part of a Windows NT service (e.g., a servlet
2074   // engine in a web server), the correct behavior is for any console
2075   // control handler to return FALSE, not TRUE, because the OS's
2076   // "final" handler for such events allows the process to continue if
2077   // it is a service (while terminating it if it is not a service).
2078   // To make this behavior uniform and the mechanism simpler, we
2079   // completely disable the VM's usage of these console events if -Xrs
2080   // (=ReduceSignalUsage) is specified.  This means, for example, that
2081   // the CTRL-BREAK thread dump mechanism is also disabled in this
2082   // case.  See bugs 4323062, 4345157, and related bugs.
2083 
2084   // Add a CTRL-C handler
2085   SetConsoleCtrlHandler(consoleHandler, TRUE);
2086 }
2087 
2088 void os::signal_notify(int sig) {
2089   if (sig_sem != NULL) {
2090     Atomic::inc(&pending_signals[sig]);
2091     sig_sem->signal();
2092   } else {
2093     // The signal thread is not created and jdk_misc_signal_init
2094     // is not called when ReduceSignalUsage is set.
2095     assert(ReduceSignalUsage, "signal semaphore should be created");
2096   }
2097 }
2098 
2099 static int check_pending_signals() {
2100   while (true) {
2101     for (int i = 0; i < NSIG + 1; i++) {
2102       jint n = pending_signals[i];
2103       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
2104         return i;
2105       }
2106     }
2107     JavaThread *thread = JavaThread::current();
2108 
2109     ThreadBlockInVM tbivm(thread);
2110 
2111     bool threadIsSuspended;
2112     do {
2113       thread->set_suspend_equivalent();
2114       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2115       sig_sem->wait();
2116 
2117       // were we externally suspended while we were waiting?
2118       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2119       if (threadIsSuspended) {
2120         // The semaphore has been incremented, but while we were waiting
2121         // another thread suspended us. We don't want to continue running
2122         // while suspended because that would surprise the thread that
2123         // suspended us.
2124         sig_sem->signal();
2125 
2126         thread->java_suspend_self();
2127       }
2128     } while (threadIsSuspended);
2129   }
2130 }
2131 
2132 int os::signal_wait() {
2133   return check_pending_signals();
2134 }
2135 
2136 // Implicit OS exception handling
2137 
2138 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2139                       address handler) {
2140   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2141   // Save pc in thread
2142 #ifdef _M_AMD64
2143   // Do not blow up if no thread info available.
2144   if (thread) {
2145     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2146   }
2147   // Set pc to handler
2148   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2149 #else
2150   // Do not blow up if no thread info available.
2151   if (thread) {
2152     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2153   }
2154   // Set pc to handler
2155   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2156 #endif
2157 
2158   // Continue the execution
2159   return EXCEPTION_CONTINUE_EXECUTION;
2160 }
2161 
2162 
2163 // Used for PostMortemDump
2164 extern "C" void safepoints();
2165 extern "C" void find(int x);
2166 extern "C" void events();
2167 
2168 // According to the Windows API documentation, an illegal instruction sequence should generate
2169 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2170 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2171 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2172 
2173 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2174 
2175 // From "Execution Protection in the Windows Operating System" draft 0.35
2176 // Once a system header becomes available, the "real" define should be
2177 // included or copied here.
2178 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2179 
2180 // Windows Vista/2008 heap corruption check
2181 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2182 
2183 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2184 // C++ compiler contain this error code. Because this is a compiler-generated
2185 // error, the code is not listed in the Win32 API header files.
2186 // The code is actually a cryptic mnemonic device, with the initial "E"
2187 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2188 // ASCII values of "msc".
2189 
2190 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
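
// As a sanity check of the mnemonic described above (assuming an ASCII
// execution character set), the low three bytes do spell out "msc":
STATIC_ASSERT((('m' << 16) | ('s' << 8) | 'c') == 0x6D7363);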
2191 
2192 #define def_excpt(val) { #val, (val) }
2193 
2194 static const struct { const char* name; uint number; } exceptlabels[] = {
2195     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2196     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2197     def_excpt(EXCEPTION_BREAKPOINT),
2198     def_excpt(EXCEPTION_SINGLE_STEP),
2199     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2200     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2201     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2202     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2203     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2204     def_excpt(EXCEPTION_FLT_OVERFLOW),
2205     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2206     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2207     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2208     def_excpt(EXCEPTION_INT_OVERFLOW),
2209     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2210     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2211     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2212     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2213     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2214     def_excpt(EXCEPTION_STACK_OVERFLOW),
2215     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2216     def_excpt(EXCEPTION_GUARD_PAGE),
2217     def_excpt(EXCEPTION_INVALID_HANDLE),
2218     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2219     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2220 };
2221 
2222 #undef def_excpt
2223 
2224 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2225   uint code = static_cast<uint>(exception_code);
2226   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2227     if (exceptlabels[i].number == code) {
2228       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2229       return buf;
2230     }
2231   }
2232 
2233   return NULL;
2234 }
2235 
2236 //-----------------------------------------------------------------------------
2237 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2238   // handle exception caused by idiv; should only happen for -MinInt/-1
2239   // (division by zero is handled explicitly)
2240 #ifdef  _M_AMD64
2241   PCONTEXT ctx = exceptionInfo->ContextRecord;
2242   address pc = (address)ctx->Rip;
2243   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2244   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2245   if (pc[0] == 0xF7) {
2246     // set correct result values and continue after idiv instruction
2247     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2248   } else {
2249     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2250   }
2251   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation);
2252   // this is the case because the exception only happens for -MinValue/-1, and -MinValue is always in rax because of the
2253   // idiv opcode (0xF7).
2254   ctx->Rdx = (DWORD)0;             // remainder
2255   // Continue the execution
2256 #else
2257   PCONTEXT ctx = exceptionInfo->ContextRecord;
2258   address pc = (address)ctx->Eip;
2259   assert(pc[0] == 0xF7, "not an idiv opcode");
2260   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2261   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2262   // set correct result values and continue after idiv instruction
2263   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2264   ctx->Eax = (DWORD)min_jint;      // result
2265   ctx->Edx = (DWORD)0;             // remainder
2266   // Continue the execution
2267 #endif
2268   return EXCEPTION_CONTINUE_EXECUTION;
2269 }
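
// Worked arithmetic behind the handler above: as the comment at the top of the
// function says, the exception only occurs for min_jint / -1, because the
// mathematical quotient -min_jint equals max_jint + 1 and therefore does not
// fit in a 32-bit two's complement register.
STATIC_ASSERT(min_jint == -max_jint - 1);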
2270 
2271 //-----------------------------------------------------------------------------
2272 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2273   PCONTEXT ctx = exceptionInfo->ContextRecord;
2274 #ifndef  _WIN64
2275   // handle exception caused by native method modifying control word
2276   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2277 
2278   switch (exception_code) {
2279   case EXCEPTION_FLT_DENORMAL_OPERAND:
2280   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2281   case EXCEPTION_FLT_INEXACT_RESULT:
2282   case EXCEPTION_FLT_INVALID_OPERATION:
2283   case EXCEPTION_FLT_OVERFLOW:
2284   case EXCEPTION_FLT_STACK_CHECK:
2285   case EXCEPTION_FLT_UNDERFLOW:
2286     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2287     if (fp_control_word != ctx->FloatSave.ControlWord) {
2288       // Restore FPCW and mask out FLT exceptions
2289       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2290       // Mask out pending FLT exceptions
2291       ctx->FloatSave.StatusWord &=  0xffffff00;
2292       return EXCEPTION_CONTINUE_EXECUTION;
2293     }
2294   }
2295 
2296   if (prev_uef_handler != NULL) {
2297     // We didn't handle this exception so pass it to the previous
2298     // UnhandledExceptionFilter.
2299     return (prev_uef_handler)(exceptionInfo);
2300   }
2301 #else // !_WIN64
2302   // On Windows, the mxcsr control bits are non-volatile across calls
2303   // See also CR 6192333
2304   //
2305   jint MxCsr = INITIAL_MXCSR;
2306   // we can't use StubRoutines::addr_mxcsr_std()
2307   // because in Win64 mxcsr is not saved there
2308   if (MxCsr != ctx->MxCsr) {
2309     ctx->MxCsr = MxCsr;
2310     return EXCEPTION_CONTINUE_EXECUTION;
2311   }
2312 #endif // !_WIN64
2313 
2314   return EXCEPTION_CONTINUE_SEARCH;
2315 }
2316 
2317 static inline void report_error(Thread* t, DWORD exception_code,
2318                                 address addr, void* siginfo, void* context) {
2319   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2320 
2321   // If UseOsErrorReporting, this will return here and save the error file
2322   // somewhere where we can find it in the minidump.
2323 }
2324 
2325 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2326         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2327   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2328   address addr = (address) exceptionRecord->ExceptionInformation[1];
2329   if (Interpreter::contains(pc)) {
2330     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2331     if (!fr->is_first_java_frame()) {
2332       // get_frame_at_stack_banging_point() is only called when we
2333       // have well defined stacks so java_sender() calls do not need
2334       // to assert safe_for_sender() first.
2335       *fr = fr->java_sender();
2336     }
2337   } else {
2338     // more complex code with compiled code
2339     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2340     CodeBlob* cb = CodeCache::find_blob(pc);
2341     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2342       // Not sure where the pc points to; fall back to default
2343       // stack overflow handling.
2344       return false;
2345     } else {
2346       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2347       // in compiled code, the stack banging is performed just after the return pc
2348       // has been pushed on the stack
2349       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2350       if (!fr->is_java_frame()) {
2351         // See java_sender() comment above.
2352         *fr = fr->java_sender();
2353       }
2354     }
2355   }
2356   assert(fr->is_java_frame(), "Safety check");
2357   return true;
2358 }
2359 
2360 #if INCLUDE_AOT
2361 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2362   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2363   address addr = (address) exceptionRecord->ExceptionInformation[1];
2364   address pc = (address) exceptionInfo->ContextRecord->Rip;
2365 
2366   // Handle the case where we get an implicit exception in AOT generated
2367   // code.  Loaded AOT DLLs are not registered for structured exceptions.
2368   // If the exception occurred in the codeCache or AOT code, pass control
2369   // to our normal exception handler.
2370   CodeBlob* cb = CodeCache::find_blob(pc);
2371   if (cb != NULL) {
2372     return topLevelExceptionFilter(exceptionInfo);
2373   }
2374 
2375   return EXCEPTION_CONTINUE_SEARCH;
2376 }
2377 #endif
2378 
2379 //-----------------------------------------------------------------------------
2380 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2381   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2382   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2383 #ifdef _M_AMD64
2384   address pc = (address) exceptionInfo->ContextRecord->Rip;
2385 #else
2386   address pc = (address) exceptionInfo->ContextRecord->Eip;
2387 #endif
2388   Thread* t = Thread::current_or_null_safe();
2389 
2390   // Handle SafeFetch32 and SafeFetchN exceptions.
2391   if (StubRoutines::is_safefetch_fault(pc)) {
2392     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2393   }
2394 
2395 #ifndef _WIN64
2396   // Execution protection violation - win32 running on AMD64 only
2397   // Handled first to avoid misdiagnosis as a "normal" access violation;
2398   // This is safe to do because we have a new/unique ExceptionInformation
2399   // code for this condition.
2400   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2401     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2402     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2403     address addr = (address) exceptionRecord->ExceptionInformation[1];
2404 
2405     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2406       int page_size = os::vm_page_size();
2407 
2408       // Make sure the pc and the faulting address are sane.
2409       //
2410       // If an instruction spans a page boundary, and the page containing
2411       // the beginning of the instruction is executable but the following
2412       // page is not, the pc and the faulting address might be slightly
2413       // different - we still want to unguard the 2nd page in this case.
2414       //
2415       // 15 bytes seems to be a (very) safe value for max instruction size.
2416       bool pc_is_near_addr =
2417         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2418       bool instr_spans_page_boundary =
2419         (align_down((intptr_t) pc ^ (intptr_t) addr,
2420                          (intptr_t) page_size) > 0);
2421 
2422       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2423         static volatile address last_addr =
2424           (address) os::non_memory_address_word();
2425 
2426         // In conservative mode, don't unguard unless the address is in the VM
2427         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2428             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2429 
2430           // Set memory to RWX and retry
2431           address page_start = align_down(addr, page_size);
2432           bool res = os::protect_memory((char*) page_start, page_size,
2433                                         os::MEM_PROT_RWX);
2434 
2435           log_debug(os)("Execution protection violation "
2436                         "at " INTPTR_FORMAT
2437                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2438                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2439 
2440           // Set last_addr so if we fault again at the same address, we don't
2441           // end up in an endless loop.
2442           //
2443           // There are two potential complications here.  Two threads trapping
2444           // at the same address at the same time could cause one of the
2445           // threads to think it already unguarded, and abort the VM.  Likely
2446           // very rare.
2447           //
2448           // The other race involves two threads alternately trapping at
2449           // different addresses and failing to unguard the page, resulting in
2450           // an endless loop.  This condition is probably even more unlikely
2451           // than the first.
2452           //
2453           // Although both cases could be avoided by using locks or thread
2454           // local last_addr, these solutions are unnecessary complication:
2455           // this handler is a best-effort safety net, not a complete solution.
2456           // It is disabled by default and should only be used as a workaround
2457           // in case we missed any no-execute-unsafe VM code.
2458 
2459           last_addr = addr;
2460 
2461           return EXCEPTION_CONTINUE_EXECUTION;
2462         }
2463       }
2464 
2465       // Last unguard failed or not unguarding
2466       tty->print_raw_cr("Execution protection violation");
2467       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2468                    exceptionInfo->ContextRecord);
2469       return EXCEPTION_CONTINUE_SEARCH;
2470     }
2471   }
2472 #endif // _WIN64
2473 
2474   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2475       VM_Version::is_cpuinfo_segv_addr(pc)) {
2476     // Verify that the OS saves/restores AVX registers.
2477     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2478   }
2479 
2480   if (t != NULL && t->is_Java_thread()) {
2481     JavaThread* thread = (JavaThread*) t;
2482     bool in_java = thread->thread_state() == _thread_in_Java;
2483 
2484     // Handle potential stack overflows up front.
2485     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2486       if (thread->stack_guards_enabled()) {
2487         if (in_java) {
2488           frame fr;
2489           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2490           address addr = (address) exceptionRecord->ExceptionInformation[1];
2491           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2492             assert(fr.is_java_frame(), "Must be a Java frame");
2493             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2494           }
2495         }
2496         // Yellow zone violation.  The o/s has unprotected the first yellow
2497         // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
2498         // update the enabled status, even if the zone contains only one page.
2499         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2500         thread->disable_stack_yellow_reserved_zone();
2501         // If not in java code, return and hope for the best.
2502         return in_java
2503             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2504             :  EXCEPTION_CONTINUE_EXECUTION;
2505       } else {
2506         // Fatal red zone violation.
2507         thread->disable_stack_red_zone();
2508         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2509         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2510                       exceptionInfo->ContextRecord);
2511         return EXCEPTION_CONTINUE_SEARCH;
2512       }
2513     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2514       // Either stack overflow or null pointer exception.
2515       if (in_java) {
2516         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2517         address addr = (address) exceptionRecord->ExceptionInformation[1];
2518         address stack_end = thread->stack_end();
2519         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2520           // Stack overflow.
2521           assert(!os::uses_stack_guard_pages(),
2522                  "should be caught by red zone code above.");
2523           return Handle_Exception(exceptionInfo,
2524                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2525         }
2526         // Check for safepoint polling and implicit null.
2527         // We only expect null pointers in the stubs (vtable);
2528         // the rest are checked explicitly now.
2529         CodeBlob* cb = CodeCache::find_blob(pc);
2530         if (cb != NULL) {
2531           if (SafepointMechanism::is_poll_address(addr)) {
2532             address stub = SharedRuntime::get_poll_stub(pc);
2533             return Handle_Exception(exceptionInfo, stub);
2534           }
2535         }
2536         {
2537 #ifdef _WIN64
2538           // If it's a legal stack address, map in the entire region.
2539           //
2540           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2541           address addr = (address) exceptionRecord->ExceptionInformation[1];
2542           if (thread->is_in_usable_stack(addr)) {
2543             addr = (address)((uintptr_t)addr &
2544                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2545             os::commit_memory((char *)addr, thread->stack_base() - addr,
2546                               !ExecMem);
2547             return EXCEPTION_CONTINUE_EXECUTION;
2548           } else
2549 #endif
2550           {
2551             // Null pointer exception.
2552             if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2553               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2554               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2555             }
2556             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2557                          exceptionInfo->ContextRecord);
2558             return EXCEPTION_CONTINUE_SEARCH;
2559           }
2560         }
2561       }
2562 
2563 #ifdef _WIN64
2564       // Special care for fast JNI field accessors.
2565       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2566       // in and the heap gets shrunk before the field access.
2567       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2568         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2569         if (addr != (address)-1) {
2570           return Handle_Exception(exceptionInfo, addr);
2571         }
2572       }
2573 #endif
2574 
2575       // Stack overflow or null pointer exception in native code.
2576       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2577                    exceptionInfo->ContextRecord);
2578       return EXCEPTION_CONTINUE_SEARCH;
2579     } // /EXCEPTION_ACCESS_VIOLATION
2580     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2581 
2582     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2583       CompiledMethod* nm = NULL;
2584       JavaThread* thread = (JavaThread*)t;
2585       if (in_java) {
2586         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2587         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2588       }
2589 
2590       bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2591       if (((thread->thread_state() == _thread_in_vm ||
2592            thread->thread_state() == _thread_in_native ||
2593            is_unsafe_arraycopy) &&
2594           thread->doing_unsafe_access()) ||
2595           (nm != NULL && nm->has_unsafe_access())) {
2596         address next_pc =  Assembler::locate_next_instruction(pc);
2597         if (is_unsafe_arraycopy) {
2598           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2599         }
2600         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2601       }
2602     }
2603 
2604     if (in_java) {
2605       switch (exception_code) {
2606       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2607         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2608 
2609       case EXCEPTION_INT_OVERFLOW:
2610         return Handle_IDiv_Exception(exceptionInfo);
2611 
2612       } // switch
2613     }
2614     if (((thread->thread_state() == _thread_in_Java) ||
2615          (thread->thread_state() == _thread_in_native)) &&
2616          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2617       LONG result=Handle_FLT_Exception(exceptionInfo);
2618       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2619     }
2620   }
2621 
2622   if (exception_code != EXCEPTION_BREAKPOINT) {
2623     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2624                  exceptionInfo->ContextRecord);
2625   }
2626   return EXCEPTION_CONTINUE_SEARCH;
2627 }
2628 
2629 #ifndef _WIN64
2630 // Special care for fast JNI accessors.
2631 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2632 // the heap gets shrunk before the field access.
2633 // Need to install our own structured exception handler since native code may
2634 // install its own.
2635 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2636   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2637   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2638     address pc = (address) exceptionInfo->ContextRecord->Eip;
2639     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2640     if (addr != (address)-1) {
2641       return Handle_Exception(exceptionInfo, addr);
2642     }
2643   }
2644   return EXCEPTION_CONTINUE_SEARCH;
2645 }
2646 
2647 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2648   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2649                                                      jobject obj,           \
2650                                                      jfieldID fieldID) {    \
2651     __try {                                                                 \
2652       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2653                                                                  obj,       \
2654                                                                  fieldID);  \
2655     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2656                                               _exception_info())) {         \
2657     }                                                                       \
2658     return 0;                                                               \
2659   }
2660 
2661 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2662 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2663 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2664 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2665 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2666 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2667 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2668 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2669 
2670 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2671   switch (type) {
2672   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2673   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2674   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2675   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2676   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2677   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2678   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2679   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2680   default:        ShouldNotReachHere();
2681   }
2682   return (address)-1;
2683 }
2684 #endif
2685 
2686 // Virtual Memory
2687 
2688 int os::vm_page_size() { return os::win32::vm_page_size(); }
2689 int os::vm_allocation_granularity() {
2690   return os::win32::vm_allocation_granularity();
2691 }
2692 
2693 // Windows large page support is available on Windows 2003. In order to use
2694 // large page memory, the administrator must first assign additional privilege
2695 // to the user:
2696 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2697 //   + select Local Policies -> User Rights Assignment
2698 //   + double click "Lock pages in memory", add users and/or groups
2699 //   + reboot
2700 // Note that the above steps are needed for administrators as well, because
2701 // administrators do not have the privilege to lock pages in memory by default.
2702 //
2703 // Note about Windows 2003: although the API supports committing large page
2704 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2705 // scenario, experimentation shows it only uses large pages if the entire
2706 // memory region is reserved and committed in a single VirtualAlloc() call.
2707 // This makes Windows large page support more or less like Solaris ISM, in
2708 // that the entire heap must be committed upfront. This will probably change
2709 // in the future; if so, the code below needs to be revisited.
2710 
2711 #ifndef MEM_LARGE_PAGES
2712   #define MEM_LARGE_PAGES 0x20000000
2713 #endif
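
// A minimal illustrative sketch, not part of HotSpot's allocation path: once
// "Lock pages in memory" has been granted and the privilege enabled (see
// request_lock_memory_privilege() further below), a large-page allocation must
// be reserved and committed in a single call, sized in multiples of the value
// returned by the documented GetLargePageMinimum() API.  The helper name is
// invented for this example.
static char* reserve_one_large_page_example() {
  const SIZE_T lp_bytes = GetLargePageMinimum();   // 0 if large pages are unsupported
  if (lp_bytes == 0) {
    return NULL;
  }
  return (char*) VirtualAlloc(NULL, lp_bytes,
                              MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
                              PAGE_READWRITE);
}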
2714 
2715 #define VirtualFreeChecked(mem, size, type)                       \
2716   do {                                                            \
2717     bool ret = VirtualFree(mem, size, type);                      \
2718     assert(ret, "Failed to free memory: " PTR_FORMAT, p2i(mem));  \
2719   } while (false)
2720 
2721 // The number of bytes is set up to match 1 pixel and 32 bits per pixel.
2722 static const int gdi_tiny_bitmap_width_bytes = 4;
2723 
2724 static HBITMAP gdi_create_tiny_bitmap(void* mem) {
2725   // The documentation for CreateBitmap states a word-alignment requirement.
2726   STATIC_ASSERT(is_aligned_(gdi_tiny_bitmap_width_bytes, sizeof(WORD)));
2727 
2728   // Some callers use this function to test if memory crossing separate memory
2729   // reservations can be used. Use a height of 2 to make sure that one pixel
2730   // ends up in the first reservation and the other in the second.
2731   int nHeight = 2;
2732 
2733   assert(is_aligned(mem, gdi_tiny_bitmap_width_bytes), "Incorrect alignment");
2734 
2735   // Width is one pixel and correlates with gdi_tiny_bitmap_width_bytes.
2736   int nWidth = 1;
2737 
2738   // Calculate bit count - will be 32.
2739   UINT nBitCount = gdi_tiny_bitmap_width_bytes / nWidth * BitsPerByte;
2740 
2741   return CreateBitmap(
2742       nWidth,
2743       nHeight,
2744       1,         // nPlanes
2745       nBitCount,
2746       mem);      // lpBits
2747 }
2748 
2749 // It has been found that some of the GDI functions fail under these two situations:
2750 //  1) When used with large pages
2751 //  2) When mem crosses the boundary between two separate memory reservations.
2752 //
2753 // This is a small test used to see if the current GDI implementation is
2754 // susceptible to any of these problems.
2755 static bool gdi_can_use_memory(void* mem) {
2756   HBITMAP bitmap = gdi_create_tiny_bitmap(mem);
2757   if (bitmap != NULL) {
2758     DeleteObject(bitmap);
2759     return true;
2760   }
2761 
2762   // Verify that the bitmap could be created with a normal page.
2763   // If this fails, the testing method above isn't reliable.
2764 #ifdef ASSERT
2765   void* verify_mem = ::malloc(4 * 1024);
2766   HBITMAP verify_bitmap = gdi_create_tiny_bitmap(verify_mem);
2767   if (verify_bitmap == NULL) {
2768     fatal("Couldn't create test bitmap with malloced memory");
2769   } else {
2770     DeleteObject(verify_bitmap);
2771   }
2772   ::free(verify_mem);
2773 #endif
2774 
2775   return false;
2776 }
2777 
2778 // Test if GDI functions work when memory spans
2779 // two adjacent memory reservations.
2780 static bool gdi_can_use_split_reservation_memory(bool use_large_pages, size_t granule) {
2781   DWORD mem_large_pages = use_large_pages ? MEM_LARGE_PAGES : 0;
2782 
2783   // Find virtual memory range. Two granules for regions and one for alignment.
2784   void* reserved = VirtualAlloc(NULL,
2785                                 granule * 3,
2786                                 MEM_RESERVE,
2787                                 PAGE_NOACCESS);
2788   if (reserved == NULL) {
2789     // Can't proceed with test - pessimistically report false
2790     return false;
2791   }
2792   VirtualFreeChecked(reserved, 0, MEM_RELEASE);
2793 
2794   // Ensure proper alignment
2795   void* res0 = align_up(reserved, granule);
2796   void* res1 = (char*)res0 + granule;
2797 
2798   // Reserve and commit the first part
2799   void* mem0 = VirtualAlloc(res0,
2800                             granule,
2801                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2802                             PAGE_READWRITE);
2803   if (mem0 != res0) {
2804     // Can't proceed with test - pessimistically report false
2805     return false;
2806   }
2807 
2808   // Reserve and commit the second part
2809   void* mem1 = VirtualAlloc(res1,
2810                             granule,
2811                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2812                             PAGE_READWRITE);
2813   if (mem1 != res1) {
2814     VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2815     // Can't proceed with test - pessimistically report false
2816     return false;
2817   }
2818 
2819   // Set the bitmap's bits to start one bitmap-width of bytes before the
2820   // boundary, so that the bitmap extends across the reservation boundary.
2821   void* bitmapBits = (char*)mem1 - gdi_tiny_bitmap_width_bytes;
2822 
2823   bool success = gdi_can_use_memory(bitmapBits);
2824 
2825   VirtualFreeChecked(mem1, 0, MEM_RELEASE);
2826   VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2827 
2828   return success;
2829 }
2830 
2831 // Container for NUMA node list info
2832 class NUMANodeListHolder {
2833  private:
2834   int *_numa_used_node_list;  // allocated below
2835   int _numa_used_node_count;
2836 
2837   void free_node_list() {
2838     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2839   }
2840 
2841  public:
2842   NUMANodeListHolder() {
2843     _numa_used_node_count = 0;
2844     _numa_used_node_list = NULL;
2845     // do rest of initialization in build routine (after function pointers are set up)
2846   }
2847 
2848   ~NUMANodeListHolder() {
2849     free_node_list();
2850   }
2851 
2852   bool build() {
2853     DWORD_PTR proc_aff_mask;
2854     DWORD_PTR sys_aff_mask;
2855     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2856     ULONG highest_node_number;
2857     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2858     free_node_list();
2859     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2860     for (unsigned int i = 0; i <= highest_node_number; i++) {
2861       ULONGLONG proc_mask_numa_node;
2862       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2863       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2864         _numa_used_node_list[_numa_used_node_count++] = i;
2865       }
2866     }
2867     return (_numa_used_node_count > 1);
2868   }
2869 
2870   int get_count() { return _numa_used_node_count; }
2871   int get_node_list_entry(int n) {
2872     // for indexes out of range, returns -1
2873     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2874   }
2875 
2876 } numa_node_list_holder;
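
// A minimal illustrative sketch, not used by the surrounding code: given a
// node id obtained from numa_node_list_holder above, memory can be committed
// with a preferred NUMA node using the documented VirtualAllocExNuma() API.
// The helper name is invented for this example.
static void* commit_on_numa_node_example(void* addr, size_t bytes, int node) {
  return VirtualAllocExNuma(GetCurrentProcess(), addr, bytes,
                            MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE,
                            (DWORD) node);
}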
2877 
2878 static size_t _large_page_size = 0;
2879 
2880 static bool request_lock_memory_privilege() {
2881   HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2882                                 os::current_process_id());
2883 
2884   bool success = false;
2885   HANDLE hToken = NULL;
2886   LUID luid;
2887   if (hProcess != NULL &&
2888       OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
2889       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2890 
2891     TOKEN_PRIVILEGES tp;
2892     tp.PrivilegeCount = 1;
2893     tp.Privileges[0].Luid = luid;
2894     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2895 
2896     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2897     // privilege. Check GetLastError() too. See the MSDN documentation.
2898     if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2899         (GetLastError() == ERROR_SUCCESS)) {
2900       success = true;
2901     }
2902   }
2903 
2904   // Cleanup
2905   if (hProcess != NULL) {
2906     CloseHandle(hProcess);
2907   }
2908   if (hToken != NULL) {
2909     CloseHandle(hToken);
2910   }
2911 
2912   return success;
2913 }
2914 
2915 static bool numa_interleaving_init() {
2916   bool success = false;
2917 
2918   // Print a warning if the UseNUMAInterleaving flag is specified on the command line.
2919   bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2920 
2921 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2922 
2923   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2924   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2925   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2926 
2927   if (!numa_node_list_holder.build()) {
2928     WARN("Process does not cover multiple NUMA nodes.");
2929     WARN("...Ignoring UseNUMAInterleaving flag.");
2930     return false;
2931   }
2932 
2933   if (!gdi_can_use_split_reservation_memory(UseLargePages, min_interleave_granularity)) {
2934     WARN("Windows GDI cannot handle split reservations.");
2935     WARN("...Ignoring UseNUMAInterleaving flag.");
2936     return false;
2937   }
2938 
2939   if (log_is_enabled(Debug, os, cpu)) {
2940     Log(os, cpu) log;
2941     log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2942     for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2943       log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2944     }
2945   }
2946 
2947 #undef WARN
2948 
2949   return true;
2950 }
2951 
2952 // This routine is used whenever we need to reserve a contiguous VA range
2953 // but must make separate VirtualAlloc calls for each piece of the range.
2954 // Reasons for doing this:
2955 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2956 //  * UseNUMAInterleaving requires a separate node for each piece
2957 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2958                                          DWORD prot,
2959                                          bool should_inject_error = false) {
2960   char * p_buf;
2961   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2962   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2963   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2964 
2965   // First reserve enough address space in advance, since we want to be
2966   // able to break a single contiguous virtual address range into multiple
2967   // large-page commits, but WS2003 does not allow reserving large-page space.
2968   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2969   // address space. Then we deallocate that reservation and re-allocate
2970   // using large pages.
2971   const size_t size_of_reserve = bytes + chunk_size;
2972   if (bytes > size_of_reserve) {
2973     // Overflowed.
2974     return NULL;
2975   }
2976   p_buf = (char *) VirtualAlloc(addr,
2977                                 size_of_reserve,  // size of Reserve
2978                                 MEM_RESERVE,
2979                                 PAGE_READWRITE);
2980   // If reservation failed, return NULL
2981   if (p_buf == NULL) return NULL;
2982   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2983   os::release_memory(p_buf, bytes + chunk_size);
2984 
2985   // We still need to round up to a page boundary (in case we are using large pages),
2986   // but not to a chunk boundary (in case NUMAInterleaveGranularity doesn't align with the page size);
2987   // instead we handle this in the bytes_to_rq computation below.
2988   p_buf = align_up(p_buf, page_size);
2989 
2990   // now go through and allocate one chunk at a time until all bytes are
2991   // allocated
2992   size_t  bytes_remaining = bytes;
2993   // An overflow of align_up() would have been caught above
2994   // in the calculation of size_of_reserve.
2995   char * next_alloc_addr = p_buf;
2996   HANDLE hProc = GetCurrentProcess();
2997 
2998 #ifdef ASSERT
2999   // Variable for the failure injection
3000   int ran_num = os::random();
3001   size_t fail_after = ran_num % bytes;
3002 #endif
3003 
3004   int count=0;
3005   while (bytes_remaining) {
3006     // select bytes_to_rq to get to the next chunk_size boundary
3007 
3008     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
3009     // Note allocate and commit
3010     char * p_new;
3011 
3012 #ifdef ASSERT
3013     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
3014 #else
3015     const bool inject_error_now = false;
3016 #endif
3017 
3018     if (inject_error_now) {
3019       p_new = NULL;
3020     } else {
3021       if (!UseNUMAInterleaving) {
3022         p_new = (char *) VirtualAlloc(next_alloc_addr,
3023                                       bytes_to_rq,
3024                                       flags,
3025                                       prot);
3026       } else {
3027         // get the next node to use from the used_node_list
3028         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3029         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3030         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3031       }
3032     }
3033 
3034     if (p_new == NULL) {
3035       // Free any allocated pages
3036       if (next_alloc_addr > p_buf) {
3037         // Some memory was committed so release it.
3038         size_t bytes_to_release = bytes - bytes_remaining;
3039         // NMT has yet to record any individual blocks, so it
3040         // needs to create a dummy 'reserve' record to match
3041         // the release.
3042         MemTracker::record_virtual_memory_reserve((address)p_buf,
3043                                                   bytes_to_release, CALLER_PC);
3044         os::release_memory(p_buf, bytes_to_release);
3045       }
3046 #ifdef ASSERT
3047       if (should_inject_error) {
3048         log_develop_debug(pagesize)("Reserving pages individually failed.");
3049       }
3050 #endif
3051       return NULL;
3052     }
3053 
3054     bytes_remaining -= bytes_to_rq;
3055     next_alloc_addr += bytes_to_rq;
3056     count++;
3057   }
3058   // Although the memory is allocated individually, it is returned as one.
3059   // NMT records it as one block.
3060   if ((flags & MEM_COMMIT) != 0) {
3061     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3062   } else {
3063     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3064   }
3065 
3066   // made it this far, success
3067   return p_buf;
3068 }
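
     // A minimal usage sketch (hypothetical caller), mirroring what
     // os::pd_reserve_memory() does further below on the NUMA-interleaving path:
     //
     //   char* base = allocate_pages_individually(bytes, NULL /* any address */,
     //                                            MEM_RESERVE, PAGE_READWRITE);
     //   if (base != NULL) {
     //     // commit later via os::commit_memory(); the commit path walks the
     //     // individual regions with VirtualQuery (see os::pd_commit_memory)
     //   }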
3069 
3070 static size_t large_page_init_decide_size() {
3071   // print a warning if any large-page-related flag is specified on the command line
3072   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3073                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3074 
3075 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3076 
3077   if (!request_lock_memory_privilege()) {
3078     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3079     return 0;
3080   }
3081 
3082   size_t size = GetLargePageMinimum();
3083   if (size == 0) {
3084     WARN("Large page is not supported by the processor.");
3085     return 0;
3086   }
3087 
3088 #if defined(IA32) || defined(AMD64)
3089   if (size > 4*M || LargePageSizeInBytes > 4*M) {
3090     WARN("JVM cannot use large pages bigger than 4MB.");
3091     return 0;
3092   }
3093 #endif
3094 
3095   if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
3096     size = LargePageSizeInBytes;
3097   }
3098 
3099   // Now test allocating a page
3100   void* large_page = VirtualAlloc(NULL,
3101                                   size,
3102                                   MEM_RESERVE|MEM_COMMIT|MEM_LARGE_PAGES,
3103                                   PAGE_READWRITE);
3104   if (large_page == NULL) {
3105     WARN("JVM cannot allocate one single large page.");
3106     return 0;
3107   }
3108 
3109   // Detect if GDI can use memory backed by large pages
3110   if (!gdi_can_use_memory(large_page)) {
3111     WARN("JVM cannot use large pages because of bug in Windows GDI.");
3112     return 0;
3113   }
3114 
3115   // Release test page
3116   VirtualFreeChecked(large_page, 0, MEM_RELEASE);
3117 
3118 #undef WARN
3119 
3120   return size;
3121 }
3122 
3123 void os::large_page_init() {
3124   if (!UseLargePages) {
3125     return;
3126   }
3127 
3128   _large_page_size = large_page_init_decide_size();
3129 
3130   const size_t default_page_size = (size_t) vm_page_size();
3131   if (_large_page_size > default_page_size) {
3132     _page_sizes[0] = _large_page_size;
3133     _page_sizes[1] = default_page_size;
3134     _page_sizes[2] = 0;
3135   }
3136 
3137   UseLargePages = _large_page_size != 0;
3138 
3139   if (UseLargePages && UseLargePagesIndividualAllocation) {
3140     if (!gdi_can_use_split_reservation_memory(true /* use_large_pages */, _large_page_size)) {
3141       if (FLAG_IS_CMDLINE(UseLargePagesIndividualAllocation)) {
3142         warning("Windows GDI cannot handle split reservations.");
3143         warning("...Ignoring UseLargePagesIndividualAllocation flag.");
3144       }
3145       UseLargePagesIndividualAllocation = false;
3146     }
3147   }
3148 }
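
     // For illustration, large pages are typically requested with flags such as
     // the following (the 2m value is only an example; LargePageSizeInBytes must
     // be a multiple of GetLargePageMinimum() to take effect, see
     // large_page_init_decide_size() above):
     //
     //   java -XX:+UseLargePages -XX:LargePageSizeInBytes=2m ...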
3149 
3150 int os::create_file_for_heap(const char* dir) {
3151 
3152   const char name_template[] = "/jvmheap.XXXXXX";
3153 
3154   size_t fullname_len = strlen(dir) + strlen(name_template);
3155   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3156   if (fullname == NULL) {
3157     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3158     return -1;
3159   }
3160   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3161   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3162 
3163   os::native_path(fullname);
3164 
3165   char *path = _mktemp(fullname);
3166   if (path == NULL) {
3167     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3168     os::free(fullname);
3169     return -1;
3170   }
3171 
3172   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3173 
3174   os::free(fullname);
3175   if (fd < 0) {
3176     warning("Problem opening file for heap (%s)", os::strerror(errno));
3177     return -1;
3178   }
3179   return fd;
3180 }
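
     // A minimal usage sketch (the directory name is hypothetical), pairing this
     // with os::map_memory_to_file() below:
     //
     //   int fd = os::create_file_for_heap("D:\\fastdisk");
     //   if (fd != -1) {
     //     char* heap_base = os::map_memory_to_file(NULL /* any address */, size, fd);
     //   }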
3181 
3182 // If 'base' is not NULL, function will return NULL if it cannot get 'base'
3183 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3184   assert(fd != -1, "File descriptor is not valid");
3185 
3186   HANDLE fh = (HANDLE)_get_osfhandle(fd);
3187 #ifdef _LP64
3188   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3189     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3190 #else
3191   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3192     0, (DWORD)size, NULL);
3193 #endif
3194   if (fileMapping == NULL) {
3195     if (GetLastError() == ERROR_DISK_FULL) {
3196       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3197     }
3198     else {
3199       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3200     }
3201 
3202     return NULL;
3203   }
3204 
3205   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3206 
3207   CloseHandle(fileMapping);
3208 
3209   return (char*)addr;
3210 }
3211 
3212 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3213   assert(fd != -1, "File descriptor is not valid");
3214   assert(base != NULL, "Base address cannot be NULL");
3215 
3216   release_memory(base, size);
3217   return map_memory_to_file(base, size, fd);
3218 }
3219 
3220 // On win32, one cannot release just a part of reserved memory, it's an
3221 // all or nothing deal.  When we split a reservation, we must break the
3222 // reservation into two reservations.
3223 void os::split_reserved_memory(char *base, size_t size, size_t split) {
3224 
3225   char* const split_address = base + split;
3226   assert(size > 0, "Sanity");
3227   assert(size > split, "Sanity");
3228   assert(split > 0, "Sanity");
3229   assert(is_aligned(base, os::vm_allocation_granularity()), "Sanity");
3230   assert(is_aligned(split_address, os::vm_allocation_granularity()), "Sanity");
3231 
3232   release_memory(base, size);
3233   reserve_memory(split, base);
3234   reserve_memory(size - split, split_address);
3235 
3236   // NMT: nothing to do here. Since Windows implements the split by
3237   //  releasing and re-reserving memory, the parts are already registered
3238   //  as individual mappings with NMT.
3239 
3240 }
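
     // A worked example (values are illustrative only): with a 64K allocation
     // granularity, splitting a 256K reservation at split == 128K releases the
     // whole 256K range and immediately re-reserves [base, base + 128K) and
     // [base + 128K, base + 256K) as two independent reservations.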
3241 
3242 // Multiple threads can race in this code, but it's not possible to unmap small sections of
3243 // virtual space to get the requested alignment, as on POSIX-like OSes.
3244 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3245 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3246   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3247          "Alignment must be a multiple of allocation granularity (page size)");
3248   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3249 
3250   size_t extra_size = size + alignment;
3251   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3252 
3253   char* aligned_base = NULL;
3254 
3255   do {
3256     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3257     if (extra_base == NULL) {
3258       return NULL;
3259     }
3260     // Do manual alignment
3261     aligned_base = align_up(extra_base, alignment);
3262 
3263     if (file_desc != -1) {
3264       os::unmap_memory(extra_base, extra_size);
3265     } else {
3266       os::release_memory(extra_base, extra_size);
3267     }
3268 
3269     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3270 
3271   } while (aligned_base == NULL);
3272 
3273   return aligned_base;
3274 }
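
     // A worked example (numbers are illustrative only): to obtain a 1M-aligned
     // reservation of 4M, the loop reserves 4M + 1M, aligns the returned base up
     // to 1M, releases (or unmaps) the whole over-sized range, and re-reserves
     // exactly 4M at the aligned address. If another thread claims that address
     // in the meantime, the re-reserve returns NULL and the loop simply retries.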
3275 
3276 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3277   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3278          "reserve alignment");
3279   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3280   char* res;
3281   // Note that if UseLargePages is on, all the areas that require interleaving
3282   // will go through reserve_memory_special rather than through here.
3283   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3284   if (!use_individual) {
3285     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3286   } else {
3287     elapsedTimer reserveTimer;
3288     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3289     // in numa interleaving, we have to allocate pages individually
3290     // (well really chunks of NUMAInterleaveGranularity size)
3291     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3292     if (res == NULL) {
3293       warning("NUMA page allocation failed");
3294     }
3295     if (Verbose && PrintMiscellaneous) {
3296       reserveTimer.stop();
3297       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3298                     reserveTimer.milliseconds(), reserveTimer.ticks());
3299     }
3300   }
3301   assert(res == NULL || addr == NULL || addr == res,
3302          "Unexpected address from reserve.");
3303 
3304   return res;
3305 }
3306 
3307 // Reserve memory at an arbitrary address, only if that area is
3308 // available (and not reserved for something else).
3309 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3310   // Windows os::reserve_memory() fails if the requested address range is
3311   // not available.
3312   return reserve_memory(bytes, requested_addr);
3313 }
3314 
3315 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3316   assert(file_desc >= 0, "file_desc is not valid");
3317   return map_memory_to_file(requested_addr, bytes, file_desc);
3318 }
3319 
3320 size_t os::large_page_size() {
3321   return _large_page_size;
3322 }
3323 
3324 bool os::can_commit_large_page_memory() {
3325   // Windows only uses large page memory when the entire region is reserved
3326   // and committed in a single VirtualAlloc() call. This may change in the
3327   // future, but with Windows 2003 it's not possible to commit on demand.
3328   return false;
3329 }
3330 
3331 bool os::can_execute_large_page_memory() {
3332   return true;
3333 }
3334 
3335 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3336                                     bool exec) {
3337   assert(UseLargePages, "only for large pages");
3338 
3339   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3340     return NULL; // Fallback to small pages.
3341   }
3342 
3343   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3344   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3345 
3346   // with large pages, there are two cases where we need to use Individual Allocation
3347   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3348   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3349   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3350     log_debug(pagesize)("Reserving large pages individually.");
3351 
3352     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3353     if (p_buf == NULL) {
3354       // give an appropriate warning message
3355       if (UseNUMAInterleaving) {
3356         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3357       }
3358       if (UseLargePagesIndividualAllocation) {
3359         warning("Individually allocated large pages failed, "
3360                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3361       }
3362       return NULL;
3363     }
3364 
3365     return p_buf;
3366 
3367   } else {
3368     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3369 
3370     // normal policy just allocate it all at once
3371     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3372     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3373 
3374     return res;
3375   }
3376 }
3377 
3378 bool os::pd_release_memory_special(char* base, size_t bytes) {
3379   assert(base != NULL, "Sanity check");
3380   return pd_release_memory(base, bytes);
3381 }
3382 
3383 void os::print_statistics() {
3384 }
3385 
3386 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3387   int err = os::get_last_error();
3388   char buf[256];
3389   size_t buf_len = os::lasterror(buf, sizeof(buf));
3390   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3391           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3392           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3393 }
3394 
3395 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3396   if (bytes == 0) {
3397     // Don't bother the OS with noops.
3398     return true;
3399   }
3400   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3401   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3402   // Don't attempt to print anything if the OS call fails. We're
3403   // probably low on resources, so the print itself may cause crashes.
3404 
3405   // Unless we have NUMAInterleaving enabled, the range of a commit
3406   // is always within a reserve covered by a single VirtualAlloc;
3407   // in that case we can just do a single commit for the requested size.
3408   if (!UseNUMAInterleaving) {
3409     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3410       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3411       return false;
3412     }
3413     if (exec) {
3414       DWORD oldprot;
3415       // Windows doc says to use VirtualProtect to get execute permissions
3416       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3417         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3418         return false;
3419       }
3420     }
3421     return true;
3422   } else {
3423 
3424     // when NUMAInterleaving is enabled, the commit might cover a range that
3425     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3426     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3427     // returns represents the number of bytes that can be committed in one step.
3428     size_t bytes_remaining = bytes;
3429     char * next_alloc_addr = addr;
3430     while (bytes_remaining > 0) {
3431       MEMORY_BASIC_INFORMATION alloc_info;
3432       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3433       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3434       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3435                        PAGE_READWRITE) == NULL) {
3436         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3437                                             exec);)
3438         return false;
3439       }
3440       if (exec) {
3441         DWORD oldprot;
3442         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3443                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3444           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3445                                               exec);)
3446           return false;
3447         }
3448       }
3449       bytes_remaining -= bytes_to_rq;
3450       next_alloc_addr += bytes_to_rq;
3451     }
3452   }
3453   // if we made it this far, return true
3454   return true;
3455 }
3456 
3457 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3458                           bool exec) {
3459   // alignment_hint is ignored on this OS
3460   return pd_commit_memory(addr, size, exec);
3461 }
3462 
3463 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3464                                   const char* mesg) {
3465   assert(mesg != NULL, "mesg must be specified");
3466   if (!pd_commit_memory(addr, size, exec)) {
3467     warn_fail_commit_memory(addr, size, exec);
3468     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3469   }
3470 }
3471 
3472 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3473                                   size_t alignment_hint, bool exec,
3474                                   const char* mesg) {
3475   // alignment_hint is ignored on this OS
3476   pd_commit_memory_or_exit(addr, size, exec, mesg);
3477 }
3478 
3479 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3480   if (bytes == 0) {
3481     // Don't bother the OS with noops.
3482     return true;
3483   }
3484   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3485   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3486   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3487 }
3488 
3489 bool os::pd_release_memory(char* addr, size_t bytes) {
3490   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3491 }
3492 
3493 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3494   return os::commit_memory(addr, size, !ExecMem);
3495 }
3496 
3497 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3498   return os::uncommit_memory(addr, size);
3499 }
3500 
3501 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3502   uint count = 0;
3503   bool ret = false;
3504   size_t bytes_remaining = bytes;
3505   char * next_protect_addr = addr;
3506 
3507   // Use VirtualQuery() to get the chunk size.
3508   while (bytes_remaining) {
3509     MEMORY_BASIC_INFORMATION alloc_info;
3510     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3511       return false;
3512     }
3513 
3514     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3515     // We used different APIs in allocate_pages_individually() depending on UseNUMAInterleaving,
3516     // but we don't distinguish here, as both cases are protected by the same API.
3517     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3518     if (!ret) {
3519       warning("Failed protecting pages individually for chunk #%u", count);
3520       return false;
3521     }
3522 
3523     bytes_remaining -= bytes_to_protect;
3524     next_protect_addr += bytes_to_protect;
3525     count++;
3526   }
3527   return ret;
3528 }
3529 
3530 // Set protections specified
3531 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3532                         bool is_committed) {
3533   unsigned int p = 0;
3534   switch (prot) {
3535   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3536   case MEM_PROT_READ: p = PAGE_READONLY; break;
3537   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3538   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3539   default:
3540     ShouldNotReachHere();
3541   }
3542 
3543   DWORD old_status;
3544 
3545   // Strangely enough, on Win32 one can change protection only for committed
3546   // memory; not a big deal anyway, as bytes is less than or equal to 64K.
3547   if (!is_committed) {
3548     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3549                           "cannot commit protection page");
3550   }
3551   // One cannot use os::guard_memory() here, as on Win32 guard pages
3552   // have different (one-shot) semantics. From MSDN on PAGE_GUARD:
3553   //
3554   // Pages in the region become guard pages. Any attempt to access a guard page
3555   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3556   // the guard page status. Guard pages thus act as a one-time access alarm.
3557   bool ret;
3558   if (UseNUMAInterleaving) {
3559     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3560     // so we must protect the chunks individually.
3561     ret = protect_pages_individually(addr, bytes, p, &old_status);
3562   } else {
3563     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3564   }
3565 #ifdef ASSERT
3566   if (!ret) {
3567     int err = os::get_last_error();
3568     char buf[256];
3569     size_t buf_len = os::lasterror(buf, sizeof(buf));
3570     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3571           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3572           buf_len != 0 ? buf : "<no_error_string>", err);
3573   }
3574 #endif
3575   return ret;
3576 }
3577 
3578 bool os::guard_memory(char* addr, size_t bytes) {
3579   DWORD old_status;
3580   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3581 }
3582 
3583 bool os::unguard_memory(char* addr, size_t bytes) {
3584   DWORD old_status;
3585   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3586 }
3587 
3588 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3589 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3590 void os::numa_make_global(char *addr, size_t bytes)    { }
3591 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3592 bool os::numa_topology_changed()                       { return false; }
3593 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3594 int os::numa_get_group_id()                            { return 0; }
3595 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3596   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3597     // Provide an answer for UMA systems
3598     ids[0] = 0;
3599     return 1;
3600   } else {
3601     // check for size bigger than actual groups_num
3602     size = MIN2(size, numa_get_groups_num());
3603     for (int i = 0; i < (int)size; i++) {
3604       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3605     }
3606     return size;
3607   }
3608 }
3609 
3610 int os::numa_get_group_id_for_address(const void* address) {
3611   return 0;
3612 }
3613 
3614 bool os::get_page_info(char *start, page_info* info) {
3615   return false;
3616 }
3617 
3618 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3619                      page_info* page_found) {
3620   return end;
3621 }
3622 
3623 char* os::non_memory_address_word() {
3624   // Must never look like an address returned by reserve_memory,
3625   // even in its subfields (as defined by the CPU immediate fields,
3626   // if the CPU splits constants across multiple instructions).
3627   return (char*)-1;
3628 }
3629 
3630 #define MAX_ERROR_COUNT 100
3631 #define SYS_THREAD_ERROR 0xffffffffUL
3632 
3633 void os::pd_start_thread(Thread* thread) {
3634   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3635   // Returns previous suspend state:
3636   // 0:  Thread was not suspended
3637   // 1:  Thread is running now
3638   // >1: Thread is still suspended.
3639   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3640 }
3641 
3642 
3643 // Short sleep, direct OS call.
3644 //
3645 // ms = 0 means allow others (if any) to run.
3646 //
3647 void os::naked_short_sleep(jlong ms) {
3648   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3649   Sleep(ms);
3650 }
3651 
3652 // Windows does not provide sleep functionality with nanosecond resolution, so we
3653 // try to approximate this with spinning combined with yielding if another thread
3654 // is ready to run on the current processor.
3655 void os::naked_short_nanosleep(jlong ns) {
3656   assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3657 
3658   int64_t start = os::javaTimeNanos();
3659   do {
3660     if (SwitchToThread() == 0) {
3661       // Nothing else is ready to run on this cpu, spin a little
3662       SpinPause();
3663     }
3664   } while (os::javaTimeNanos() - start < ns);
3665 }
3666 
3667 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3668 void os::infinite_sleep() {
3669   while (true) {    // sleep forever ...
3670     Sleep(100000);  // ... 100 seconds at a time
3671   }
3672 }
3673 
3674 typedef BOOL (WINAPI * STTSignature)(void);
3675 
3676 void os::naked_yield() {
3677   // Consider passing back the return value from SwitchToThread().
3678   SwitchToThread();
3679 }
3680 
3681 // Win32 only gives you access to seven real priorities at a time,
3682 // so we compress Java's ten down to seven.  It would be better
3683 // if we dynamically adjusted relative priorities.
3684 
3685 int os::java_to_os_priority[CriticalPriority + 1] = {
3686   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3687   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3688   THREAD_PRIORITY_LOWEST,                       // 2
3689   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3690   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3691   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3692   THREAD_PRIORITY_NORMAL,                       // 6
3693   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3694   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3695   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3696   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3697   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3698 };
3699 
3700 int prio_policy1[CriticalPriority + 1] = {
3701   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3702   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3703   THREAD_PRIORITY_LOWEST,                       // 2
3704   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3705   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3706   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3707   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3708   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3709   THREAD_PRIORITY_HIGHEST,                      // 8
3710   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3711   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3712   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3713 };
3714 
3715 static int prio_init() {
3716   // If ThreadPriorityPolicy is 1, switch tables
3717   if (ThreadPriorityPolicy == 1) {
3718     int i;
3719     for (i = 0; i < CriticalPriority + 1; i++) {
3720       os::java_to_os_priority[i] = prio_policy1[i];
3721     }
3722   }
3723   if (UseCriticalJavaThreadPriority) {
3724     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3725   }
3726   return 0;
3727 }
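
     // For illustration, the alternate mapping above is selected with flags such
     // as the following (the combination shown is only an example):
     //
     //   java -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=1 ...
     //
     // and -XX:+UseCriticalJavaThreadPriority additionally maps MaxPriority onto
     // the CriticalPriority entry.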
3728 
3729 OSReturn os::set_native_priority(Thread* thread, int priority) {
3730   if (!UseThreadPriorities) return OS_OK;
3731   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3732   return ret ? OS_OK : OS_ERR;
3733 }
3734 
3735 OSReturn os::get_native_priority(const Thread* const thread,
3736                                  int* priority_ptr) {
3737   if (!UseThreadPriorities) {
3738     *priority_ptr = java_to_os_priority[NormPriority];
3739     return OS_OK;
3740   }
3741   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3742   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3743     assert(false, "GetThreadPriority failed");
3744     return OS_ERR;
3745   }
3746   *priority_ptr = os_prio;
3747   return OS_OK;
3748 }
3749 
3750 // GetCurrentThreadId() returns DWORD
3751 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3752 
3753 static int _initial_pid = 0;
3754 
3755 int os::current_process_id() {
3756   return (_initial_pid ? _initial_pid : _getpid());
3757 }
3758 
3759 int    os::win32::_vm_page_size              = 0;
3760 int    os::win32::_vm_allocation_granularity = 0;
3761 int    os::win32::_processor_type            = 0;
3762 // Processor level is not available on non-NT systems, use vm_version instead
3763 int    os::win32::_processor_level           = 0;
3764 julong os::win32::_physical_memory           = 0;
3765 size_t os::win32::_default_stack_size        = 0;
3766 
3767 intx          os::win32::_os_thread_limit    = 0;
3768 volatile intx os::win32::_os_thread_count    = 0;
3769 
3770 bool   os::win32::_is_windows_server         = false;
3771 
3772 // 6573254
3773 // Currently, the bug is observed across all the supported Windows releases,
3774 // including the latest one (as of this writing - Windows Server 2012 R2)
3775 bool   os::win32::_has_exit_bug              = true;
3776 
3777 void os::win32::initialize_system_info() {
3778   SYSTEM_INFO si;
3779   GetSystemInfo(&si);
3780   _vm_page_size    = si.dwPageSize;
3781   _vm_allocation_granularity = si.dwAllocationGranularity;
3782   _processor_type  = si.dwProcessorType;
3783   _processor_level = si.wProcessorLevel;
3784   set_processor_count(si.dwNumberOfProcessors);
3785 
3786   MEMORYSTATUSEX ms;
3787   ms.dwLength = sizeof(ms);
3788 
3789   // also returns ullAvailPhys (free physical memory bytes), ullTotalVirtual, ullAvailVirtual,
3790   // and dwMemoryLoad (% of memory in use)
3791   GlobalMemoryStatusEx(&ms);
3792   _physical_memory = ms.ullTotalPhys;
3793 
3794   if (FLAG_IS_DEFAULT(MaxRAM)) {
3795     // Adjust MaxRAM according to the maximum virtual address space available.
3796     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3797   }
3798 
3799   OSVERSIONINFOEX oi;
3800   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3801   GetVersionEx((OSVERSIONINFO*)&oi);
3802   switch (oi.dwPlatformId) {
3803   case VER_PLATFORM_WIN32_NT:
3804     {
3805       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3806       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3807           oi.wProductType == VER_NT_SERVER) {
3808         _is_windows_server = true;
3809       }
3810     }
3811     break;
3812   default: fatal("Unknown platform");
3813   }
3814 
3815   _default_stack_size = os::current_stack_size();
3816   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3817   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3818          "stack size not a multiple of page size");
3819 
3820   initialize_performance_counter();
3821 }
3822 
3823 
3824 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3825                                       int ebuflen) {
3826   char path[MAX_PATH];
3827   DWORD size;
3828   DWORD pathLen = (DWORD)sizeof(path);
3829   HINSTANCE result = NULL;
3830 
3831   // only allow library name without path component
3832   assert(strchr(name, '\\') == NULL, "path not allowed");
3833   assert(strchr(name, ':') == NULL, "path not allowed");
3834   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3835     jio_snprintf(ebuf, ebuflen,
3836                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3837     return NULL;
3838   }
3839 
3840   // search system directory
3841   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3842     if (size >= pathLen) {
3843       return NULL; // truncated
3844     }
3845     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3846       return NULL; // truncated
3847     }
3848     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3849       return result;
3850     }
3851   }
3852 
3853   // try Windows directory
3854   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3855     if (size >= pathLen) {
3856       return NULL; // truncated
3857     }
3858     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3859       return NULL; // truncated
3860     }
3861     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3862       return result;
3863     }
3864   }
3865 
3866   jio_snprintf(ebuf, ebuflen,
3867                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3868   return NULL;
3869 }
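
     // A minimal usage sketch (the DLL name is hypothetical):
     //
     //   char ebuf[1024];
     //   HINSTANCE h = os::win32::load_Windows_dll("example.dll", ebuf, sizeof(ebuf));
     //   if (h == NULL) {
     //     // ebuf holds a descriptive error message
     //   }
     //
     // Only a bare file name is accepted; names containing a path separator or a
     // drive letter are rejected, and only the system and Windows directories
     // are searched.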
3870 
3871 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3872 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3873 
3874 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3875   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3876   return TRUE;
3877 }
3878 
3879 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3880   // Basic approach:
3881   //  - Each exiting thread registers its intent to exit and then does so.
3882   //  - A thread trying to terminate the process must wait for all
3883   //    threads currently exiting to complete their exit.
3884 
3885   if (os::win32::has_exit_bug()) {
3886     // The array holds handles of the threads that have started exiting by calling
3887     // _endthreadex().
3888     // Should be large enough to avoid blocking the exiting thread due to lack of
3889     // a free slot.
3890     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3891     static int handle_count = 0;
3892 
3893     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3894     static CRITICAL_SECTION crit_sect;
3895     static volatile DWORD process_exiting = 0;
3896     int i, j;
3897     DWORD res;
3898     HANDLE hproc, hthr;
3899 
3900     // We only attempt to register threads until a process exiting
3901     // thread manages to set the process_exiting flag. Any threads
3902     // that come through here after the process_exiting flag is set
3903     // are unregistered and will be caught in the SuspendThread()
3904     // infinite loop below.
3905     bool registered = false;
3906 
3907     // The first thread that reached this point, initializes the critical section.
3908     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3909       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3910     } else if (Atomic::load_acquire(&process_exiting) == 0) {
3911       if (what != EPT_THREAD) {
3912         // Atomically set process_exiting before the critical section
3913         // to increase the visibility between racing threads.
3914         Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
3915       }
3916       EnterCriticalSection(&crit_sect);
3917 
3918       if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
3919         // Remove from the array those handles of the threads that have completed exiting.
3920         for (i = 0, j = 0; i < handle_count; ++i) {
3921           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3922           if (res == WAIT_TIMEOUT) {
3923             handles[j++] = handles[i];
3924           } else {
3925             if (res == WAIT_FAILED) {
3926               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3927                       GetLastError(), __FILE__, __LINE__);
3928             }
3929             // Don't keep the handle, if we failed waiting for it.
3930             CloseHandle(handles[i]);
3931           }
3932         }
3933 
3934         // If there's no free slot in the array of the kept handles, we'll have to
3935         // wait until at least one thread completes exiting.
3936         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3937           // Raise the priority of the oldest exiting thread to increase its chances
3938           // to complete sooner.
3939           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3940           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3941           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3942             i = (res - WAIT_OBJECT_0);
3943             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3944             for (; i < handle_count; ++i) {
3945               handles[i] = handles[i + 1];
3946             }
3947           } else {
3948             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3949                     (res == WAIT_FAILED ? "failed" : "timed out"),
3950                     GetLastError(), __FILE__, __LINE__);
3951             // Don't keep handles, if we failed waiting for them.
3952             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3953               CloseHandle(handles[i]);
3954             }
3955             handle_count = 0;
3956           }
3957         }
3958 
3959         // Store a duplicate of the current thread handle in the array of handles.
3960         hproc = GetCurrentProcess();
3961         hthr = GetCurrentThread();
3962         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3963                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3964           warning("DuplicateHandle failed (%u) in %s: %d\n",
3965                   GetLastError(), __FILE__, __LINE__);
3966 
3967           // We can't register this thread (no more handles) so this thread
3968           // may be racing with a thread that is calling exit(). If the thread
3969           // that is calling exit() has managed to set the process_exiting
3970           // flag, then this thread will be caught in the SuspendThread()
3971           // infinite loop below which closes that race. A small timing
3972           // window remains before the process_exiting flag is set, but it
3973           // is only exposed when we are out of handles.
3974         } else {
3975           ++handle_count;
3976           registered = true;
3977 
3978           // The current exiting thread has stored its handle in the array, and now
3979           // should leave the critical section before calling _endthreadex().
3980         }
3981 
3982       } else if (what != EPT_THREAD && handle_count > 0) {
3983         jlong start_time, finish_time, timeout_left;
3984         // Before ending the process, make sure all the threads that had called
3985         // _endthreadex() completed.
3986 
3987         // Set the priority level of the current thread to the same value as
3988         // the priority level of exiting threads.
3989         // This is to ensure it will be given a fair chance to execute if
3990         // the timeout expires.
3991         hthr = GetCurrentThread();
3992         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3993         start_time = os::javaTimeNanos();
3994         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3995         for (i = 0; ; ) {
3996           int portion_count = handle_count - i;
3997           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3998             portion_count = MAXIMUM_WAIT_OBJECTS;
3999           }
4000           for (j = 0; j < portion_count; ++j) {
4001             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
4002           }
4003           timeout_left = (finish_time - start_time) / 1000000L;
4004           if (timeout_left < 0) {
4005             timeout_left = 0;
4006           }
4007           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4008           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4009             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4010                     (res == WAIT_FAILED ? "failed" : "timed out"),
4011                     GetLastError(), __FILE__, __LINE__);
4012             // Reset portion_count so we close the remaining
4013             // handles due to this error.
4014             portion_count = handle_count - i;
4015           }
4016           for (j = 0; j < portion_count; ++j) {
4017             CloseHandle(handles[i + j]);
4018           }
4019           if ((i += portion_count) >= handle_count) {
4020             break;
4021           }
4022           start_time = os::javaTimeNanos();
4023         }
4024         handle_count = 0;
4025       }
4026 
4027       LeaveCriticalSection(&crit_sect);
4028     }
4029 
4030     if (!registered &&
4031         Atomic::load_acquire(&process_exiting) != 0 &&
4032         process_exiting != GetCurrentThreadId()) {
4033       // Some other thread is about to call exit(), so we don't let
4034       // the current unregistered thread proceed to exit() or _endthreadex()
4035       while (true) {
4036         SuspendThread(GetCurrentThread());
4037         // Avoid busy-wait loop, if SuspendThread() failed.
4038         Sleep(EXIT_TIMEOUT);
4039       }
4040     }
4041   }
4042 
4043   // We are here if either
4044   // - there's no 'race at exit' bug on this OS release;
4045   // - initialization of the critical section failed (unlikely);
4046   // - the current thread has registered itself and left the critical section;
4047   // - the process-exiting thread has raised the flag and left the critical section.
4048   if (what == EPT_THREAD) {
4049     _endthreadex((unsigned)exit_code);
4050   } else if (what == EPT_PROCESS) {
4051     ::exit(exit_code);
4052   } else {
4053     _exit(exit_code);
4054   }
4055 
4056   // Should not reach here
4057   return exit_code;
4058 }
4059 
4060 #undef EXIT_TIMEOUT
4061 
4062 void os::win32::setmode_streams() {
4063   _setmode(_fileno(stdin), _O_BINARY);
4064   _setmode(_fileno(stdout), _O_BINARY);
4065   _setmode(_fileno(stderr), _O_BINARY);
4066 }
4067 
4068 void os::wait_for_keypress_at_exit(void) {
4069   if (PauseAtExit) {
4070     fprintf(stderr, "Press any key to continue...\n");
4071     fgetc(stdin);
4072   }
4073 }
4074 
4075 
4076 bool os::message_box(const char* title, const char* message) {
4077   int result = MessageBox(NULL, message, title,
4078                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4079   return result == IDYES;
4080 }
4081 
4082 #ifndef PRODUCT
4083 #ifndef _WIN64
4084 // Helpers to check whether NX protection is enabled
4085 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4086   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4087       pex->ExceptionRecord->NumberParameters > 0 &&
4088       pex->ExceptionRecord->ExceptionInformation[0] ==
4089       EXCEPTION_INFO_EXEC_VIOLATION) {
4090     return EXCEPTION_EXECUTE_HANDLER;
4091   }
4092   return EXCEPTION_CONTINUE_SEARCH;
4093 }
4094 
4095 void nx_check_protection() {
4096   // If NX is enabled we'll get an exception calling into code on the stack
4097   char code[] = { (char)0xC3 }; // ret
4098   void *code_ptr = (void *)code;
4099   __try {
4100     __asm call code_ptr
4101   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4102     tty->print_raw_cr("NX protection detected.");
4103   }
4104 }
4105 #endif // _WIN64
4106 #endif // PRODUCT
4107 
4108 // This is called _before_ the global arguments have been parsed
4109 void os::init(void) {
4110   _initial_pid = _getpid();
4111 
4112   init_random(1234567);
4113 
4114   win32::initialize_system_info();
4115   win32::setmode_streams();
4116   init_page_sizes((size_t) win32::vm_page_size());
4117 
4118   // This may be overridden later when argument processing is done.
4119   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
4120 
4121   // Initialize main_process and main_thread
4122   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4123   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4124                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4125     fatal("DuplicateHandle failed\n");
4126   }
4127   main_thread_id = (int) GetCurrentThreadId();
4128 
4129   // initialize fast thread access - only used for 32-bit
4130   win32::initialize_thread_ptr_offset();
4131 }
4132 
4133 // To install functions for atexit processing
4134 extern "C" {
4135   static void perfMemory_exit_helper() {
4136     perfMemory_exit();
4137   }
4138 }
4139 
4140 static jint initSock();
4141 
4142 // this is called _after_ the global arguments have been parsed
4143 jint os::init_2(void) {
4144 
4145   // This could be set at any time, but all platforms
4146   // have to set it the same, so we mirror Solaris.
4147   DEBUG_ONLY(os::set_mutex_init_done();)
4148 
4149   // Setup Windows Exceptions
4150 
4151 #if INCLUDE_AOT
4152   // If AOT is enabled we need to install a vectored exception handler
4153   // in order to forward implicit exceptions from code in AOT
4154   // generated DLLs.  This is necessary since these DLLs are not
4155   // registered for structured exceptions like codecache methods are.
4156   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4157     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4158   }
4159 #endif
4160 
4161   // for debugging float code generation bugs
4162   if (ForceFloatExceptions) {
4163 #ifndef  _WIN64
4164     static long fp_control_word = 0;
4165     __asm { fstcw fp_control_word }
4166     // see Intel PPro Manual, Vol. 2, p 7-16
4167     const long precision = 0x20;
4168     const long underflow = 0x10;
4169     const long overflow  = 0x08;
4170     const long zero_div  = 0x04;
4171     const long denorm    = 0x02;
4172     const long invalid   = 0x01;
4173     fp_control_word |= invalid;
4174     __asm { fldcw fp_control_word }
4175 #endif
4176   }
4177 
4178   // If stack_commit_size is 0, Windows will reserve the default size,
4179   // but only commit a small portion of it.
4180   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4181   size_t default_reserve_size = os::win32::default_stack_size();
4182   size_t actual_reserve_size = stack_commit_size;
4183   if (stack_commit_size < default_reserve_size) {
4184     // If stack_commit_size == 0, we want this too
4185     actual_reserve_size = default_reserve_size;
4186   }
4187 
4188   // Check minimum allowable stack size for thread creation and to initialize
4189   // the java system classes, including StackOverflowError - depends on page
4190   // size.  Add two 4K pages for compiler2 recursion in main thread.
4191   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4192   // class initialization depending on 32 or 64 bit VM.
4193   size_t min_stack_allowed =
4194             (size_t)(JavaThread::stack_guard_zone_size() +
4195                      JavaThread::stack_shadow_zone_size() +
4196                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4197 
4198   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4199 
4200   if (actual_reserve_size < min_stack_allowed) {
4201     tty->print_cr("\nThe Java thread stack size specified is too small. "
4202                   "Specify at least %dk",
4203                   min_stack_allowed / K);
4204     return JNI_ERR;
4205   }
4206 
4207   JavaThread::set_stack_size_at_create(stack_commit_size);
4208 
4209   // Calculate the theoretical maximum number of threads, to guard against artificial
4210   // out-of-memory situations where all available address space has been
4211   // reserved by thread stacks.
4212   assert(actual_reserve_size != 0, "Must have a stack");
4213 
4214   // Calculate the thread limit at which we should start doing Virtual Memory
4215   // banging. Currently this is when the threads have used all but 200MB of address space.
4216   //
4217   // TODO: consider performing a similar calculation for commit size instead
4218   // as reserve size, since on a 64-bit platform we'll run into that more
4219   // often than running out of virtual memory space.  We can use the
4220   // lower value of the two calculations as the os_thread_limit.
4221   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4222   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4223 
4224   // atexit methods are called in the reverse order of their registration.
4225   // There is no limit to the number of functions registered. atexit does
4226   // not set errno.
4227 
4228   if (PerfAllowAtExitRegistration) {
4229     // only register atexit functions if PerfAllowAtExitRegistration is set.
4230     // atexit functions can be delayed until process exit time, which
4231     // can be problematic for embedded VM situations. Embedded VMs should
4232     // call DestroyJavaVM() to assure that VM resources are released.
4233 
4234     // note: perfMemory_exit_helper atexit function may be removed in
4235     // the future if the appropriate cleanup code can be added to the
4236     // VM_Exit VMOperation's doit method.
4237     if (atexit(perfMemory_exit_helper) != 0) {
4238       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4239     }
4240   }
4241 
4242 #ifndef _WIN64
4243   // Print something if NX is enabled (win32 on AMD64)
4244   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4245 #endif
4246 
4247   // initialize thread priority policy
4248   prio_init();
4249 
4250   if (UseNUMA && !ForceNUMA) {
4251     UseNUMA = false; // We don't fully support this yet
4252   }
4253 
4254   if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
4255     if (!numa_interleaving_init()) {
4256       FLAG_SET_ERGO(UseNUMAInterleaving, false);
4257     } else if (!UseNUMAInterleaving) {
4258       // When NUMA is requested, non-NUMA-aware allocations default to interleaving.
4259       FLAG_SET_ERGO(UseNUMAInterleaving, true);
4260     }
4261   }
4262 
4263   if (initSock() != JNI_OK) {
4264     return JNI_ERR;
4265   }
4266 
4267   SymbolEngine::recalc_search_path();
4268 
4269   // Initialize data for jdk.internal.misc.Signal
4270   if (!ReduceSignalUsage) {
4271     jdk_misc_signal_init();
4272   }
4273 
4274   return JNI_OK;
4275 }
4276 
4277 // combine the high and low DWORD into a ULONGLONG
4278 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4279   ULONGLONG value = high_word;
4280   value <<= sizeof(high_word) * 8;
4281   value |= low_word;
4282   return value;
4283 }
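
     // For example, make_double_word(0x1, 0x2) yields 0x100000002: the high DWORD
     // is shifted left by 32 bits (sizeof(DWORD) * 8) and OR'ed with the low DWORD.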
4284 
4285 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4286 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4287   ::memset((void*)sbuf, 0, sizeof(struct stat));
4288   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4289   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4290                                   file_data.ftLastWriteTime.dwLowDateTime);
4291   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4292                                   file_data.ftCreationTime.dwLowDateTime);
4293   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4294                                   file_data.ftLastAccessTime.dwLowDateTime);
4295   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4296     sbuf->st_mode |= S_IFDIR;
4297   } else {
4298     sbuf->st_mode |= S_IFREG;
4299   }
4300 }
4301 
4302 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
4303   // Get required buffer size to convert to Unicode
4304   int unicode_path_len = MultiByteToWideChar(CP_ACP,
4305                                              MB_ERR_INVALID_CHARS,
4306                                              char_path, -1,
4307                                              NULL, 0);
4308   if (unicode_path_len == 0) {
4309     return EINVAL;
4310   }
4311 
4312   *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, unicode_path_len, mtInternal);
4313 
4314   int result = MultiByteToWideChar(CP_ACP,
4315                                    MB_ERR_INVALID_CHARS,
4316                                    char_path, -1,
4317                                    *unicode_path, unicode_path_len);
4318   assert(result == unicode_path_len, "length already checked above");
4319 
4320   return ERROR_SUCCESS;
4321 }
4322 
4323 static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
4324   // Get required buffer size to convert to full path. The return
4325   // value INCLUDES the terminating null character.
4326   DWORD full_path_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
4327   if (full_path_len == 0) {
4328     return EINVAL;
4329   }
4330 
4331   *full_path = NEW_C_HEAP_ARRAY(WCHAR, full_path_len, mtInternal);
4332 
4333   // When the buffer has sufficient size, the return value EXCLUDES the
4334   // terminating null character
4335   DWORD result = GetFullPathNameW(unicode_path, full_path_len, *full_path, NULL);
4336   assert(result <= full_path_len, "length already checked above");
4337 
4338   return ERROR_SUCCESS;
4339 }
4340 
4341 static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
4342   *prefix_off = 0;
4343   *needs_fullpath = true;
4344 
4345   if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
4346     *prefix = L"\\\\?\\";
4347   } else if (buf[0] == '\\' && buf[1] == '\\') {
4348     if (buf[2] == '?' && buf[3] == '\\') {
4349       *prefix = L"";
4350       *needs_fullpath = false;
4351     } else {
4352       *prefix = L"\\\\?\\UNC";
4353       *prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4354     }
4355   } else {
4356     *prefix = L"\\\\?\\";
4357   }
4358 }
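
// Illustrative examples of the prefixing chosen above, as combined later in
// wide_abs_unc_path() (paths shown after os::native_path() conversion):
//   "D:\foo\bar"         gets prefix "\\?\"    -> "\\?\D:\foo\bar"
//   "\\server\share\foo" gets prefix "\\?\UNC" (and drops the leading '\')
//                                              -> "\\?\UNC\server\share\foo"
//   "\\?\D:\foo\bar"     is already in extended-length form and is used as-is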
4359 
// Returns the given path as an absolute wide path in UNC format. The returned path is NULL
4361 // on error (with err being set accordingly) and should be freed via os::free() otherwise.
4362 // additional_space is the size of space, in wchar_t, the function will additionally add to
4363 // the allocation of return buffer (such that the size of the returned buffer is at least
4364 // wcslen(buf) + 1 + additional_space).
4365 static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
4366   if ((path == NULL) || (path[0] == '\0')) {
4367     err = ENOENT;
4368     return NULL;
4369   }
4370 
4371   // Need to allocate at least room for 3 characters, since os::native_path transforms C: to C:.
4372   size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
4373   char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
4374   strncpy(buf, path, buf_len);
4375   os::native_path(buf);
4376 
4377   LPWSTR prefix = NULL;
4378   int prefix_off = 0;
4379   bool needs_fullpath = true;
4380   set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);
4381 
4382   LPWSTR unicode_path = NULL;
4383   err = convert_to_unicode(buf, &unicode_path);
4384   FREE_C_HEAP_ARRAY(char, buf);
4385   if (err != ERROR_SUCCESS) {
4386     return NULL;
4387   }
4388 
4389   LPWSTR converted_path = NULL;
4390   if (needs_fullpath) {
4391     err = get_full_path(unicode_path, &converted_path);
4392   } else {
4393     converted_path = unicode_path;
4394   }
4395 
4396   LPWSTR result = NULL;
4397   if (converted_path != NULL) {
4398     size_t prefix_len = wcslen(prefix);
4399     size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
4400     result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
4401     _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);
4402 
4403     // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
4404     result_len = wcslen(result);
4405     if ((result[result_len - 1] == L'\\') &&
4406         !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
4407       result[result_len - 1] = L'\0';
4408     }
4409   }
4410 
4411   if (converted_path != unicode_path) {
4412     FREE_C_HEAP_ARRAY(WCHAR, converted_path);
4413   }
4414   FREE_C_HEAP_ARRAY(WCHAR, unicode_path);
4415 
  return static_cast<wchar_t*>(result); // LPWSTR and wchar_t* are the same type on Windows.
4417 }
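
// Typical usage of wide_abs_unc_path() (this mirrors os::stat() below):
//
//   errno_t err;
//   wchar_t* wide_path = wide_abs_unc_path(path, err);
//   if (wide_path == NULL) {
//     errno = err;
//     return -1;
//   }
//   // ... call a W-suffixed Win32 API with wide_path ...
//   os::free(wide_path);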
4418 
4419 int os::stat(const char *path, struct stat *sbuf) {
4420   errno_t err;
4421   wchar_t* wide_path = wide_abs_unc_path(path, err);
4422 
4423   if (wide_path == NULL) {
4424     errno = err;
4425     return -1;
4426   }
4427 
  WIN32_FILE_ATTRIBUTE_DATA file_data;
4429   BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4430   os::free(wide_path);
4431 
4432   if (!bret) {
4433     errno = ::GetLastError();
4434     return -1;
4435   }
4436 
4437   file_attribute_data_to_stat(sbuf, file_data);
4438   return 0;
4439 }
4440 
4441 static HANDLE create_read_only_file_handle(const char* file) {
4442   errno_t err;
4443   wchar_t* wide_path = wide_abs_unc_path(file, err);
4444 
4445   if (wide_path == NULL) {
4446     errno = err;
4447     return INVALID_HANDLE_VALUE;
4448   }
4449 
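  // dwDesiredAccess is 0 here: the handle is only used by os::same_files()
  // below to query file metadata via GetFileInformationByHandle(), so no read
  // access to the file contents is requested.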
4450   HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4451                                 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4452   os::free(wide_path);
4453 
4454   return handle;
4455 }
4456 
4457 bool os::same_files(const char* file1, const char* file2) {
4458 
4459   if (file1 == NULL && file2 == NULL) {
4460     return true;
4461   }
4462 
4463   if (file1 == NULL || file2 == NULL) {
4464     return false;
4465   }
4466 
4467   if (strcmp(file1, file2) == 0) {
4468     return true;
4469   }
4470 
4471   HANDLE handle1 = create_read_only_file_handle(file1);
4472   HANDLE handle2 = create_read_only_file_handle(file2);
4473   bool result = false;
4474 
4475   // if we could open both paths...
4476   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4477     BY_HANDLE_FILE_INFORMATION fileInfo1;
4478     BY_HANDLE_FILE_INFORMATION fileInfo2;
4479     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4480       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4481       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4482       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4483         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4484         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4485         result = true;
4486       }
4487     }
4488   }
4489 
  // Close the handles.
4491   if (handle1 != INVALID_HANDLE_VALUE) {
4492     ::CloseHandle(handle1);
4493   }
4494 
4495   if (handle2 != INVALID_HANDLE_VALUE) {
4496     ::CloseHandle(handle2);
4497   }
4498 
4499   return result;
4500 }
4501 
4502 #define FT2INT64(ft) \
4503   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
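
// FILETIME values returned by GetThreadTimes() count 100-nanosecond intervals,
// so FT2INT64(ft) * 100 converts such a value to nanoseconds (as done in
// os::thread_cpu_time() below).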
4504 
4505 
4506 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4507 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4508 // of a thread.
4509 //
// current_thread_cpu_time() and thread_cpu_time(Thread*) return
// the fast estimate available on the platform.
4512 
4513 // current_thread_cpu_time() is not optimized for Windows yet
4514 jlong os::current_thread_cpu_time() {
4515   // return user + sys since the cost is the same
4516   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4517 }
4518 
4519 jlong os::thread_cpu_time(Thread* thread) {
4520   // consistent with what current_thread_cpu_time() returns.
4521   return os::thread_cpu_time(thread, true /* user+sys */);
4522 }
4523 
4524 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4525   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4526 }
4527 
4528 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4530   // If this function changes, os::is_thread_cpu_time_supported() should too
4531   FILETIME CreationTime;
4532   FILETIME ExitTime;
4533   FILETIME KernelTime;
4534   FILETIME UserTime;
4535 
4536   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4537                       &ExitTime, &KernelTime, &UserTime) == 0) {
4538     return -1;
4539   } else if (user_sys_cpu_time) {
4540     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4541   } else {
4542     return FT2INT64(UserTime) * 100;
4543   }
4544 }
4545 
4546 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4547   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4548   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4549   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4550   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4551 }
4552 
4553 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4554   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4555   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4556   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4557   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4558 }
4559 
4560 bool os::is_thread_cpu_time_supported() {
4561   // see os::thread_cpu_time
4562   FILETIME CreationTime;
4563   FILETIME ExitTime;
4564   FILETIME KernelTime;
4565   FILETIME UserTime;
4566 
4567   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4568                       &KernelTime, &UserTime) == 0) {
4569     return false;
4570   } else {
4571     return true;
4572   }
4573 }
4574 
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4576 // It does have primitives (PDH API) to get CPU usage and run queue length.
4577 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4578 // If we wanted to implement loadavg on Windows, we have a few options:
4579 //
4580 // a) Query CPU usage and run queue length and "fake" an answer by
4581 //    returning the CPU usage if it's under 100%, and the run queue
4582 //    length otherwise.  It turns out that querying is pretty slow
4583 //    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on Windows the CPU usage value is the % usage
4585 //    since the last time the API was called (and the first call
4586 //    returns 100%), so we'd have to deal with that as well.
4587 //
4588 // b) Sample the "fake" answer using a sampling thread and store
4589 //    the answer in a global variable.  The call to loadavg would
4590 //    just return the value of the global, avoiding the slow query.
4591 //
4592 // c) Sample a better answer using exponential decay to smooth the
4593 //    value.  This is basically the algorithm used by UNIX kernels.
4594 //
4595 // Note that sampling thread starvation could affect both (b) and (c).
4596 int os::loadavg(double loadavg[], int nelem) {
4597   return -1;
4598 }
4599 
4600 
4601 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4602 bool os::dont_yield() {
4603   return DontYieldALot;
4604 }
4605 
4606 int os::open(const char *path, int oflag, int mode) {
4607   errno_t err;
4608   wchar_t* wide_path = wide_abs_unc_path(path, err);
4609 
4610   if (wide_path == NULL) {
4611     errno = err;
4612     return -1;
4613   }
4614   int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
4615   os::free(wide_path);
4616 
4617   if (fd == -1) {
4618     errno = ::GetLastError();
4619   }
4620 
4621   return fd;
4622 }
4623 
4624 FILE* os::open(int fd, const char* mode) {
4625   return ::_fdopen(fd, mode);
4626 }
4627 
4628 // Is a (classpath) directory empty?
4629 bool os::dir_is_empty(const char* path) {
4630   errno_t err;
4631   wchar_t* wide_path = wide_abs_unc_path(path, err, 2);
4632 
4633   if (wide_path == NULL) {
4634     errno = err;
4635     return false;
4636   }
4637 
4638   // Make sure we end with "\\*"
4639   if (wide_path[wcslen(wide_path) - 1] == L'\\') {
4640     wcscat(wide_path, L"*");
4641   } else {
4642     wcscat(wide_path, L"\\*");
4643   }
4644 
4645   WIN32_FIND_DATAW fd;
4646   HANDLE f = ::FindFirstFileW(wide_path, &fd);
4647   os::free(wide_path);
4648   bool is_empty = true;
4649 
4650   if (f != INVALID_HANDLE_VALUE) {
4651     while (is_empty && ::FindNextFileW(f, &fd)) {
      // An empty directory contains only the current directory entry (".")
      // and the parent directory entry ("..").
4654       if ((wcscmp(fd.cFileName, L".") != 0) &&
4655           (wcscmp(fd.cFileName, L"..") != 0)) {
4656         is_empty = false;
4657       }
4658     }
4659     FindClose(f);
4660   } else {
4661     errno = ::GetLastError();
4662   }
4663 
4664   return is_empty;
4665 }
4666 
4667 // create binary file, rewriting existing file if required
4668 int os::create_binary_file(const char* path, bool rewrite_existing) {
4669   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4670   if (!rewrite_existing) {
4671     oflags |= _O_EXCL;
4672   }
4673   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4674 }
4675 
4676 // return current position of file pointer
4677 jlong os::current_file_offset(int fd) {
4678   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4679 }
4680 
4681 // move file pointer to the specified offset
4682 jlong os::seek_to_file_offset(int fd, jlong offset) {
4683   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4684 }
4685 
4686 
4687 jlong os::lseek(int fd, jlong offset, int whence) {
4688   return (jlong) ::_lseeki64(fd, offset, whence);
4689 }
4690 
4691 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4692   OVERLAPPED ov;
4693   DWORD nread;
4694   BOOL result;
4695 
4696   ZeroMemory(&ov, sizeof(ov));
4697   ov.Offset = (DWORD)offset;
4698   ov.OffsetHigh = (DWORD)(offset >> 32);
4699 
4700   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4701 
4702   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4703 
4704   return result ? nread : 0;
4705 }
4706 
4707 
4708 // This method is a slightly reworked copy of JDK's sysNativePath
4709 // from src/windows/hpi/src/path_md.c
4710 
4711 // Convert a pathname to native format.  On win32, this involves forcing all
4712 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4713 // sometimes rejects '/') and removing redundant separators.  The input path is
4714 // assumed to have been converted into the character encoding used by the local
4715 // system.  Because this might be a double-byte encoding, care is taken to
4716 // treat double-byte lead characters correctly.
4717 //
4718 // This procedure modifies the given path in place, as the result is never
4719 // longer than the original.  There is no error return; this operation always
4720 // succeeds.
4721 char * os::native_path(char *path) {
4722   char *src = path, *dst = path, *end = path;
4723   char *colon = NULL;  // If a drive specifier is found, this will
4724                        // point to the colon following the drive letter
4725 
4726   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4727   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4728           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4729 
4730   // Check for leading separators
4731 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4732   while (isfilesep(*src)) {
4733     src++;
4734   }
4735 
4736   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4737     // Remove leading separators if followed by drive specifier.  This
4738     // hack is necessary to support file URLs containing drive
4739     // specifiers (e.g., "file://c:/path").  As a side effect,
4740     // "/c:/path" can be used as an alternative to "c:/path".
4741     *dst++ = *src++;
4742     colon = dst;
4743     *dst++ = ':';
4744     src++;
4745   } else {
4746     src = path;
4747     if (isfilesep(src[0]) && isfilesep(src[1])) {
4748       // UNC pathname: Retain first separator; leave src pointed at
4749       // second separator so that further separators will be collapsed
4750       // into the second separator.  The result will be a pathname
4751       // beginning with "\\\\" followed (most likely) by a host name.
4752       src = dst = path + 1;
4753       path[0] = '\\';     // Force first separator to '\\'
4754     }
4755   }
4756 
4757   end = dst;
4758 
4759   // Remove redundant separators from remainder of path, forcing all
4760   // separators to be '\\' rather than '/'. Also, single byte space
4761   // characters are removed from the end of the path because those
4762   // are not legal ending characters on this operating system.
4763   //
4764   while (*src != '\0') {
4765     if (isfilesep(*src)) {
4766       *dst++ = '\\'; src++;
4767       while (isfilesep(*src)) src++;
4768       if (*src == '\0') {
4769         // Check for trailing separator
4770         end = dst;
4771         if (colon == dst - 2) break;  // "z:\\"
4772         if (dst == path + 1) break;   // "\\"
4773         if (dst == path + 2 && isfilesep(path[0])) {
4774           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4775           // beginning of a UNC pathname.  Even though it is not, by
4776           // itself, a valid UNC pathname, we leave it as is in order
4777           // to be consistent with the path canonicalizer as well
4778           // as the win32 APIs, which treat this case as an invalid
4779           // UNC pathname rather than as an alias for the root
4780           // directory of the current drive.
4781           break;
4782         }
4783         end = --dst;  // Path does not denote a root directory, so
4784                       // remove trailing separator
4785         break;
4786       }
4787       end = dst;
4788     } else {
4789       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4790         *dst++ = *src++;
4791         if (*src) *dst++ = *src++;
4792         end = dst;
4793       } else {  // Copy a single-byte character
4794         char c = *src++;
4795         *dst++ = c;
4796         // Space is not a legal ending character
4797         if (c != ' ') end = dst;
4798       }
4799     }
4800   }
4801 
4802   *end = '\0';
4803 
4804   // For "z:", add "." to work around a bug in the C runtime library
4805   if (colon == dst - 1) {
4806     path[2] = '.';
4807     path[3] = '\0';
4808   }
4809 
4810   return path;
4811 }
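
// Illustrative examples of os::native_path() (derived from the rules above):
//   "c:/foo//bar/"    becomes "c:\foo\bar"
//   "/c:/foo"         becomes "c:\foo"
//   "//server/share/" becomes "\\server\share"
//   "c:/"             becomes "c:\"
//   "c:"              becomes "c:."   (the C runtime workaround above)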
4812 
4813 // This code is a copy of JDK's sysSetLength
4814 // from src/windows/hpi/src/sys_api_md.c
4815 
4816 int os::ftruncate(int fd, jlong length) {
4817   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4818   long high = (long)(length >> 32);
4819   DWORD ret;
4820 
4821   if (h == (HANDLE)(-1)) {
4822     return -1;
4823   }
4824 
4825   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4826   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4827     return -1;
4828   }
4829 
4830   if (::SetEndOfFile(h) == FALSE) {
4831     return -1;
4832   }
4833 
4834   return 0;
4835 }
4836 
4837 int os::get_fileno(FILE* fp) {
4838   return _fileno(fp);
4839 }
4840 
4841 // This code is a copy of JDK's sysSync
4842 // from src/windows/hpi/src/sys_api_md.c
4843 // except for the legacy workaround for a bug in Win 98
4844 
4845 int os::fsync(int fd) {
4846   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4847 
4848   if ((!::FlushFileBuffers(handle)) &&
4849       (GetLastError() != ERROR_ACCESS_DENIED)) {
4850     // from winerror.h
4851     return -1;
4852   }
4853   return 0;
4854 }
4855 
4856 static int nonSeekAvailable(int, long *);
4857 static int stdinAvailable(int, long *);
4858 
4859 // This code is a copy of JDK's sysAvailable
4860 // from src/windows/hpi/src/sys_api_md.c
4861 
4862 int os::available(int fd, jlong *bytes) {
4863   jlong cur, end;
4864   struct _stati64 stbuf64;
4865 
4866   if (::_fstati64(fd, &stbuf64) >= 0) {
4867     int mode = stbuf64.st_mode;
4868     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4869       int ret;
4870       long lpbytes;
4871       if (fd == 0) {
4872         ret = stdinAvailable(fd, &lpbytes);
4873       } else {
4874         ret = nonSeekAvailable(fd, &lpbytes);
4875       }
4876       (*bytes) = (jlong)(lpbytes);
4877       return ret;
4878     }
4879     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4880       return FALSE;
4881     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4882       return FALSE;
4883     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4884       return FALSE;
4885     }
4886     *bytes = end - cur;
4887     return TRUE;
4888   } else {
4889     return FALSE;
4890   }
4891 }
4892 
4893 void os::flockfile(FILE* fp) {
4894   _lock_file(fp);
4895 }
4896 
4897 void os::funlockfile(FILE* fp) {
4898   _unlock_file(fp);
4899 }
4900 
4901 // This code is a copy of JDK's nonSeekAvailable
4902 // from src/windows/hpi/src/sys_api_md.c
4903 
4904 static int nonSeekAvailable(int fd, long *pbytes) {
4905   // This is used for available on non-seekable devices
4906   // (like both named and anonymous pipes, such as pipes
4907   //  connected to an exec'd process).
4908   // Standard Input is a special case.
4909   HANDLE han;
4910 
4911   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4912     return FALSE;
4913   }
4914 
4915   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4916     // PeekNamedPipe fails when at EOF.  In that case we
4917     // simply make *pbytes = 0 which is consistent with the
4918     // behavior we get on Solaris when an fd is at EOF.
4919     // The only alternative is to raise an Exception,
4920     // which isn't really warranted.
4921     //
4922     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4923       return FALSE;
4924     }
4925     *pbytes = 0;
4926   }
4927   return TRUE;
4928 }
4929 
4930 #define MAX_INPUT_EVENTS 2000
4931 
4932 // This code is a copy of JDK's stdinAvailable
4933 // from src/windows/hpi/src/sys_api_md.c
4934 
4935 static int stdinAvailable(int fd, long *pbytes) {
4936   HANDLE han;
4937   DWORD numEventsRead = 0;  // Number of events read from buffer
4938   DWORD numEvents = 0;      // Number of events in buffer
4939   DWORD i = 0;              // Loop index
4940   DWORD curLength = 0;      // Position marker
4941   DWORD actualLength = 0;   // Number of bytes readable
4942   BOOL error = FALSE;       // Error holder
4943   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4944 
4945   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4946     return FALSE;
4947   }
4948 
4949   // Construct an array of input records in the console buffer
4950   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4951   if (error == 0) {
4952     return nonSeekAvailable(fd, pbytes);
4953   }
4954 
4955   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4956   if (numEvents > MAX_INPUT_EVENTS) {
4957     numEvents = MAX_INPUT_EVENTS;
4958   }
4959 
4960   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4961   if (lpBuffer == NULL) {
4962     return FALSE;
4963   }
4964 
4965   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4966   if (error == 0) {
4967     os::free(lpBuffer);
4968     return FALSE;
4969   }
4970 
4971   // Examine input records for the number of bytes available
  for (i = 0; i < numEvents; i++) {
4973     if (lpBuffer[i].EventType == KEY_EVENT) {
4974 
4975       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4976                                       &(lpBuffer[i].Event);
4977       if (keyRecord->bKeyDown == TRUE) {
4978         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4979         curLength++;
4980         if (*keyPressed == '\r') {
4981           actualLength = curLength;
4982         }
4983       }
4984     }
4985   }
4986 
4987   if (lpBuffer != NULL) {
4988     os::free(lpBuffer);
4989   }
4990 
4991   *pbytes = (long) actualLength;
4992   return TRUE;
4993 }
4994 
4995 // Map a block of memory.
4996 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4997                         char *addr, size_t bytes, bool read_only,
4998                         bool allow_exec) {
4999   HANDLE hFile;
5000   char* base;
5001 
5002   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
5003                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  // Note: CreateFile() reports failure by returning INVALID_HANDLE_VALUE, not NULL.
  if (hFile == INVALID_HANDLE_VALUE) {
5005     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
5006     return NULL;
5007   }
5008 
5009   if (allow_exec) {
5010     // CreateFileMapping/MapViewOfFileEx can't map executable memory
5011     // unless it comes from a PE image (which the shared archive is not.)
5012     // Even VirtualProtect refuses to give execute access to mapped memory
5013     // that was not previously executable.
5014     //
5015     // Instead, stick the executable region in anonymous memory.  Yuck.
5016     // Penalty is that ~4 pages will not be shareable - in the future
5017     // we might consider DLLizing the shared archive with a proper PE
5018     // header so that mapping executable + sharing is possible.
5019 
5020     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
5021                                 PAGE_READWRITE);
5022     if (base == NULL) {
5023       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
5024       CloseHandle(hFile);
5025       return NULL;
5026     }
5027 
5028     // Record virtual memory allocation
5029     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
5030 
5031     DWORD bytes_read;
5032     OVERLAPPED overlapped;
5033     overlapped.Offset = (DWORD)file_offset;
5034     overlapped.OffsetHigh = 0;
5035     overlapped.hEvent = NULL;
5036     // ReadFile guarantees that if the return value is true, the requested
5037     // number of bytes were read before returning.
5038     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
5039     if (!res) {
5040       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
5041       release_memory(base, bytes);
5042       CloseHandle(hFile);
5043       return NULL;
5044     }
5045   } else {
5046     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
5047                                     NULL /* file_name */);
5048     if (hMap == NULL) {
5049       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
5050       CloseHandle(hFile);
5051       return NULL;
5052     }
5053 
5054     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
5055     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
5056                                   (DWORD)bytes, addr);
5057     if (base == NULL) {
5058       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
5059       CloseHandle(hMap);
5060       CloseHandle(hFile);
5061       return NULL;
5062     }
5063 
5064     if (CloseHandle(hMap) == 0) {
5065       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
5066       CloseHandle(hFile);
5067       return base;
5068     }
5069   }
5070 
5071   if (allow_exec) {
5072     DWORD old_protect;
5073     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
5074     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
5075 
5076     if (!res) {
5077       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
5078       // Don't consider this a hard error, on IA32 even if the
5079       // VirtualProtect fails, we should still be able to execute
5080       CloseHandle(hFile);
5081       return base;
5082     }
5083   }
5084 
5085   if (CloseHandle(hFile) == 0) {
5086     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
5087     return base;
5088   }
5089 
5090   return base;
5091 }
5092 
5093 
5094 // Remap a block of memory.
5095 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5096                           char *addr, size_t bytes, bool read_only,
5097                           bool allow_exec) {
5098   // This OS does not allow existing memory maps to be remapped so we
5099   // would have to unmap the memory before we remap it.
5100 
5101   // Because there is a small window between unmapping memory and mapping
5102   // it in again with different protections, CDS archives are mapped RW
5103   // on windows, so this function isn't called.
5104   ShouldNotReachHere();
5105   return NULL;
5106 }
5107 
5108 
5109 // Unmap a block of memory.
5110 // Returns true=success, otherwise false.
5111 
5112 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5113   MEMORY_BASIC_INFORMATION mem_info;
5114   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5115     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5116     return false;
5117   }
5118 
5119   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5120   // Instead, executable region was allocated using VirtualAlloc(). See
5121   // pd_map_memory() above.
5122   //
  // The following flags should match the 'exec_access' flags used for
5124   // VirtualProtect() in pd_map_memory().
5125   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5126       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5127     return pd_release_memory(addr, bytes);
5128   }
5129 
5130   BOOL result = UnmapViewOfFile(addr);
5131   if (result == 0) {
5132     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
5133     return false;
5134   }
5135   return true;
5136 }
5137 
5138 void os::pause() {
5139   char filename[MAX_PATH];
5140   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5141     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5142   } else {
5143     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5144   }
5145 
5146   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5147   if (fd != -1) {
5148     struct stat buf;
5149     ::close(fd);
5150     while (::stat(filename, &buf) == 0) {
5151       Sleep(100);
5152     }
5153   } else {
5154     jio_fprintf(stderr,
5155                 "Could not open pause file '%s', continuing immediately.\n", filename);
5156   }
5157 }
5158 
5159 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5160 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5161 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
5162 
5163 os::ThreadCrashProtection::ThreadCrashProtection() {
5164 }
5165 
5166 // See the caveats for this class in os_windows.hpp
// Protects the callback call so that a raised OS EXCEPTION causes a jump back
// into this method, which then returns false. If no OS EXCEPTION was raised,
// returns true.
5170 // The callback is supposed to provide the method that should be protected.
5171 //
5172 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5173 
5174   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5175 
5176   _protected_thread = Thread::current_or_null();
5177   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5178 
5179   bool success = true;
5180   __try {
5181     _crash_protection = this;
5182     cb.call();
5183   } __except(EXCEPTION_EXECUTE_HANDLER) {
5184     // only for protection, nothing to do
5185     success = false;
5186   }
5187   _crash_protection = NULL;
5188   _protected_thread = NULL;
5189   Thread::muxRelease(&_crash_mux);
5190   return success;
5191 }
5192 
5193 
5194 class HighResolutionInterval : public CHeapObj<mtThread> {
5195   // The default timer resolution seems to be 10 milliseconds.
5196   // (Where is this written down?)
5197   // If someone wants to sleep for only a fraction of the default,
5198   // then we set the timer resolution down to 1 millisecond for
5199   // the duration of their interval.
5200   // We carefully set the resolution back, since otherwise we
5201   // seem to incur an overhead (3%?) that we don't need.
5202   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5204   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5205   // timeBeginPeriod() if the relative error exceeded some threshold.
5206   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5207   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5208   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5209   // resolution timers running.
5210  private:
5211   jlong resolution;
5212  public:
5213   HighResolutionInterval(jlong ms) {
5214     resolution = ms % 10L;
5215     if (resolution != 0) {
5216       MMRESULT result = timeBeginPeriod(1L);
5217     }
5218   }
5219   ~HighResolutionInterval() {
5220     if (resolution != 0) {
5221       MMRESULT result = timeEndPeriod(1L);
5222     }
5223     resolution = 0L;
5224   }
5225 };
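
// Usage sketch (see os::PlatformEvent::park(jlong) below): construct a
// HighResolutionInterval around a timed wait; if the requested interval is not
// a multiple of the default 10 ms resolution, the constructor switches to 1 ms
// resolution and the destructor restores the default.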
5226 
5227 // An Event wraps a win32 "CreateEvent" kernel handle.
5228 //
5229 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5230 //
5231 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5232 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5233 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5234 //     In addition, an unpark() operation might fetch the handle field, but the
5235 //     event could recycle between the fetch and the SetEvent() operation.
5236 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5237 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on a stale but recycled handle would be harmless, but in practice this might
5239 //     confuse other non-Sun code, so it's not a viable approach.
5240 //
5241 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5242 //     with the Event.  The event handle is never closed.  This could be construed
5243 //     as handle leakage, but only up to the maximum # of threads that have been extant
5244 //     at any one time.  This shouldn't be an issue, as windows platforms typically
5245 //     permit a process to have hundreds of thousands of open handles.
5246 //
5247 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5248 //     and release unused handles.
5249 //
5250 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5251 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5252 //
5253 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5254 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5255 //
5256 // We use (2).
5257 //
5258 // TODO-FIXME:
5259 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5260 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5261 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5262 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5263 //     into a single win32 CreateEvent() handle.
5264 //
5265 // Assumption:
5266 //    Only one parker can exist on an event, which is why we allocate
5267 //    them per-thread. Multiple unparkers can coexist.
5268 //
5269 // _Event transitions in park()
5270 //   -1 => -1 : illegal
5271 //    1 =>  0 : pass - return immediately
5272 //    0 => -1 : block; then set _Event to 0 before returning
5273 //
5274 // _Event transitions in unpark()
5275 //    0 => 1 : just return
5276 //    1 => 1 : just return
5277 //   -1 => either 0 or 1; must signal target thread
5278 //         That is, we can safely transition _Event from -1 to either
5279 //         0 or 1.
5280 //
5281 // _Event serves as a restricted-range semaphore.
5282 //   -1 : thread is blocked, i.e. there is a waiter
5283 //    0 : neutral: thread is running or ready,
5284 //        could have been signaled after a wait started
5285 //    1 : signaled - thread is running or ready
5286 //
5287 // Another possible encoding of _Event would be with
5288 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5289 //
5290 
5291 int os::PlatformEvent::park(jlong Millis) {
5292   // Transitions for _Event:
5293   //   -1 => -1 : illegal
5294   //    1 =>  0 : pass - return immediately
5295   //    0 => -1 : block; then set _Event to 0 before returning
5296 
5297   guarantee(_ParkHandle != NULL , "Invariant");
5298   guarantee(Millis > 0          , "Invariant");
5299 
5300   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5301   // the initial park() operation.
5302   // Consider: use atomic decrement instead of CAS-loop
5303 
5304   int v;
5305   for (;;) {
5306     v = _Event;
5307     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5308   }
5309   guarantee((v == 0) || (v == 1), "invariant");
5310   if (v != 0) return OS_OK;
5311 
5312   // Do this the hard way by blocking ...
5313   // TODO: consider a brief spin here, gated on the success of recent
5314   // spin attempts by this thread.
5315   //
5316   // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5318   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5319   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5320   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5321   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5322   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5323   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5324   // for the already waited time.  This policy does not admit any new outcomes.
5325   // In the future, however, we might want to track the accumulated wait time and
5326   // adjust Millis accordingly if we encounter a spurious wakeup.
5327 
5328   const int MAXTIMEOUT = 0x10000000;
5329   DWORD rv = WAIT_TIMEOUT;
5330   while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5332     if (Millis > MAXTIMEOUT) {
5333       prd = MAXTIMEOUT;
5334     }
5335     HighResolutionInterval *phri = NULL;
5336     if (!ForceTimeHighResolution) {
5337       phri = new HighResolutionInterval(prd);
5338     }
5339     rv = ::WaitForSingleObject(_ParkHandle, prd);
5340     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5341     if (rv == WAIT_TIMEOUT) {
5342       Millis -= prd;
5343     }
5344     delete phri; // if it is NULL, harmless
5345   }
5346   v = _Event;
5347   _Event = 0;
5348   // see comment at end of os::PlatformEvent::park() below:
5349   OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
5351   // we return OS_OK indicating we awoke via unpark().
5352   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5353   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5354 }
5355 
5356 void os::PlatformEvent::park() {
5357   // Transitions for _Event:
5358   //   -1 => -1 : illegal
5359   //    1 =>  0 : pass - return immediately
5360   //    0 => -1 : block; then set _Event to 0 before returning
5361 
5362   guarantee(_ParkHandle != NULL, "Invariant");
5363   // Invariant: Only the thread associated with the Event/PlatformEvent
5364   // may call park().
5365   // Consider: use atomic decrement instead of CAS-loop
5366   int v;
5367   for (;;) {
5368     v = _Event;
5369     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5370   }
5371   guarantee((v == 0) || (v == 1), "invariant");
5372   if (v != 0) return;
5373 
5374   // Do this the hard way by blocking ...
5375   // TODO: consider a brief spin here, gated on the success of recent
5376   // spin attempts by this thread.
5377   while (_Event < 0) {
5378     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5379     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5380   }
5381 
5382   // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
5384   // multiple unpark() operations drove _Event up to 1.
5385   _Event = 0;
5386   OrderAccess::fence();
5387   guarantee(_Event >= 0, "invariant");
5388 }
5389 
5390 void os::PlatformEvent::unpark() {
5391   guarantee(_ParkHandle != NULL, "Invariant");
5392 
5393   // Transitions for _Event:
5394   //    0 => 1 : just return
5395   //    1 => 1 : just return
5396   //   -1 => either 0 or 1; must signal target thread
5397   //         That is, we can safely transition _Event from -1 to either
5398   //         0 or 1.
5399   // See also: "Semaphores in Plan 9" by Mullender & Cox
5400   //
5401   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5402   // that it will take two back-to-back park() calls for the owning
5403   // thread to block. This has the benefit of forcing a spurious return
5404   // from the first park() call after an unpark() call which will help
5405   // shake out uses of park() and unpark() without condition variables.
5406 
5407   if (Atomic::xchg(&_Event, 1) >= 0) return;
5408 
5409   ::SetEvent(_ParkHandle);
5410 }
5411 
5412 
5413 // JSR166
5414 // -------------------------------------------------------
5415 
5416 // The Windows implementation of Park is very straightforward: Basic
5417 // operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically reuse the event inherited
5419 // from Monitor.
5420 
5421 void Parker::park(bool isAbsolute, jlong time) {
5422   guarantee(_ParkEvent != NULL, "invariant");
5423   // First, demultiplex/decode time arguments
5424   if (time < 0) { // don't wait
5425     return;
5426   } else if (time == 0 && !isAbsolute) {
5427     time = INFINITE;
5428   } else if (isAbsolute) {
5429     time -= os::javaTimeMillis(); // convert to relative time
5430     if (time <= 0) {  // already elapsed
5431       return;
5432     }
5433   } else { // relative
5434     time /= 1000000;  // Must coarsen from nanos to millis
5435     if (time == 0) {  // Wait for the minimal time unit if zero
5436       time = 1;
5437     }
5438   }
5439 
5440   JavaThread* thread = JavaThread::current();
5441 
5442   // Don't wait if interrupted or already triggered
5443   if (thread->is_interrupted(false) ||
5444       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5445     ResetEvent(_ParkEvent);
5446     return;
5447   } else {
5448     ThreadBlockInVM tbivm(thread);
5449     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5450     thread->set_suspend_equivalent();
5451 
5452     WaitForSingleObject(_ParkEvent, time);
5453     ResetEvent(_ParkEvent);
5454 
5455     // If externally suspended while waiting, re-suspend
5456     if (thread->handle_special_suspend_equivalent_condition()) {
5457       thread->java_suspend_self();
5458     }
5459   }
5460 }
5461 
5462 void Parker::unpark() {
5463   guarantee(_ParkEvent != NULL, "invariant");
5464   SetEvent(_ParkEvent);
5465 }
5466 
5467 // Platform Monitor implementation
5468 
5469 // Must already be locked
5470 int os::PlatformMonitor::wait(jlong millis) {
5471   assert(millis >= 0, "negative timeout");
5472   int ret = OS_TIMEOUT;
5473   int status = SleepConditionVariableCS(&_cond, &_mutex,
5474                                         millis == 0 ? INFINITE : millis);
5475   if (status != 0) {
5476     ret = OS_OK;
5477   }
5478   #ifndef PRODUCT
5479   else {
5480     DWORD err = GetLastError();
5481     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5482   }
5483   #endif
5484   return ret;
5485 }
5486 
5487 // Run the specified command in a separate process. Return its exit value,
5488 // or -1 on failure (e.g. can't create a new process).
5489 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5490   STARTUPINFO si;
5491   PROCESS_INFORMATION pi;
5492   DWORD exit_code;
5493 
5494   char * cmd_string;
5495   const char * cmd_prefix = "cmd /C ";
5496   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5497   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5498   if (cmd_string == NULL) {
5499     return -1;
5500   }
5501   cmd_string[0] = '\0';
5502   strcat(cmd_string, cmd_prefix);
5503   strcat(cmd_string, cmd);
5504 
5505   // now replace all '\n' with '&'
5506   char * substring = cmd_string;
5507   while ((substring = strchr(substring, '\n')) != NULL) {
5508     substring[0] = '&';
5509     substring++;
5510   }
5511   memset(&si, 0, sizeof(si));
5512   si.cb = sizeof(si);
5513   memset(&pi, 0, sizeof(pi));
5514   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5515                             cmd_string,    // command line
5516                             NULL,   // process security attribute
5517                             NULL,   // thread security attribute
5518                             TRUE,   // inherits system handles
5519                             0,      // no creation flags
5520                             NULL,   // use parent's environment block
5521                             NULL,   // use parent's starting directory
5522                             &si,    // (in) startup information
5523                             &pi);   // (out) process information
5524 
5525   if (rslt) {
5526     // Wait until child process exits.
5527     WaitForSingleObject(pi.hProcess, INFINITE);
5528 
5529     GetExitCodeProcess(pi.hProcess, &exit_code);
5530 
5531     // Close process and thread handles.
5532     CloseHandle(pi.hProcess);
5533     CloseHandle(pi.hThread);
5534   } else {
5535     exit_code = -1;
5536   }
5537 
5538   FREE_C_HEAP_ARRAY(char, cmd_string);
5539   return (int)exit_code;
5540 }
5541 
5542 bool os::find(address addr, outputStream* st) {
5543   int offset = -1;
5544   bool result = false;
5545   char buf[256];
5546   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5547     st->print(PTR_FORMAT " ", addr);
5548     if (strlen(buf) < sizeof(buf) - 1) {
5549       char* p = strrchr(buf, '\\');
5550       if (p) {
5551         st->print("%s", p + 1);
5552       } else {
5553         st->print("%s", buf);
5554       }
5555     } else {
5556         // The library name is probably truncated. Let's omit the library name.
5557         // See also JDK-8147512.
5558     }
5559     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5560       st->print("::%s + 0x%x", buf, offset);
5561     }
5562     st->cr();
5563     result = true;
5564   }
5565   return result;
5566 }
5567 
5568 static jint initSock() {
5569   WSADATA wsadata;
5570 
5571   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5572     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5573                 ::GetLastError());
5574     return JNI_ERR;
5575   }
5576   return JNI_OK;
5577 }
5578 
5579 struct hostent* os::get_host_by_name(char* name) {
5580   return (struct hostent*)gethostbyname(name);
5581 }
5582 
5583 int os::socket_close(int fd) {
5584   return ::closesocket(fd);
5585 }
5586 
5587 int os::socket(int domain, int type, int protocol) {
5588   return ::socket(domain, type, protocol);
5589 }
5590 
5591 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5592   return ::connect(fd, him, len);
5593 }
5594 
5595 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5596   return ::recv(fd, buf, (int)nBytes, flags);
5597 }
5598 
5599 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5600   return ::send(fd, buf, (int)nBytes, flags);
5601 }
5602 
5603 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5604   return ::send(fd, buf, (int)nBytes, flags);
5605 }
5606 
5607 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5608 #if defined(IA32)
5609   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5610 #elif defined (AMD64)
5611   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5612 #endif
5613 
5614 // returns true if thread could be suspended,
5615 // false otherwise
5616 static bool do_suspend(HANDLE* h) {
5617   if (h != NULL) {
5618     if (SuspendThread(*h) != ~0) {
5619       return true;
5620     }
5621   }
5622   return false;
5623 }
5624 
5625 // resume the thread
5626 // calling resume on an active thread is a no-op
5627 static void do_resume(HANDLE* h) {
5628   if (h != NULL) {
5629     ResumeThread(*h);
5630   }
5631 }
5632 
5633 // retrieve a suspend/resume context capable handle
5634 // from the tid. Caller validates handle return value.
5635 void get_thread_handle_for_extended_context(HANDLE* h,
5636                                             OSThread::thread_id_t tid) {
5637   if (h != NULL) {
5638     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5639   }
5640 }
5641 
5642 // Thread sampling implementation
5643 //
5644 void os::SuspendedThreadTask::internal_do_task() {
5645   CONTEXT    ctxt;
5646   HANDLE     h = NULL;
5647 
5648   // get context capable handle for thread
5649   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5650 
5651   // sanity
5652   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5653     return;
5654   }
5655 
5656   // suspend the thread
5657   if (do_suspend(&h)) {
5658     ctxt.ContextFlags = sampling_context_flags;
5659     // get thread context
5660     GetThreadContext(h, &ctxt);
5661     SuspendedThreadTaskContext context(_thread, &ctxt);
5662     // pass context to Thread Sampling impl
5663     do_task(context);
5664     // resume thread
5665     do_resume(&h);
5666   }
5667 
5668   // close handle
5669   CloseHandle(h);
5670 }
5671 
5672 bool os::start_debugging(char *buf, int buflen) {
5673   int len = (int)strlen(buf);
5674   char *p = &buf[len];
5675 
5676   jio_snprintf(p, buflen-len,
5677              "\n\n"
5678              "Do you want to debug the problem?\n\n"
5679              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5680              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5681              "Otherwise, select 'No' to abort...",
5682              os::current_process_id(), os::current_thread_id());
5683 
5684   bool yes = os::message_box("Unexpected Error", buf);
5685 
5686   if (yes) {
5687     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5688     // exception. If VM is running inside a debugger, the debugger will
5689     // catch the exception. Otherwise, the breakpoint exception will reach
5690     // the default windows exception handler, which can spawn a debugger and
5691     // automatically attach to the dying VM.
5692     os::breakpoint();
5693     yes = false;
5694   }
5695   return yes;
5696 }
5697 
5698 void* os::get_default_process_handle() {
5699   return (void*)GetModuleHandle(NULL);
5700 }
5701 
5702 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5703 // which is used to find statically linked in agents.
5704 // Additionally for windows, takes into account __stdcall names.
5705 // Parameters:
5706 //            sym_name: Symbol in library we are looking for
5707 //            lib_name: Name of library to look in, NULL for shared libs.
5708 //            is_absolute_path == true if lib_name is absolute path to agent
5709 //                                     such as "C:/a/b/L.dll"
5710 //            == false if only the base name of the library is passed in
5711 //               such as "L"
5712 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5713                                     bool is_absolute_path) {
5714   char *agent_entry_name;
5715   size_t len;
5716   size_t name_len;
5717   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5718   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5719   const char *start;
5720 
5721   if (lib_name != NULL) {
5722     len = name_len = strlen(lib_name);
5723     if (is_absolute_path) {
5724       // Need to strip path, prefix and suffix
5725       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5726         lib_name = ++start;
5727       } else {
5728         // Need to check for drive prefix
5729         if ((start = strchr(lib_name, ':')) != NULL) {
5730           lib_name = ++start;
5731         }
5732       }
5733       if (len <= (prefix_len + suffix_len)) {
5734         return NULL;
5735       }
5736       lib_name += prefix_len;
5737       name_len = strlen(lib_name) - suffix_len;
5738     }
5739   }
5740   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5741   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5742   if (agent_entry_name == NULL) {
5743     return NULL;
5744   }
5745   if (lib_name != NULL) {
5746     const char *p = strrchr(sym_name, '@');
5747     if (p != NULL && p != sym_name) {
5748       // sym_name == _Agent_OnLoad@XX
5749       strncpy(agent_entry_name, sym_name, (p - sym_name));
5750       agent_entry_name[(p-sym_name)] = '\0';
5751       // agent_entry_name == _Agent_OnLoad
5752       strcat(agent_entry_name, "_");
5753       strncat(agent_entry_name, lib_name, name_len);
5754       strcat(agent_entry_name, p);
5755       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5756     } else {
5757       strcpy(agent_entry_name, sym_name);
5758       strcat(agent_entry_name, "_");
5759       strncat(agent_entry_name, lib_name, name_len);
5760     }
5761   } else {
5762     strcpy(agent_entry_name, sym_name);
5763   }
5764   return agent_entry_name;
5765 }
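
// Illustrative examples (assuming the usual Windows values JNI_LIB_PREFIX == ""
// and JNI_LIB_SUFFIX == ".dll"):
//   build_agent_function_name("Agent_OnLoad", NULL, false)
//       -> "Agent_OnLoad"
//   build_agent_function_name("Agent_OnLoad", "L", false)
//       -> "Agent_OnLoad_L"
//   build_agent_function_name("_Agent_OnLoad@8", "C:\\a\\b\\L.dll", true)
//       -> "_Agent_OnLoad_L@8"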
5766 
5767 #ifndef PRODUCT
5768 
5769 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5770 // contiguous memory block at a particular address.
5771 // The test first tries to find a good approximate address to allocate at by using the same
5772 // method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it, to avoid possible by-chance use of that location).
// This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5775 // the previously allocated memory is available for allocation. The only actual failure
5776 // that is reported is when the test tries to allocate at a particular location but gets a
5777 // different valid one. A NULL return value at this point is not considered an error but may
5778 // be legitimate.
5779 void TestReserveMemorySpecial_test() {
5780   if (!UseLargePages) {
5781     return;
5782   }
5783   // save current value of globals
5784   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5785   bool old_use_numa_interleaving = UseNUMAInterleaving;
5786 
5787   // set globals to make sure we hit the correct code path
5788   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5789 
5790   // do an allocation at an address selected by the OS to get a good one.
5791   const size_t large_allocation_size = os::large_page_size() * 4;
5792   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
  if (result == NULL) {
    // Failed to allocate at an address chosen by the OS; nothing to test in that case.
  } else {
5795     os::release_memory_special(result, large_allocation_size);
5796 
5797     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5798     // we managed to get it once.
5799     const size_t expected_allocation_size = os::large_page_size();
5800     char* expected_location = result + os::large_page_size();
5801     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
    if (actual_location == NULL) {
      // As noted above, a NULL result here is not considered an error.
    } else {
5804       // release memory
5805       os::release_memory_special(actual_location, expected_allocation_size);
5806       // only now check, after releasing any memory to avoid any leaks.
5807       assert(actual_location == expected_location,
5808              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5809              expected_location, expected_allocation_size, actual_location);
5810     }
5811   }
5812 
5813   // restore globals
5814   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5815   UseNUMAInterleaving = old_use_numa_interleaving;
5816 }
5817 #endif // PRODUCT
5818 
5819 /*
5820   All the defined signal names for Windows.
5821 
5822   NOTE that not all of these names are accepted by FindSignal!
5823 
5824   For various reasons some of these may be rejected at runtime.
5825 
5826   Here are the names currently accepted by a user of sun.misc.Signal with
5827   1.4.1 (ignoring potential interaction with use of chaining, etc):
5828 
5829      (LIST TBD)
5830 
5831 */
5832 int os::get_signal_number(const char* name) {
5833   static const struct {
5834     const char* name;
5835     int         number;
5836   } siglabels [] =
5837     // derived from version 6.0 VC98/include/signal.h
  {"ABRT",      SIGABRT,        // abnormal termination triggered by abort call
5839   "FPE",        SIGFPE,         // floating point exception
5840   "SEGV",       SIGSEGV,        // segment violation
5841   "INT",        SIGINT,         // interrupt
5842   "TERM",       SIGTERM,        // software term signal from kill
5843   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5844   "ILL",        SIGILL};        // illegal instruction
5845   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5846     if (strcmp(name, siglabels[i].name) == 0) {
5847       return siglabels[i].number;
5848     }
5849   }
5850   return -1;
5851 }
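
// For example, os::get_signal_number("TERM") returns SIGTERM, while a name not
// in the table above (e.g. "HUP") returns -1.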
5852 
5853 // Fast current thread access
5854 
5855 int os::win32::_thread_ptr_offset = 0;
5856 
5857 static void call_wrapper_dummy() {}
5858 
5859 // We need to call the os_exception_wrapper once so that it sets
5860 // up the offset from FS of the thread pointer.
5861 void os::win32::initialize_thread_ptr_offset() {
5862   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5863                            NULL, methodHandle(), NULL, NULL);
5864 }
5865 
5866 bool os::supports_map_sync() {
5867   return false;
5868 }