1 /*
   2  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/codeCache.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/disassembler.hpp"
  38 #include "interpreter/interpreter.hpp"
  39 #include "logging/log.hpp"
  40 #include "logging/logStream.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/filemap.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "os_share_windows.hpp"
  45 #include "os_windows.inline.hpp"
  46 #include "prims/jniFastGetField.hpp"
  47 #include "prims/jvm_misc.hpp"
  48 #include "runtime/arguments.hpp"
  49 #include "runtime/atomic.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/safepointMechanism.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/statSampler.hpp"
  62 #include "runtime/stubRoutines.hpp"
  63 #include "runtime/thread.inline.hpp"
  64 #include "runtime/threadCritical.hpp"
  65 #include "runtime/timer.hpp"
  66 #include "runtime/vm_version.hpp"
  67 #include "services/attachListener.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/align.hpp"
  71 #include "utilities/decoder.hpp"
  72 #include "utilities/defaultStream.hpp"
  73 #include "utilities/events.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 #include "symbolengine.hpp"
  77 #include "windbghelp.hpp"
  78 
  79 #ifdef _DEBUG
  80 #include <crtdbg.h>
  81 #endif
  82 
  83 #include <windows.h>
  84 #include <sys/types.h>
  85 #include <sys/stat.h>
  86 #include <sys/timeb.h>
  87 #include <objidl.h>
  88 #include <shlobj.h>
  89 
  90 #include <malloc.h>
  91 #include <signal.h>
  92 #include <direct.h>
  93 #include <errno.h>
  94 #include <fcntl.h>
  95 #include <io.h>
  96 #include <process.h>              // For _beginthreadex(), _endthreadex()
  97 #include <imagehlp.h>             // For os::dll_address_to_function_name
  98 // for enumerating dll libraries
  99 #include <vdmdbg.h>
 100 #include <psapi.h>
 101 #include <mmsystem.h>
 102 #include <winsock2.h>
 103 
 104 // for timer info max values which include all bits
 105 #define ALL_64_BITS CONST64(-1)
 106 
 107 // For DLL loading/load error detection
 108 // Values of PE COFF
 109 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 110 #define IMAGE_FILE_SIGNATURE_LENGTH 4
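// (In a PE image, the 32-bit value at file offset 0x3c holds the offset of the
//  4-byte "PE\0\0" signature; the COFF file header, whose first 16-bit field
//  identifies the target machine/architecture, follows immediately after.
//  os::dll_load below uses these constants to read that machine field.)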
 111 
 112 static HANDLE main_process;
 113 static HANDLE main_thread;
 114 static int    main_thread_id;
 115 
 116 static FILETIME process_creation_time;
 117 static FILETIME process_exit_time;
 118 static FILETIME process_user_time;
 119 static FILETIME process_kernel_time;
 120 
 121 #ifdef _M_AMD64
 122   #define __CPU__ amd64
 123 #else
 124   #define __CPU__ i486
 125 #endif
 126 
 127 #if INCLUDE_AOT
 128 PVOID  topLevelVectoredExceptionHandler = NULL;
 129 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 130 #endif
 131 
 132 // save DLL module handle, used by GetModuleFileName
 133 
 134 HINSTANCE vm_lib_handle;
 135 
 136 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 137   switch (reason) {
 138   case DLL_PROCESS_ATTACH:
 139     vm_lib_handle = hinst;
 140     if (ForceTimeHighResolution) {
 141       timeBeginPeriod(1L);
 142     }
 143     WindowsDbgHelp::pre_initialize();
 144     SymbolEngine::pre_initialize();
 145     break;
 146   case DLL_PROCESS_DETACH:
 147     if (ForceTimeHighResolution) {
 148       timeEndPeriod(1L);
 149     }
 150 #if INCLUDE_AOT
 151     if (topLevelVectoredExceptionHandler != NULL) {
 152       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 153       topLevelVectoredExceptionHandler = NULL;
 154     }
 155 #endif
 156     break;
 157   default:
 158     break;
 159   }
 160   return true;
 161 }
 162 
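// Convert a FILETIME value (a 64-bit count of 100-nanosecond units split into
// two 32-bit halves) to seconds, as a double. Note the high half is scaled by
// (2^32 - 1) / 10^7 rather than 2^32 / 10^7, a negligible difference here.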
 163 static inline double fileTimeAsDouble(FILETIME* time) {
 164   const double high  = (double) ((unsigned int) ~0);
 165   const double split = 10000000.0;
 166   double result = (time->dwLowDateTime / split) +
 167                    time->dwHighDateTime * (high/split);
 168   return result;
 169 }
 170 
 171 // Implementation of os
 172 
 173 bool os::unsetenv(const char* name) {
 174   assert(name != NULL, "Null pointer");
 175   return (SetEnvironmentVariable(name, NULL) == TRUE);
 176 }
 177 
 178 // No setuid programs under Windows.
 179 bool os::have_special_privileges() {
 180   return false;
 181 }
 182 
 183 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.
// On Windows it currently does nothing.
 187 void os::run_periodic_checks() {
 188   return;
 189 }
 190 
 191 // previous UnhandledExceptionFilter, if there is one
 192 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 193 
 194 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 195 
 196 void os::init_system_properties_values() {
 197   // sysclasspath, java_home, dll_dir
 198   {
 199     char *home_path;
 200     char *dll_path;
 201     char *pslash;
 202     const char *bin = "\\bin";
 203     char home_dir[MAX_PATH + 1];
 204     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 205 
 206     if (alt_home_dir != NULL)  {
 207       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 208       home_dir[MAX_PATH] = '\0';
 209     } else {
 210       os::jvm_path(home_dir, sizeof(home_dir));
 211       // Found the full path to jvm.dll.
      // Now cut the path down to <java_home> if we can.
 213       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 214       pslash = strrchr(home_dir, '\\');
 215       if (pslash != NULL) {
 216         *pslash = '\0';                   // get rid of \{client|server}
 217         pslash = strrchr(home_dir, '\\');
 218         if (pslash != NULL) {
 219           *pslash = '\0';                 // get rid of \bin
 220         }
 221       }
 222     }
 223 
 224     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 225     strcpy(home_path, home_dir);
 226     Arguments::set_java_home(home_path);
 227     FREE_C_HEAP_ARRAY(char, home_path);
 228 
 229     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 230                                 mtInternal);
 231     strcpy(dll_path, home_dir);
 232     strcat(dll_path, bin);
 233     Arguments::set_dll_dir(dll_path);
 234     FREE_C_HEAP_ARRAY(char, dll_path);
 235 
 236     if (!set_boot_path('\\', ';')) {
 237       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 238     }
 239   }
 240 
 241 // library_path
 242 #define EXT_DIR "\\lib\\ext"
 243 #define BIN_DIR "\\bin"
 244 #define PACKAGE_DIR "\\Sun\\Java"
 245   {
 246     // Win32 library search order (See the documentation for LoadLibrary):
 247     //
    // 1. The directory from which the application is loaded.
 249     // 2. The system wide Java Extensions directory (Java only)
 250     // 3. System directory (GetSystemDirectory)
 251     // 4. Windows directory (GetWindowsDirectory)
 252     // 5. The PATH environment variable
 253     // 6. The current directory
 254 
 255     char *library_path;
 256     char tmp[MAX_PATH];
 257     char *path_str = ::getenv("PATH");
 258 
 259     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 260                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 261 
 262     library_path[0] = '\0';
 263 
 264     GetModuleFileName(NULL, tmp, sizeof(tmp));
 265     *(strrchr(tmp, '\\')) = '\0';
 266     strcat(library_path, tmp);
 267 
 268     GetWindowsDirectory(tmp, sizeof(tmp));
 269     strcat(library_path, ";");
 270     strcat(library_path, tmp);
 271     strcat(library_path, PACKAGE_DIR BIN_DIR);
 272 
 273     GetSystemDirectory(tmp, sizeof(tmp));
 274     strcat(library_path, ";");
 275     strcat(library_path, tmp);
 276 
 277     GetWindowsDirectory(tmp, sizeof(tmp));
 278     strcat(library_path, ";");
 279     strcat(library_path, tmp);
 280 
 281     if (path_str) {
 282       strcat(library_path, ";");
 283       strcat(library_path, path_str);
 284     }
 285 
 286     strcat(library_path, ";.");
 287 
 288     Arguments::set_library_path(library_path);
 289     FREE_C_HEAP_ARRAY(char, library_path);
 290   }
 291 
 292   // Default extensions directory
 293   {
 294     char path[MAX_PATH];
 295     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 296     GetWindowsDirectory(path, MAX_PATH);
 297     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 298             path, PACKAGE_DIR, EXT_DIR);
 299     Arguments::set_ext_dirs(buf);
 300   }
 301   #undef EXT_DIR
 302   #undef BIN_DIR
 303   #undef PACKAGE_DIR
 304 
 305 #ifndef _WIN64
 306   // set our UnhandledExceptionFilter and save any previous one
 307   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 308 #endif
 309 
 310   // Done
 311   return;
 312 }
 313 
 314 void os::breakpoint() {
 315   DebugBreak();
 316 }
 317 
 318 // Invoked from the BREAKPOINT Macro
 319 extern "C" void breakpoint() {
 320   os::breakpoint();
 321 }
 322 
// The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 324 // So far, this method is only used by Native Memory Tracking, which is
 325 // only supported on Windows XP or later.
 326 //
 327 int os::get_native_stack(address* stack, int frames, int toSkip) {
 328   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 329   for (int index = captured; index < frames; index ++) {
 330     stack[index] = NULL;
 331   }
 332   return captured;
 333 }
 334 
 335 
 336 // os::current_stack_base()
 337 //
 338 //   Returns the base of the stack, which is the stack's
 339 //   starting address.  This function must be called
 340 //   while running on the stack of the thread being queried.
 341 
 342 address os::current_stack_base() {
 343   MEMORY_BASIC_INFORMATION minfo;
 344   address stack_bottom;
 345   size_t stack_size;
 346 
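  // Querying the address of a local variable yields the memory region of the
  // current thread's stack; its AllocationBase is the low end of the stack
  // reservation.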
 347   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 348   stack_bottom =  (address)minfo.AllocationBase;
 349   stack_size = minfo.RegionSize;
 350 
 351   // Add up the sizes of all the regions with the same
 352   // AllocationBase.
 353   while (1) {
 354     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 355     if (stack_bottom == (address)minfo.AllocationBase) {
 356       stack_size += minfo.RegionSize;
 357     } else {
 358       break;
 359     }
 360   }
 361   return stack_bottom + stack_size;
 362 }
 363 
 364 size_t os::current_stack_size() {
 365   size_t sz;
 366   MEMORY_BASIC_INFORMATION minfo;
 367   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 368   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 369   return sz;
 370 }
 371 
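// Scan [start, start + size) with VirtualQuery and report, via committed_start
// and committed_size, the first contiguous committed sub-range found (if any).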
 372 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 373   MEMORY_BASIC_INFORMATION minfo;
 374   committed_start = NULL;
 375   committed_size = 0;
 376   address top = start + size;
 377   const address start_addr = start;
 378   while (start < top) {
 379     VirtualQuery(start, &minfo, sizeof(minfo));
 380     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 381       if (committed_start != NULL) {
 382         break;
 383       }
 384     } else {  // committed
 385       if (committed_start == NULL) {
 386         committed_start = start;
 387       }
 388       size_t offset = start - (address)minfo.BaseAddress;
 389       committed_size += minfo.RegionSize - offset;
 390     }
 391     start = (address)minfo.BaseAddress + minfo.RegionSize;
 392   }
 393 
 394   if (committed_start == NULL) {
 395     assert(committed_size == 0, "Sanity");
 396     return false;
 397   } else {
 398     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 399     // current region may go beyond the limit, trim to the limit
 400     committed_size = MIN2(committed_size, size_t(top - committed_start));
 401     return true;
 402   }
 403 }
 404 
 405 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 406   const struct tm* time_struct_ptr = localtime(clock);
 407   if (time_struct_ptr != NULL) {
 408     *res = *time_struct_ptr;
 409     return res;
 410   }
 411   return NULL;
 412 }
 413 
 414 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 415   const struct tm* time_struct_ptr = gmtime(clock);
 416   if (time_struct_ptr != NULL) {
 417     *res = *time_struct_ptr;
 418     return res;
 419   }
 420   return NULL;
 421 }
 422 
 423 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 424 
 425 // Thread start routine for all newly created threads
 426 static unsigned __stdcall thread_native_entry(Thread* thread) {
 427 
 428   thread->record_stack_base_and_size();
 429 
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance or
  // from different JVM instances. The benefit is especially noticeable on
  // processors with hyperthreading technology.
 435   static int counter = 0;
 436   int pid = os::current_process_id();
 437   _alloca(((pid ^ counter++) & 7) * 128);
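  // (The expression above displaces the stack pointer by (pid ^ counter) & 7,
  //  i.e. 0..7, times 128 bytes, so identical frames in different threads land
  //  on different cache line indices.)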
 438 
 439   thread->initialize_thread_current();
 440 
 441   OSThread* osthr = thread->osthread();
 442   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 443 
 444   if (UseNUMA) {
 445     int lgrp_id = os::numa_get_group_id();
 446     if (lgrp_id != -1) {
 447       thread->set_lgrp_id(lgrp_id);
 448     }
 449   }
 450 
 451   // Diagnostic code to investigate JDK-6573254
 452   int res = 30115;  // non-java thread
 453   if (thread->is_Java_thread()) {
 454     res = 20115;    // java thread
 455   }
 456 
 457   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 458 
  // Install a win32 structured exception handler around every thread created
  // by the VM, so the VM can generate an error dump when an exception occurs
  // in a non-Java thread (e.g. the VM thread).
 462   __try {
 463     thread->call_run();
 464   } __except(topLevelExceptionFilter(
 465                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 466     // Nothing to do.
 467   }
 468 
 469   // Note: at this point the thread object may already have deleted itself.
 470   // Do not dereference it from here on out.
 471 
 472   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 473 
  // One less thread is executing.
  // When the VMThread gets here, the main thread may have already exited,
  // which frees the CodeHeap containing the Atomic::add code.
 477   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 478     Atomic::dec(&os::win32::_os_thread_count);
 479   }
 480 
 481   // Thread must not return from exit_process_or_thread(), but if it does,
 482   // let it proceed to exit normally
 483   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 484 }
 485 
 486 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 487                                   int thread_id) {
 488   // Allocate the OSThread object
 489   OSThread* osthread = new OSThread(NULL, NULL);
 490   if (osthread == NULL) return NULL;
 491 
 492   // Initialize the JDK library's interrupt event.
 493   // This should really be done when OSThread is constructed,
 494   // but there is no way for a constructor to report failure to
 495   // allocate the event.
 496   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 497   if (interrupt_event == NULL) {
 498     delete osthread;
 499     return NULL;
 500   }
 501   osthread->set_interrupt_event(interrupt_event);
 502 
 503   // Store info on the Win32 thread into the OSThread
 504   osthread->set_thread_handle(thread_handle);
 505   osthread->set_thread_id(thread_id);
 506 
 507   if (UseNUMA) {
 508     int lgrp_id = os::numa_get_group_id();
 509     if (lgrp_id != -1) {
 510       thread->set_lgrp_id(lgrp_id);
 511     }
 512   }
 513 
 514   // Initial thread state is INITIALIZED, not SUSPENDED
 515   osthread->set_state(INITIALIZED);
 516 
 517   return osthread;
 518 }
 519 
 520 
 521 bool os::create_attached_thread(JavaThread* thread) {
 522 #ifdef ASSERT
 523   thread->verify_not_published();
 524 #endif
 525   HANDLE thread_h;
 526   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 527                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 528     fatal("DuplicateHandle failed\n");
 529   }
 530   OSThread* osthread = create_os_thread(thread, thread_h,
 531                                         (int)current_thread_id());
 532   if (osthread == NULL) {
 533     return false;
 534   }
 535 
 536   // Initial thread state is RUNNABLE
 537   osthread->set_state(RUNNABLE);
 538 
 539   thread->set_osthread(osthread);
 540 
 541   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 542     os::current_thread_id());
 543 
 544   return true;
 545 }
 546 
 547 bool os::create_main_thread(JavaThread* thread) {
 548 #ifdef ASSERT
 549   thread->verify_not_published();
 550 #endif
 551   if (_starting_thread == NULL) {
 552     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 553     if (_starting_thread == NULL) {
 554       return false;
 555     }
 556   }
 557 
  // The primordial thread is runnable from the start.
 559   _starting_thread->set_state(RUNNABLE);
 560 
 561   thread->set_osthread(_starting_thread);
 562   return true;
 563 }
 564 
 565 // Helper function to trace _beginthreadex attributes,
 566 //  similar to os::Posix::describe_pthread_attr()
 567 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 568                                                size_t stacksize, unsigned initflag) {
 569   stringStream ss(buf, buflen);
 570   if (stacksize == 0) {
 571     ss.print("stacksize: default, ");
 572   } else {
 573     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 574   }
 575   ss.print("flags: ");
 576   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 577   #define ALL(X) \
 578     X(CREATE_SUSPENDED) \
 579     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 580   ALL(PRINT_FLAG)
 581   #undef ALL
 582   #undef PRINT_FLAG
 583   return buf;
 584 }
 585 
 586 // Allocate and initialize a new OSThread
 587 bool os::create_thread(Thread* thread, ThreadType thr_type,
 588                        size_t stack_size) {
 589   unsigned thread_id;
 590 
 591   // Allocate the OSThread object
 592   OSThread* osthread = new OSThread(NULL, NULL);
 593   if (osthread == NULL) {
 594     return false;
 595   }
 596 
 597   // Initialize the JDK library's interrupt event.
 598   // This should really be done when OSThread is constructed,
 599   // but there is no way for a constructor to report failure to
 600   // allocate the event.
 601   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 602   if (interrupt_event == NULL) {
 603     delete osthread;
 604     return false;
 605   }
 606   osthread->set_interrupt_event(interrupt_event);
 607   // We don't call set_interrupted(false) as it will trip the assert in there
 608   // as we are not operating on the current thread. We don't need to call it
 609   // because the initial state is already correct.
 610 
 611   thread->set_osthread(osthread);
 612 
 613   if (stack_size == 0) {
 614     switch (thr_type) {
 615     case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag.
 617       if (JavaThread::stack_size_at_create() > 0) {
 618         stack_size = JavaThread::stack_size_at_create();
 619       }
 620       break;
 621     case os::compiler_thread:
 622       if (CompilerThreadStackSize > 0) {
 623         stack_size = (size_t)(CompilerThreadStackSize * K);
 624         break;
 625       } // else fall through:
 626         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 627     case os::vm_thread:
 628     case os::pgc_thread:
 629     case os::cgc_thread:
 630     case os::watcher_thread:
 631       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 632       break;
 633     }
 634   }
 635 
 636   // Create the Win32 thread
 637   //
  // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
  // does not specify the stack size. Instead, it specifies the size of
  // initially committed space. The stack size is determined by the
  // PE header in the executable. If the committed "stack_size" is larger
  // than the default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example, if the launcher has a default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all; it only affects the initial
  // commitment. On the other hand, specifying a 'stack_size' larger than the
  // default value may cause a significant increase in memory usage, because
  // not only is the stack space rounded up to a multiple of 1MB, but the
  // entire space is also committed upfront.
  //
  // Finally, Windows XP added a new flag, 'STACK_SIZE_PARAM_IS_A_RESERVATION',
  // for CreateThread() that treats 'stack_size' as the stack size. However, we
  // are not supposed to call CreateThread() directly according to the MSDN
  // documentation, because the JVM uses the C runtime library. The good news
  // is that the flag appears to work with _beginthreadex() as well.
 656 
 657   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 658   HANDLE thread_handle =
 659     (HANDLE)_beginthreadex(NULL,
 660                            (unsigned)stack_size,
 661                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 662                            thread,
 663                            initflag,
 664                            &thread_id);
 665 
 666   char buf[64];
 667   if (thread_handle != NULL) {
 668     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 669       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 670   } else {
 671     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 672       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 673     // Log some OS information which might explain why creating the thread failed.
 674     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 675     LogStream st(Log(os, thread)::info());
 676     os::print_memory_info(&st);
 677   }
 678 
 679   if (thread_handle == NULL) {
 680     // Need to clean up stuff we've allocated so far
 681     thread->set_osthread(NULL);
 682     delete osthread;
 683     return false;
 684   }
 685 
 686   Atomic::inc(&os::win32::_os_thread_count);
 687 
 688   // Store info on the Win32 thread into the OSThread
 689   osthread->set_thread_handle(thread_handle);
 690   osthread->set_thread_id(thread_id);
 691 
 692   // Initial thread state is INITIALIZED, not SUSPENDED
 693   osthread->set_state(INITIALIZED);
 694 
 695   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 696   return true;
 697 }
 698 
 699 
 700 // Free Win32 resources related to the OSThread
 701 void os::free_thread(OSThread* osthread) {
 702   assert(osthread != NULL, "osthread not set");
 703 
 704   // We are told to free resources of the argument thread,
 705   // but we can only really operate on the current thread.
 706   assert(Thread::current()->osthread() == osthread,
 707          "os::free_thread but not current thread");
 708 
 709   CloseHandle(osthread->thread_handle());
 710   delete osthread;
 711 }
 712 
 713 static jlong first_filetime;
 714 static jlong initial_performance_count;
 715 static jlong performance_frequency;
 716 
 717 
 718 jlong as_long(LARGE_INTEGER x) {
 719   jlong result = 0; // initialization to avoid warning
 720   set_high(&result, x.HighPart);
 721   set_low(&result, x.LowPart);
 722   return result;
 723 }
 724 
 725 
 726 jlong os::elapsed_counter() {
 727   LARGE_INTEGER count;
 728   QueryPerformanceCounter(&count);
 729   return as_long(count) - initial_performance_count;
 730 }
 731 
 732 
 733 jlong os::elapsed_frequency() {
 734   return performance_frequency;
 735 }
 736 
 737 
 738 julong os::available_memory() {
 739   return win32::available_memory();
 740 }
 741 
 742 julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
  // incorrect value if total memory is larger than 4GB.
 745   MEMORYSTATUSEX ms;
 746   ms.dwLength = sizeof(ms);
 747   GlobalMemoryStatusEx(&ms);
 748 
 749   return (julong)ms.ullAvailPhys;
 750 }
 751 
 752 julong os::physical_memory() {
 753   return win32::physical_memory();
 754 }
 755 
 756 bool os::has_allocatable_memory_limit(julong* limit) {
 757   MEMORYSTATUSEX ms;
 758   ms.dwLength = sizeof(ms);
 759   GlobalMemoryStatusEx(&ms);
 760 #ifdef _LP64
 761   *limit = (julong)ms.ullAvailVirtual;
 762   return true;
 763 #else
 764   // Limit to 1400m because of the 2gb address space wall
 765   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 766   return true;
 767 #endif
 768 }
 769 
 770 int os::active_processor_count() {
 771   // User has overridden the number of active processors
 772   if (ActiveProcessorCount > 0) {
 773     log_trace(os)("active_processor_count: "
 774                   "active processor count set by user : %d",
 775                   ActiveProcessorCount);
 776     return ActiveProcessorCount;
 777   }
 778 
 779   DWORD_PTR lpProcessAffinityMask = 0;
 780   DWORD_PTR lpSystemAffinityMask = 0;
 781   int proc_count = processor_count();
 782   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 783       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // The number of active processors is the number of set bits in the process affinity mask.
 785     int bitcount = 0;
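    // Count the set bits: x & (x - 1) clears the lowest set bit, so the loop
    // below iterates once per processor enabled in the affinity mask.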
 786     while (lpProcessAffinityMask != 0) {
 787       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 788       bitcount++;
 789     }
 790     return bitcount;
 791   } else {
 792     return proc_count;
 793   }
 794 }
 795 
 796 uint os::processor_id() {
 797   return (uint)GetCurrentProcessorNumber();
 798 }
 799 
 800 void os::set_native_thread_name(const char *name) {
 801 
 802   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 803   //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; the debugger must observe
  // the exception below to show the correct name.
 807 
 808   // If there is no debugger attached skip raising the exception
 809   if (!IsDebuggerPresent()) {
 810     return;
 811   }
 812 
 813   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 814   struct {
 815     DWORD dwType;     // must be 0x1000
 816     LPCSTR szName;    // pointer to name (in user addr space)
 817     DWORD dwThreadID; // thread ID (-1=caller thread)
 818     DWORD dwFlags;    // reserved for future use, must be zero
 819   } info;
 820 
 821   info.dwType = 0x1000;
 822   info.szName = name;
 823   info.dwThreadID = -1;
 824   info.dwFlags = 0;
 825 
 826   __try {
 827     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 828   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 829 }
 830 
 831 bool os::bind_to_processor(uint processor_id) {
 832   // Not yet implemented.
 833   return false;
 834 }
 835 
 836 void os::win32::initialize_performance_counter() {
 837   LARGE_INTEGER count;
 838   QueryPerformanceFrequency(&count);
 839   performance_frequency = as_long(count);
 840   QueryPerformanceCounter(&count);
 841   initial_performance_count = as_long(count);
 842 }
 843 
 844 
 845 double os::elapsedTime() {
 846   return (double) elapsed_counter() / (double) elapsed_frequency();
 847 }
 848 
 849 
 850 // Windows format:
 851 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 852 // Java format:
 853 //   Java standards require the number of milliseconds since 1/1/1970
 854 
 855 // Constant offset - calculated using offset()
 856 static jlong  _offset   = 116444736000000000;
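// (For reference: 1601-01-01 to 1970-01-01 is 134774 days = 11,644,473,600
//  seconds, i.e. 11,644,473,600 * 10^7 = 116444736000000000 100ns intervals.)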
 857 // Fake time counter for reproducible results when debugging
 858 static jlong  fake_time = 0;
 859 
 860 #ifdef ASSERT
 861 // Just to be safe, recalculate the offset in debug mode
 862 static jlong _calculated_offset = 0;
 863 static int   _has_calculated_offset = 0;
 864 
 865 jlong offset() {
 866   if (_has_calculated_offset) return _calculated_offset;
 867   SYSTEMTIME java_origin;
 868   java_origin.wYear          = 1970;
 869   java_origin.wMonth         = 1;
 870   java_origin.wDayOfWeek     = 0; // ignored
 871   java_origin.wDay           = 1;
 872   java_origin.wHour          = 0;
 873   java_origin.wMinute        = 0;
 874   java_origin.wSecond        = 0;
 875   java_origin.wMilliseconds  = 0;
 876   FILETIME jot;
 877   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 878     fatal("Error = %d\nWindows error", GetLastError());
 879   }
 880   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 881   _has_calculated_offset = 1;
 882   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 883   return _calculated_offset;
 884 }
 885 #else
 886 jlong offset() {
 887   return _offset;
 888 }
 889 #endif
 890 
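// Convert a FILETIME value (100ns intervals since 1601) to Java milliseconds
// since the 1970 epoch: subtract the epoch offset, then divide by 10,000
// (the number of 100ns intervals per millisecond).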
 891 jlong windows_to_java_time(FILETIME wt) {
 892   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 893   return (a - offset()) / 10000;
 894 }
 895 
// Returns time ticks in tenths of microseconds (100ns intervals)
 897 jlong windows_to_time_ticks(FILETIME wt) {
 898   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 899   return (a - offset());
 900 }
 901 
 902 FILETIME java_to_windows_time(jlong l) {
 903   jlong a = (l * 10000) + offset();
 904   FILETIME result;
 905   result.dwHighDateTime = high(a);
 906   result.dwLowDateTime  = low(a);
 907   return result;
 908 }
 909 
 910 bool os::supports_vtime() { return true; }
 911 
 912 double os::elapsedVTime() {
 913   FILETIME created;
 914   FILETIME exited;
 915   FILETIME kernel;
 916   FILETIME user;
 917   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 918     // the resolution of windows_to_java_time() should be sufficient (ms)
 919     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 920   } else {
 921     return elapsedTime();
 922   }
 923 }
 924 
 925 jlong os::javaTimeMillis() {
 926   FILETIME wt;
 927   GetSystemTimeAsFileTime(&wt);
 928   return windows_to_java_time(wt);
 929 }
 930 
 931 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 932   FILETIME wt;
 933   GetSystemTimeAsFileTime(&wt);
  jlong ticks = windows_to_time_ticks(wt); // tenths of microseconds (100ns units)
 935   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 936   seconds = secs;
 937   nanos = jlong(ticks - (secs*10000000)) * 100;
 938 }
 939 
 940 jlong os::javaTimeNanos() {
 941     LARGE_INTEGER current_count;
 942     QueryPerformanceCounter(&current_count);
 943     double current = as_long(current_count);
 944     double freq = performance_frequency;
 945     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 946     return time;
 947 }
 948 
 949 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 950   jlong freq = performance_frequency;
 951   if (freq < NANOSECS_PER_SEC) {
 952     // the performance counter is 64 bits and we will
 953     // be multiplying it -- so no wrap in 64 bits
 954     info_ptr->max_value = ALL_64_BITS;
 955   } else if (freq > NANOSECS_PER_SEC) {
 956     // use the max value the counter can reach to
 957     // determine the max value which could be returned
 958     julong max_counter = (julong)ALL_64_BITS;
 959     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 960   } else {
 961     // the performance counter is 64 bits and we will
 962     // be using it directly -- so no wrap in 64 bits
 963     info_ptr->max_value = ALL_64_BITS;
 964   }
 965 
 966   // using a counter, so no skipping
 967   info_ptr->may_skip_backward = false;
 968   info_ptr->may_skip_forward = false;
 969 
 970   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 971 }
 972 
 973 char* os::local_time_string(char *buf, size_t buflen) {
 974   SYSTEMTIME st;
 975   GetLocalTime(&st);
 976   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 977                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 978   return buf;
 979 }
 980 
 981 bool os::getTimesSecs(double* process_real_time,
 982                       double* process_user_time,
 983                       double* process_system_time) {
 984   HANDLE h_process = GetCurrentProcess();
 985   FILETIME create_time, exit_time, kernel_time, user_time;
 986   BOOL result = GetProcessTimes(h_process,
 987                                 &create_time,
 988                                 &exit_time,
 989                                 &kernel_time,
 990                                 &user_time);
 991   if (result != 0) {
 992     FILETIME wt;
 993     GetSystemTimeAsFileTime(&wt);
 994     jlong rtc_millis = windows_to_java_time(wt);
 995     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 996     *process_user_time =
 997       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 998     *process_system_time =
 999       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1000     return true;
1001   } else {
1002     return false;
1003   }
1004 }
1005 
1006 void os::shutdown() {
1007   // allow PerfMemory to attempt cleanup of any persistent resources
1008   perfMemory_exit();
1009 
1010   // flush buffered output, finish log files
1011   ostream_abort();
1012 
1013   // Check for abort hook
1014   abort_hook_t abort_hook = Arguments::abort_hook();
1015   if (abort_hook != NULL) {
1016     abort_hook();
1017   }
1018 }
1019 
1020 
1021 static HANDLE dumpFile = NULL;
1022 
1023 // Check if dump file can be created.
1024 void os::check_dump_limit(char* buffer, size_t buffsz) {
1025   bool status = true;
1026   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1027     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1028     status = false;
1029   }
1030 
1031 #ifndef ASSERT
1032   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1033     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1034     status = false;
1035   }
1036 #endif
1037 
1038   if (status) {
1039     const char* cwd = get_current_directory(NULL, 0);
1040     int pid = current_process_id();
1041     if (cwd != NULL) {
1042       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1043     } else {
1044       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1045     }
1046 
1047     if (dumpFile == NULL &&
1048        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1049                  == INVALID_HANDLE_VALUE) {
1050       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1051       status = false;
1052     }
1053   }
1054   VMError::record_coredump_status(buffer, status);
1055 }
1056 
1057 void os::abort(bool dump_core, void* siginfo, const void* context) {
1058   EXCEPTION_POINTERS ep;
1059   MINIDUMP_EXCEPTION_INFORMATION mei;
1060   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1061 
1062   HANDLE hProcess = GetCurrentProcess();
1063   DWORD processId = GetCurrentProcessId();
1064   MINIDUMP_TYPE dumpType;
1065 
1066   shutdown();
1067   if (!dump_core || dumpFile == NULL) {
1068     if (dumpFile != NULL) {
1069       CloseHandle(dumpFile);
1070     }
1071     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1072   }
1073 
1074   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1075     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1076 
1077   if (siginfo != NULL && context != NULL) {
1078     ep.ContextRecord = (PCONTEXT) context;
1079     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1080 
1081     mei.ThreadId = GetCurrentThreadId();
1082     mei.ExceptionPointers = &ep;
1083     pmei = &mei;
1084   } else {
1085     pmei = NULL;
1086   }
1087 
  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If the first call fails, fall back to using just MiniDumpWithFullMemory.
1090   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1091       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1092     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1093   }
1094   CloseHandle(dumpFile);
1095   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1096 }
1097 
1098 // Die immediately, no exit hook, no abort hook, no cleanup.
1099 void os::die() {
1100   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1101 }
1102 
1103 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1104 //  * dirent_md.c       1.15 00/02/02
1105 //
1106 // The declarations for DIR and struct dirent are in jvm_win32.h.
1107 
1108 // Caller must have already run dirname through JVM_NativePath, which removes
1109 // duplicate slashes and converts all instances of '/' into '\\'.
1110 
1111 DIR * os::opendir(const char *dirname) {
1112   assert(dirname != NULL, "just checking");   // hotspot change
1113   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1114   DWORD fattr;                                // hotspot change
1115   char alt_dirname[4] = { 0, 0, 0, 0 };
1116 
1117   if (dirp == 0) {
1118     errno = ENOMEM;
1119     return 0;
1120   }
1121 
1122   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1123   // as a directory in FindFirstFile().  We detect this case here and
1124   // prepend the current drive name.
1125   //
1126   if (dirname[1] == '\0' && dirname[0] == '\\') {
1127     alt_dirname[0] = _getdrive() + 'A' - 1;
1128     alt_dirname[1] = ':';
1129     alt_dirname[2] = '\\';
1130     alt_dirname[3] = '\0';
1131     dirname = alt_dirname;
1132   }
1133 
1134   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1135   if (dirp->path == 0) {
1136     free(dirp);
1137     errno = ENOMEM;
1138     return 0;
1139   }
1140   strcpy(dirp->path, dirname);
1141 
1142   fattr = GetFileAttributes(dirp->path);
1143   if (fattr == 0xffffffff) {
1144     free(dirp->path);
1145     free(dirp);
1146     errno = ENOENT;
1147     return 0;
1148   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1149     free(dirp->path);
1150     free(dirp);
1151     errno = ENOTDIR;
1152     return 0;
1153   }
1154 
1155   // Append "*.*", or possibly "\\*.*", to path
1156   if (dirp->path[1] == ':' &&
1157       (dirp->path[2] == '\0' ||
1158       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1159     // No '\\' needed for cases like "Z:" or "Z:\"
1160     strcat(dirp->path, "*.*");
1161   } else {
1162     strcat(dirp->path, "\\*.*");
1163   }
1164 
1165   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1166   if (dirp->handle == INVALID_HANDLE_VALUE) {
1167     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1168       free(dirp->path);
1169       free(dirp);
1170       errno = EACCES;
1171       return 0;
1172     }
1173   }
1174   return dirp;
1175 }
1176 
1177 struct dirent * os::readdir(DIR *dirp) {
1178   assert(dirp != NULL, "just checking");      // hotspot change
1179   if (dirp->handle == INVALID_HANDLE_VALUE) {
1180     return NULL;
1181   }
1182 
1183   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1184 
1185   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1186     if (GetLastError() == ERROR_INVALID_HANDLE) {
1187       errno = EBADF;
1188       return NULL;
1189     }
1190     FindClose(dirp->handle);
1191     dirp->handle = INVALID_HANDLE_VALUE;
1192   }
1193 
1194   return &dirp->dirent;
1195 }
1196 
1197 int os::closedir(DIR *dirp) {
1198   assert(dirp != NULL, "just checking");      // hotspot change
1199   if (dirp->handle != INVALID_HANDLE_VALUE) {
1200     if (!FindClose(dirp->handle)) {
1201       errno = EBADF;
1202       return -1;
1203     }
1204     dirp->handle = INVALID_HANDLE_VALUE;
1205   }
1206   free(dirp->path);
1207   free(dirp);
1208   return 0;
1209 }
1210 
// This returns the system's temporary directory, not the java
// application's temp directory (java.io.tmpdir).
1213 const char* os::get_temp_directory() {
1214   static char path_buf[MAX_PATH];
1215   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1216     return path_buf;
1217   } else {
1218     path_buf[0] = '\0';
1219     return path_buf;
1220   }
1221 }
1222 
1223 // Needs to be in os specific directory because windows requires another
1224 // header file <direct.h>
1225 const char* os::get_current_directory(char *buf, size_t buflen) {
1226   int n = static_cast<int>(buflen);
1227   if (buflen > INT_MAX)  n = INT_MAX;
1228   return _getcwd(buf, n);
1229 }
1230 
1231 //-----------------------------------------------------------
1232 // Helper functions for fatal error handler
1233 #ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
1236 //
1237 static bool _addr_in_ntdll(address addr) {
1238   HMODULE hmod;
1239   MODULEINFO minfo;
1240 
1241   hmod = GetModuleHandle("NTDLL.DLL");
1242   if (hmod == NULL) return false;
1243   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1244                                           &minfo, sizeof(MODULEINFO))) {
1245     return false;
1246   }
1247 
1248   if ((addr >= minfo.lpBaseOfDll) &&
1249       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1250     return true;
1251   } else {
1252     return false;
1253   }
1254 }
1255 #endif
1256 
1257 struct _modinfo {
1258   address addr;
1259   char*   full_path;   // point to a char buffer
1260   int     buflen;      // size of the buffer
1261   address base_addr;
1262 };
1263 
1264 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1265                                   address top_address, void * param) {
1266   struct _modinfo *pmod = (struct _modinfo *)param;
1267   if (!pmod) return -1;
1268 
1269   if (base_addr   <= pmod->addr &&
1270       top_address > pmod->addr) {
1271     // if a buffer is provided, copy path name to the buffer
1272     if (pmod->full_path) {
1273       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1274     }
1275     pmod->base_addr = base_addr;
1276     return 1;
1277   }
1278   return 0;
1279 }
1280 
1281 bool os::dll_address_to_library_name(address addr, char* buf,
1282                                      int buflen, int* offset) {
1283   // buf is not optional, but offset is optional
1284   assert(buf != NULL, "sanity check");
1285 
// NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
//       return the full path to the DLL file; sometimes it returns the path
//       to the corresponding PDB file (debug info), and sometimes it only
//       returns a partial path, which makes life painful.
1290 
1291   struct _modinfo mi;
1292   mi.addr      = addr;
1293   mi.full_path = buf;
1294   mi.buflen    = buflen;
1295   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1296     // buf already contains path name
1297     if (offset) *offset = addr - mi.base_addr;
1298     return true;
1299   }
1300 
1301   buf[0] = '\0';
1302   if (offset) *offset = -1;
1303   return false;
1304 }
1305 
1306 bool os::dll_address_to_function_name(address addr, char *buf,
1307                                       int buflen, int *offset,
1308                                       bool demangle) {
1309   // buf is not optional, but offset is optional
1310   assert(buf != NULL, "sanity check");
1311 
1312   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1313     return true;
1314   }
1315   if (offset != NULL)  *offset  = -1;
1316   buf[0] = '\0';
1317   return false;
1318 }
1319 
1320 // save the start and end address of jvm.dll into param[0] and param[1]
1321 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1322                            address top_address, void * param) {
1323   if (!param) return -1;
1324 
1325   if (base_addr   <= (address)_locate_jvm_dll &&
1326       top_address > (address)_locate_jvm_dll) {
1327     ((address*)param)[0] = base_addr;
1328     ((address*)param)[1] = top_address;
1329     return 1;
1330   }
1331   return 0;
1332 }
1333 
1334 address vm_lib_location[2];    // start and end address of jvm.dll
1335 
1336 // check if addr is inside jvm.dll
1337 bool os::address_is_in_vm(address addr) {
1338   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1339     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1340       assert(false, "Can't find jvm module.");
1341       return false;
1342     }
1343   }
1344 
1345   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1346 }
1347 
1348 // print module info; param is outputStream*
1349 static int _print_module(const char* fname, address base_address,
1350                          address top_address, void* param) {
1351   if (!param) return -1;
1352 
1353   outputStream* st = (outputStream*)param;
1354 
1355   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1356   return 0;
1357 }
1358 
// Loads a .dll/.so and, in case of error, checks whether the .dll/.so was
// built for the same architecture that Hotspot is running on.
1362 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1363   log_info(os)("attempting shared library load of %s", name);
1364 
1365   void * result = LoadLibrary(name);
1366   if (result != NULL) {
1367     Events::log(NULL, "Loaded shared library %s", name);
1368     // Recalculate pdb search path if a DLL was loaded successfully.
1369     SymbolEngine::recalc_search_path();
1370     log_info(os)("shared library load of %s was successful", name);
1371     return result;
1372   }
1373   DWORD errcode = GetLastError();
1374   // Read system error message into ebuf
1375   // It may or may not be overwritten below (in the for loop and just above)
1376   lasterror(ebuf, (size_t) ebuflen);
1377   ebuf[ebuflen - 1] = '\0';
1378   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1379   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1380 
1381   if (errcode == ERROR_MOD_NOT_FOUND) {
1382     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1383     ebuf[ebuflen - 1] = '\0';
1384     return NULL;
1385   }
1386 
  // Parsing the dll below:
  // if we can read the dll info and find that the dll was built
  // for an architecture other than the one Hotspot is running on,
  // then print "DLL was built for a different architecture" to the buffer;
  // else call os::lasterror to obtain the system error message.
1392   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1393   if (fd < 0) {
1394     return NULL;
1395   }
1396 
1397   uint32_t signature_offset;
1398   uint16_t lib_arch = 0;
1399   bool failed_to_get_lib_arch =
1400     ( // Go to position 3c in the dll
1401      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1402      ||
1403      // Read location of signature
1404      (sizeof(signature_offset) !=
1405      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1406      ||
1407      // Go to COFF File Header in dll
1408      // that is located after "signature" (4 bytes long)
1409      (os::seek_to_file_offset(fd,
1410      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1411      ||
1412      // Read field that contains code of architecture
1413      // that dll was built for
1414      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1415     );
1416 
1417   ::close(fd);
1418   if (failed_to_get_lib_arch) {
1419     // file i/o error - report os::lasterror(...) msg
1420     return NULL;
1421   }
1422 
1423   typedef struct {
1424     uint16_t arch_code;
1425     char* arch_name;
1426   } arch_t;
1427 
1428   static const arch_t arch_array[] = {
1429     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1430     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
1431   };
1432 #if (defined _M_AMD64)
1433   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1434 #elif (defined _M_IX86)
1435   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1436 #else
1437   #error Method os::dll_load requires that one of following \
1438          is defined :_M_AMD64 or _M_IX86
1439 #endif
1440 
1441 
  // Obtain strings for the printf operation:
  // lib_arch_str shall contain the platform this .dll was built for,
  // running_arch_str shall contain the platform Hotspot was built for.
1445   char *running_arch_str = NULL, *lib_arch_str = NULL;
1446   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1447     if (lib_arch == arch_array[i].arch_code) {
1448       lib_arch_str = arch_array[i].arch_name;
1449     }
1450     if (running_arch == arch_array[i].arch_code) {
1451       running_arch_str = arch_array[i].arch_name;
1452     }
1453   }
1454 
1455   assert(running_arch_str,
1456          "Didn't find running architecture code in arch_array");
1457 
  // If the architecture is right
  // but some other error took place, report the os::lasterror(...) msg.
1460   if (lib_arch == running_arch) {
1461     return NULL;
1462   }
1463 
1464   if (lib_arch_str != NULL) {
1465     ::_snprintf(ebuf, ebuflen - 1,
1466                 "Can't load %s-bit .dll on a %s-bit platform",
1467                 lib_arch_str, running_arch_str);
1468   } else {
    // we don't know what architecture this dll was built for
1470     ::_snprintf(ebuf, ebuflen - 1,
1471                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1472                 lib_arch, running_arch_str);
1473   }
1474 
1475   return NULL;
1476 }
1477 
1478 void os::print_dll_info(outputStream *st) {
1479   st->print_cr("Dynamic libraries:");
1480   get_loaded_modules_info(_print_module, (void *)st);
1481 }
1482 
1483 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1484   HANDLE   hProcess;
1485 
1486 # define MAX_NUM_MODULES 128
1487   HMODULE     modules[MAX_NUM_MODULES];
1488   static char filename[MAX_PATH];
1489   int         result = 0;
1490 
1491   int pid = os::current_process_id();
1492   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1493                          FALSE, pid);
1494   if (hProcess == NULL) return 0;
1495 
1496   DWORD size_needed;
1497   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1498     CloseHandle(hProcess);
1499     return 0;
1500   }
1501 
1502   // number of modules that are currently loaded
1503   int num_modules = size_needed / sizeof(HMODULE);
1504 
1505   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1506     // Get Full pathname:
1507     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1508       filename[0] = '\0';
1509     }
1510 
1511     MODULEINFO modinfo;
1512     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1513       modinfo.lpBaseOfDll = NULL;
1514       modinfo.SizeOfImage = 0;
1515     }
1516 
1517     // Invoke callback function
1518     result = callback(filename, (address)modinfo.lpBaseOfDll,
1519                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1520     if (result) break;
1521   }
1522 
1523   CloseHandle(hProcess);
1524   return result;
1525 }
1526 
1527 bool os::get_host_name(char* buf, size_t buflen) {
1528   DWORD size = (DWORD)buflen;
1529   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1530 }
1531 
1532 void os::get_summary_os_info(char* buf, size_t buflen) {
1533   stringStream sst(buf, buflen);
1534   os::win32::print_windows_version(&sst);
1535   // chop off newline character
1536   char* nl = strchr(buf, '\n');
1537   if (nl != NULL) *nl = '\0';
1538 }
1539 
1540 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1541 #if _MSC_VER >= 1900
  // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1543   int result = ::vsnprintf(buf, len, fmt, args);
1544   // If an encoding error occurred (result < 0) then it's not clear
1545   // whether the buffer is NUL terminated, so ensure it is.
1546   if ((result < 0) && (len > 0)) {
1547     buf[len - 1] = '\0';
1548   }
1549   return result;
1550 #else
1551   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1552   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1553   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1554   // go straight to _vscprintf.  The output is going to be truncated in
1555   // that case, except in the unusual case of empty output.  More
1556   // importantly, the documentation for various versions of Visual Studio
1557   // are inconsistent about the behavior of _vsnprintf when len == 0,
1558   // including it possibly being an error.
1559   int result = -1;
1560   if (len > 0) {
1561     result = _vsnprintf(buf, len, fmt, args);
1562     // If output (including NUL terminator) is truncated, the buffer
1563     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1564     if ((result < 0) || ((size_t)result >= len)) {
1565       buf[len - 1] = '\0';
1566     }
1567   }
1568   if (result < 0) {
1569     result = _vscprintf(fmt, args);
1570   }
1571   return result;
1572 #endif // _MSC_VER dispatch
1573 }
1574 
1575 static inline time_t get_mtime(const char* filename) {
1576   struct stat st;
1577   int ret = os::stat(filename, &st);
1578   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1579   return st.st_mtime;
1580 }
1581 
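// Returns the difference of the two modification times (t1 - t2) as an int:
// positive if file1 was modified more recently than file2, zero if the times
// are equal, and negative otherwise. Both files must exist; get_mtime() above
// asserts that os::stat() succeeds.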
1582 int os::compare_file_modified_times(const char* file1, const char* file2) {
1583   time_t t1 = get_mtime(file1);
1584   time_t t2 = get_mtime(file2);
1585   return t1 - t2;
1586 }
1587 
1588 void os::print_os_info_brief(outputStream* st) {
1589   os::print_os_info(st);
1590 }
1591 
1592 void os::win32::print_uptime_info(outputStream* st) {
1593   unsigned long long ticks = GetTickCount64();
1594   os::print_dhm(st, "OS uptime:", ticks/1000);
1595 }
1596 
1597 void os::print_os_info(outputStream* st) {
1598 #ifdef ASSERT
1599   char buffer[1024];
1600   st->print("HostName: ");
1601   if (get_host_name(buffer, sizeof(buffer))) {
1602     st->print("%s ", buffer);
1603   } else {
1604     st->print("N/A ");
1605   }
1606 #endif
1607   st->print_cr("OS:");
1608   os::win32::print_windows_version(st);
1609 
1610   os::win32::print_uptime_info(st);
1611 
1612   VM_Version::print_platform_virtualization_info(st);
1613 }
1614 
1615 void os::win32::print_windows_version(outputStream* st) {
1616   OSVERSIONINFOEX osvi;
1617   VS_FIXEDFILEINFO *file_info;
1618   TCHAR kernel32_path[MAX_PATH];
1619   UINT len, ret;
1620 
1621   // Use the GetVersionEx information to see if we're on a server or
1622   // workstation edition of Windows. Starting with Windows 8.1 we can't
1623   // trust the OS version information returned by this API.
1624   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1625   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1626   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1627     st->print_cr("Call to GetVersionEx failed");
1628     return;
1629   }
1630   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1631 
1632   // Get the full path to \Windows\System32\kernel32.dll and use that for
1633   // determining what version of Windows we're running on.
1634   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1635   ret = GetSystemDirectory(kernel32_path, len);
1636   if (ret == 0 || ret > len) {
1637     st->print_cr("Call to GetSystemDirectory failed");
1638     return;
1639   }
1640   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1641 
1642   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1643   if (version_size == 0) {
1644     st->print_cr("Call to GetFileVersionInfoSize failed");
1645     return;
1646   }
1647 
1648   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1649   if (version_info == NULL) {
1650     st->print_cr("Failed to allocate version_info");
1651     return;
1652   }
1653 
1654   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1655     os::free(version_info);
1656     st->print_cr("Call to GetFileVersionInfo failed");
1657     return;
1658   }
1659 
1660   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1661     os::free(version_info);
1662     st->print_cr("Call to VerQueryValue failed");
1663     return;
1664   }
1665 
1666   int major_version = HIWORD(file_info->dwProductVersionMS);
1667   int minor_version = LOWORD(file_info->dwProductVersionMS);
1668   int build_number = HIWORD(file_info->dwProductVersionLS);
1669   int build_minor = LOWORD(file_info->dwProductVersionLS);
1670   int os_vers = major_version * 1000 + minor_version;
1671   os::free(version_info);
1672 
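  // os_vers encodes major.minor as major * 1000 + minor, so e.g. 6.3 maps to
  // 6003 and 10.0 maps to 10000, matching the cases in the switch below.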
1673   st->print(" Windows ");
1674   switch (os_vers) {
1675 
1676   case 6000:
1677     if (is_workstation) {
1678       st->print("Vista");
1679     } else {
1680       st->print("Server 2008");
1681     }
1682     break;
1683 
1684   case 6001:
1685     if (is_workstation) {
1686       st->print("7");
1687     } else {
1688       st->print("Server 2008 R2");
1689     }
1690     break;
1691 
1692   case 6002:
1693     if (is_workstation) {
1694       st->print("8");
1695     } else {
1696       st->print("Server 2012");
1697     }
1698     break;
1699 
1700   case 6003:
1701     if (is_workstation) {
1702       st->print("8.1");
1703     } else {
1704       st->print("Server 2012 R2");
1705     }
1706     break;
1707 
1708   case 10000:
1709     if (is_workstation) {
1710       st->print("10");
1711     } else {
1712       // Distinguish Windows Server 2016 from 2019 by build number.
1713       // The Windows Server 2019 GA (10/2018) build number is 17763.
1714       if (build_number > 17762) {
1715         st->print("Server 2019");
1716       } else {
1717         st->print("Server 2016");
1718       }
1719     }
1720     break;
1721 
1722   default:
1723     // Unrecognized Windows version; print its major and minor versions
1724     st->print("%d.%d", major_version, minor_version);
1725     break;
1726   }
1727 
1728   // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we can
1729   // find out whether we are running on a 64-bit processor
1730   SYSTEM_INFO si;
1731   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1732   GetNativeSystemInfo(&si);
1733   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1734     st->print(" , 64 bit");
1735   }
1736 
1737   st->print(" Build %d", build_number);
1738   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1739   st->cr();
1740 }
1741 
1742 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1743   // Nothing to do for now.
1744 }
1745 
1746 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1747   HKEY key;
1748   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1749                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1750   if (status == ERROR_SUCCESS) {
1751     DWORD size = (DWORD)buflen;
1752     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1753     if (status != ERROR_SUCCESS) {
1754         strncpy(buf, "## __CPU__", buflen);
1755     }
1756     RegCloseKey(key);
1757   } else {
1758     // Fall back to generic CPU info
1759     strncpy(buf, "## __CPU__", buflen);
1760   }
1761 }
1762 
1763 void os::print_memory_info(outputStream* st) {
1764   st->print("Memory:");
1765   st->print(" %dk page", os::vm_page_size()>>10);
1766 
1767   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
1768   // incorrect value if total memory is larger than 4GB
1769   MEMORYSTATUSEX ms;
1770   ms.dwLength = sizeof(ms);
1771   int r1 = GlobalMemoryStatusEx(&ms);
1772 
1773   if (r1 != 0) {
1774     st->print(", system-wide physical " INT64_FORMAT "M ",
1775              (int64_t) ms.ullTotalPhys >> 20);
1776     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1777 
1778     st->print("TotalPageFile size " INT64_FORMAT "M ",
1779              (int64_t) ms.ullTotalPageFile >> 20);
1780     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1781              (int64_t) ms.ullAvailPageFile >> 20);
1782 
1783     // On 32-bit, Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit)
1784 #if defined(_M_IX86)
1785     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1786              (int64_t) ms.ullTotalVirtual >> 20);
1787     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1788 #endif
1789   } else {
1790     st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
1791   }
1792 
1793   // extended memory statistics for a process
1794   PROCESS_MEMORY_COUNTERS_EX pmex;
1795   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1796   pmex.cb = sizeof(pmex);
1797   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1798 
1799   if (r2 != 0) {
1800     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1801              (int64_t) pmex.WorkingSetSize >> 20);
1802     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1803 
1804     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1805              (int64_t) pmex.PrivateUsage >> 20);
1806     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1807   } else {
1808     st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
1809   }
1810 
1811   st->cr();
1812 }
1813 
1814 bool os::signal_sent_by_kill(const void* siginfo) {
1815   // TODO: Is this possible?
1816   return false;
1817 }
1818 
1819 void os::print_siginfo(outputStream *st, const void* siginfo) {
1820   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1821   st->print("siginfo:");
1822 
1823   char tmp[64];
1824   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1825     strcpy(tmp, "EXCEPTION_??");
1826   }
1827   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1828 
1829   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1830        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1831        er->NumberParameters >= 2) {
1832     switch (er->ExceptionInformation[0]) {
1833     case 0: st->print(", reading address"); break;
1834     case 1: st->print(", writing address"); break;
1835     case 8: st->print(", data execution prevention violation at address"); break;
1836     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1837                        er->ExceptionInformation[0]);
1838     }
1839     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1840   } else {
1841     int num = er->NumberParameters;
1842     if (num > 0) {
1843       st->print(", ExceptionInformation=");
1844       for (int i = 0; i < num; i++) {
1845         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1846       }
1847     }
1848   }
1849   st->cr();
1850 }
1851 
1852 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1853   // TODO: Can we kill thread?
1854   return false;
1855 }
1856 
1857 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1858   // do nothing
1859 }
1860 
1861 static char saved_jvm_path[MAX_PATH] = {0};
1862 
1863 // Find the full path to the current module, jvm.dll
1864 void os::jvm_path(char *buf, jint buflen) {
1865   // Error checking.
1866   if (buflen < MAX_PATH) {
1867     assert(false, "must use a large-enough buffer");
1868     buf[0] = '\0';
1869     return;
1870   }
1871   // Lazy resolve the path to current module.
1872   if (saved_jvm_path[0] != 0) {
1873     strcpy(buf, saved_jvm_path);
1874     return;
1875   }
1876 
1877   buf[0] = '\0';
1878   if (Arguments::sun_java_launcher_is_altjvm()) {
1879     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1880     // for a JAVA_HOME environment variable and fix up the path so it
1881     // looks like jvm.dll is installed there (append a fake suffix
1882     // hotspot/jvm.dll).
1883     char* java_home_var = ::getenv("JAVA_HOME");
1884     if (java_home_var != NULL && java_home_var[0] != 0 &&
1885         strlen(java_home_var) < (size_t)buflen) {
1886       strncpy(buf, java_home_var, buflen);
1887 
1888       // determine if this is a legacy image or modules image
1889       // modules image doesn't have "jre" subdirectory
1890       size_t len = strlen(buf);
1891       char* jrebin_p = buf + len;
1892       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1893       if (0 != _access(buf, 0)) {
1894         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1895       }
1896       len = strlen(buf);
1897       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1898     }
1899   }
1900 
1901   if (buf[0] == '\0') {
1902     GetModuleFileName(vm_lib_handle, buf, buflen);
1903   }
1904   strncpy(saved_jvm_path, buf, MAX_PATH);
1905   saved_jvm_path[MAX_PATH - 1] = '\0';
1906 }
1907 
1908 
1909 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1910 #ifndef _WIN64
1911   st->print("_");
1912 #endif
1913 }
1914 
1915 
1916 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1917 #ifndef _WIN64
1918   st->print("@%d", args_size  * sizeof(int));
1919 #endif
1920 }
1921 
1922 // This method is a copy of JDK's sysGetLastErrorString
1923 // from src/windows/hpi/src/system_md.c
1924 
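// Copies a human-readable description of the last error into buf: first the
// Windows error from GetLastError() (formatted via FormatMessage, with the
// trailing '.', CR and LF stripped), otherwise the C runtime errno via
// os::strerror(). Returns the length of the message, or 0 if no error is pending.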
1925 size_t os::lasterror(char* buf, size_t len) {
1926   DWORD errval;
1927 
1928   if ((errval = GetLastError()) != 0) {
1929     // DOS error
1930     size_t n = (size_t)FormatMessage(
1931                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1932                                      NULL,
1933                                      errval,
1934                                      0,
1935                                      buf,
1936                                      (DWORD)len,
1937                                      NULL);
1938     if (n > 3) {
1939       // Drop final '.', CR, LF
1940       if (buf[n - 1] == '\n') n--;
1941       if (buf[n - 1] == '\r') n--;
1942       if (buf[n - 1] == '.') n--;
1943       buf[n] = '\0';
1944     }
1945     return n;
1946   }
1947 
1948   if (errno != 0) {
1949     // C runtime error that has no corresponding DOS error code
1950     const char* s = os::strerror(errno);
1951     size_t n = strlen(s);
1952     if (n >= len) n = len - 1;
1953     strncpy(buf, s, n);
1954     buf[n] = '\0';
1955     return n;
1956   }
1957 
1958   return 0;
1959 }
1960 
1961 int os::get_last_error() {
1962   DWORD error = GetLastError();
1963   if (error == 0) {
1964     error = errno;
1965   }
1966   return (int)error;
1967 }
1968 
1969 // sun.misc.Signal
1970 // NOTE that this is a workaround for an apparent kernel bug where if
1971 // a signal handler for SIGBREAK is installed then that signal handler
1972 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1973 // See bug 4416763.
1974 static void (*sigbreakHandler)(int) = NULL;
1975 
1976 static void UserHandler(int sig, void *siginfo, void *context) {
1977   os::signal_notify(sig);
1978   // We need to reinstate the signal handler each time...
1979   os::signal(sig, (void*)UserHandler);
1980 }
1981 
1982 void* os::user_handler() {
1983   return (void*) UserHandler;
1984 }
1985 
1986 void* os::signal(int signal_number, void* handler) {
1987   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1988     void (*oldHandler)(int) = sigbreakHandler;
1989     sigbreakHandler = (void (*)(int)) handler;
1990     return (void*) oldHandler;
1991   } else {
1992     return (void*)::signal(signal_number, (void (*)(int))handler);
1993   }
1994 }
1995 
1996 void os::signal_raise(int signal_number) {
1997   raise(signal_number);
1998 }
1999 
2000 // The Win32 C runtime library maps all console control events other than ^C
2001 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2002 // logoff, and shutdown events.  We therefore install our own console handler
2003 // that raises SIGTERM for the latter cases.
2004 //
2005 static BOOL WINAPI consoleHandler(DWORD event) {
2006   switch (event) {
2007   case CTRL_C_EVENT:
2008     if (VMError::is_error_reported()) {
2009       // Ctrl-C is pressed during error reporting, likely because the error
2010       // handler fails to abort. Let the VM die immediately.
2011       os::die();
2012     }
2013 
2014     os::signal_raise(SIGINT);
2015     return TRUE;
2016     break;
2017   case CTRL_BREAK_EVENT:
2018     if (sigbreakHandler != NULL) {
2019       (*sigbreakHandler)(SIGBREAK);
2020     }
2021     return TRUE;
2022     break;
2023   case CTRL_LOGOFF_EVENT: {
2024     // Don't terminate JVM if it is running in a non-interactive session,
2025     // such as a service process.
2026     USEROBJECTFLAGS flags;
2027     HANDLE handle = GetProcessWindowStation();
2028     if (handle != NULL &&
2029         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2030         sizeof(USEROBJECTFLAGS), NULL)) {
2031       // If it is a non-interactive session, let the next handler deal
2032       // with it.
2033       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2034         return FALSE;
2035       }
2036     }
2037   }
2038   case CTRL_CLOSE_EVENT:
2039   case CTRL_SHUTDOWN_EVENT:
2040     os::signal_raise(SIGTERM);
2041     return TRUE;
2042     break;
2043   default:
2044     break;
2045   }
2046   return FALSE;
2047 }
2048 
2049 // The following code was moved from os.cpp to make this
2050 // code platform specific, which it is by its very nature.
2051 
2052 // Return maximum OS signal used + 1 for internal use only
2053 // Used as exit signal for signal_thread
2054 int os::sigexitnum_pd() {
2055   return NSIG;
2056 }
2057 
2058 // a counter for each possible signal value, including signal_thread exit signal
2059 static volatile jint pending_signals[NSIG+1] = { 0 };
2060 static Semaphore* sig_sem = NULL;
2061 
2062 static void jdk_misc_signal_init() {
2063   // Initialize signal structures
2064   memset((void*)pending_signals, 0, sizeof(pending_signals));
2065 
2066   // Initialize signal semaphore
2067   sig_sem = new Semaphore();
2068 
2069   // Programs embedding the VM do not want it to attempt to receive
2070   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2071   // shutdown hooks mechanism introduced in 1.3.  For example, when
2072   // the VM is run as part of a Windows NT service (i.e., a servlet
2073   // engine in a web server), the correct behavior is for any console
2074   // control handler to return FALSE, not TRUE, because the OS's
2075   // "final" handler for such events allows the process to continue if
2076   // it is a service (while terminating it if it is not a service).
2077   // To make this behavior uniform and the mechanism simpler, we
2078   // completely disable the VM's usage of these console events if -Xrs
2079   // (=ReduceSignalUsage) is specified.  This means, for example, that
2080   // the CTRL-BREAK thread dump mechanism is also disabled in this
2081   // case.  See bugs 4323062, 4345157, and related bugs.
2082 
2083   // Add a CTRL-C handler
2084   SetConsoleCtrlHandler(consoleHandler, TRUE);
2085 }
2086 
2087 void os::signal_notify(int sig) {
2088   if (sig_sem != NULL) {
2089     Atomic::inc(&pending_signals[sig]);
2090     sig_sem->signal();
2091   } else {
2092     // When ReduceSignalUsage is set, the signal thread is not created and
2093     // jdk_misc_signal_init() is not called.
2094     assert(ReduceSignalUsage, "signal semaphore should be created");
2095   }
2096 }
2097 
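// Waits until a signal posted by os::signal_notify() is pending and returns its
// number. Each pass scans the pending_signals counters and tries to claim one by
// decrementing it with a cmpxchg; if none is pending, the thread blocks on
// sig_sem (inside a ThreadBlockInVM transition), cooperating with external suspension.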
2098 static int check_pending_signals() {
2099   while (true) {
2100     for (int i = 0; i < NSIG + 1; i++) {
2101       jint n = pending_signals[i];
2102       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
2103         return i;
2104       }
2105     }
2106     JavaThread *thread = JavaThread::current();
2107 
2108     ThreadBlockInVM tbivm(thread);
2109 
2110     bool threadIsSuspended;
2111     do {
2112       thread->set_suspend_equivalent();
2113       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2114       sig_sem->wait();
2115 
2116       // were we externally suspended while we were waiting?
2117       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2118       if (threadIsSuspended) {
2119         // The semaphore has been incremented, but while we were waiting
2120         // another thread suspended us. We don't want to continue running
2121         // while suspended because that would surprise the thread that
2122         // suspended us.
2123         sig_sem->signal();
2124 
2125         thread->java_suspend_self();
2126       }
2127     } while (threadIsSuspended);
2128   }
2129 }
2130 
2131 int os::signal_wait() {
2132   return check_pending_signals();
2133 }
2134 
2135 // Implicit OS exception handling
2136 
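// Handle_Exception() resumes execution at 'handler' by saving the faulting pc in
// the current JavaThread (if any) and rewriting the instruction pointer (Rip on
// x64, Eip on x86) in the exception context before returning
// EXCEPTION_CONTINUE_EXECUTION.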
2137 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2138                       address handler) {
2139   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2140   // Save pc in thread
2141 #ifdef _M_AMD64
2142   // Do not blow up if no thread info available.
2143   if (thread) {
2144     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2145   }
2146   // Set pc to handler
2147   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2148 #else
2149   // Do not blow up if no thread info available.
2150   if (thread) {
2151     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2152   }
2153   // Set pc to handler
2154   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2155 #endif
2156 
2157   // Continue the execution
2158   return EXCEPTION_CONTINUE_EXECUTION;
2159 }
2160 
2161 
2162 // Used for PostMortemDump
2163 extern "C" void safepoints();
2164 extern "C" void find(int x);
2165 extern "C" void events();
2166 
2167 // According to Windows API documentation, an illegal instruction sequence should generate
2168 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2169 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2170 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2171 
2172 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2173 
2174 // From "Execution Protection in the Windows Operating System" draft 0.35
2175 // Once a system header becomes available, the "real" define should be
2176 // included or copied here.
2177 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2178 
2179 // Windows Vista/2008 heap corruption check
2180 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2181 
2182 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2183 // C++ compiler contain this error code. Because this is a compiler-generated
2184 // error, the code is not listed in the Win32 API header files.
2185 // The code is actually a cryptic mnemonic device, with the initial "E"
2186 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2187 // ASCII values of "msc".
2188 
2189 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2190 
2191 #define def_excpt(val) { #val, (val) }
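// For example, def_excpt(EXCEPTION_ACCESS_VIOLATION) expands to
//   { "EXCEPTION_ACCESS_VIOLATION", EXCEPTION_ACCESS_VIOLATION }
// so the table below maps exception codes to their printable names.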
2192 
2193 static const struct { const char* name; uint number; } exceptlabels[] = {
2194     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2195     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2196     def_excpt(EXCEPTION_BREAKPOINT),
2197     def_excpt(EXCEPTION_SINGLE_STEP),
2198     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2199     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2200     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2201     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2202     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2203     def_excpt(EXCEPTION_FLT_OVERFLOW),
2204     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2205     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2206     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2207     def_excpt(EXCEPTION_INT_OVERFLOW),
2208     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2209     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2210     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2211     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2212     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2213     def_excpt(EXCEPTION_STACK_OVERFLOW),
2214     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2215     def_excpt(EXCEPTION_GUARD_PAGE),
2216     def_excpt(EXCEPTION_INVALID_HANDLE),
2217     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2218     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2219 };
2220 
2221 #undef def_excpt
2222 
2223 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2224   uint code = static_cast<uint>(exception_code);
2225   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2226     if (exceptlabels[i].number == code) {
2227       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2228       return buf;
2229     }
2230   }
2231 
2232   return NULL;
2233 }
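// Note: callers must handle a NULL return for codes not in the table above;
// see os::print_siginfo(), which substitutes "EXCEPTION_??" in that case.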
2234 
2235 //-----------------------------------------------------------------------------
2236 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2237   // handle exception caused by idiv; should only happen for -MinInt/-1
2238   // (division by zero is handled explicitly)
2239 #ifdef  _M_AMD64
2240   PCONTEXT ctx = exceptionInfo->ContextRecord;
2241   address pc = (address)ctx->Rip;
2242   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2243   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2244   if (pc[0] == 0xF7) {
2245     // set correct result values and continue after idiv instruction
2246     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2247   } else {
2248     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2249   }
2250   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation).
2251   // This is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2252   // idiv opcode (0xF7).
2253   ctx->Rdx = (DWORD)0;             // remainder
2254   // Continue the execution
2255 #else
2256   PCONTEXT ctx = exceptionInfo->ContextRecord;
2257   address pc = (address)ctx->Eip;
2258   assert(pc[0] == 0xF7, "not an idiv opcode");
2259   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2260   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2261   // set correct result values and continue after idiv instruction
2262   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2263   ctx->Eax = (DWORD)min_jint;      // result
2264   ctx->Edx = (DWORD)0;             // remainder
2265   // Continue the execution
2266 #endif
2267   return EXCEPTION_CONTINUE_EXECUTION;
2268 }
2269 
2270 //-----------------------------------------------------------------------------
2271 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2272   PCONTEXT ctx = exceptionInfo->ContextRecord;
2273 #ifndef  _WIN64
2274   // handle exception caused by native method modifying control word
2275   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2276 
2277   switch (exception_code) {
2278   case EXCEPTION_FLT_DENORMAL_OPERAND:
2279   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2280   case EXCEPTION_FLT_INEXACT_RESULT:
2281   case EXCEPTION_FLT_INVALID_OPERATION:
2282   case EXCEPTION_FLT_OVERFLOW:
2283   case EXCEPTION_FLT_STACK_CHECK:
2284   case EXCEPTION_FLT_UNDERFLOW:
2285     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2286     if (fp_control_word != ctx->FloatSave.ControlWord) {
2287       // Restore FPCW and mask out FLT exceptions
2288       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2289       // Mask out pending FLT exceptions
2290       ctx->FloatSave.StatusWord &=  0xffffff00;
2291       return EXCEPTION_CONTINUE_EXECUTION;
2292     }
2293   }
2294 
2295   if (prev_uef_handler != NULL) {
2296     // We didn't handle this exception so pass it to the previous
2297     // UnhandledExceptionFilter.
2298     return (prev_uef_handler)(exceptionInfo);
2299   }
2300 #else // !_WIN64
2301   // On Windows, the mxcsr control bits are non-volatile across calls
2302   // See also CR 6192333
2303   //
2304   jint MxCsr = INITIAL_MXCSR;
2305   // we can't use StubRoutines::addr_mxcsr_std()
2306   // because in Win64 mxcsr is not saved there
2307   if (MxCsr != ctx->MxCsr) {
2308     ctx->MxCsr = MxCsr;
2309     return EXCEPTION_CONTINUE_EXECUTION;
2310   }
2311 #endif // !_WIN64
2312 
2313   return EXCEPTION_CONTINUE_SEARCH;
2314 }
2315 
2316 static inline void report_error(Thread* t, DWORD exception_code,
2317                                 address addr, void* siginfo, void* context) {
2318   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2319 
2320   // If UseOsErrorReporting, this will return here and save the error file
2321   // somewhere where we can find it in the minidump.
2322 }
2323 
2324 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2325         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2326   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2327   address addr = (address) exceptionRecord->ExceptionInformation[1];
2328   if (Interpreter::contains(pc)) {
2329     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2330     if (!fr->is_first_java_frame()) {
2331       // get_frame_at_stack_banging_point() is only called when we
2332       // have well defined stacks so java_sender() calls do not need
2333       // to assert safe_for_sender() first.
2334       *fr = fr->java_sender();
2335     }
2336   } else {
2337     // more complex code with compiled code
2338     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2339     CodeBlob* cb = CodeCache::find_blob(pc);
2340     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2341       // Not sure where the pc points to, fallback to default
2342       // stack overflow handling
2343       return false;
2344     } else {
2345       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2346       // in compiled code, the stack banging is performed just after the return pc
2347       // has been pushed on the stack
2348       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2349       if (!fr->is_java_frame()) {
2350         // See java_sender() comment above.
2351         *fr = fr->java_sender();
2352       }
2353     }
2354   }
2355   assert(fr->is_java_frame(), "Safety check");
2356   return true;
2357 }
2358 
2359 #if INCLUDE_AOT
2360 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2361   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2362   address addr = (address) exceptionRecord->ExceptionInformation[1];
2363   address pc = (address) exceptionInfo->ContextRecord->Rip;
2364 
2365   // Handle the case where we get an implicit exception in AOT generated
2366   // code.  Loaded AOT DLLs are not registered for structured exceptions.
2367   // If the exception occurred in the codeCache or AOT code, pass control
2368   // to our normal exception handler.
2369   CodeBlob* cb = CodeCache::find_blob(pc);
2370   if (cb != NULL) {
2371     return topLevelExceptionFilter(exceptionInfo);
2372   }
2373 
2374   return EXCEPTION_CONTINUE_SEARCH;
2375 }
2376 #endif
2377 
2378 //-----------------------------------------------------------------------------
2379 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2380   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2381   PEXCEPTION_RECORD exception_record = exceptionInfo->ExceptionRecord;
2382   DWORD exception_code = exception_record->ExceptionCode;
2383 #ifdef _M_AMD64
2384   address pc = (address) exceptionInfo->ContextRecord->Rip;
2385 #else
2386   address pc = (address) exceptionInfo->ContextRecord->Eip;
2387 #endif
2388   Thread* t = Thread::current_or_null_safe();
2389 
2390   // Handle SafeFetch32 and SafeFetchN exceptions.
2391   if (StubRoutines::is_safefetch_fault(pc)) {
2392     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2393   }
2394 
2395 #ifndef _WIN64
2396   // Execution protection violation - win32 running on AMD64 only
2397   // Handled first to avoid misdiagnosis as a "normal" access violation.
2398   // This is safe to do because we have a new/unique ExceptionInformation
2399   // code for this condition.
2400   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2401     int exception_subcode = (int) exception_record->ExceptionInformation[0];
2402     address addr = (address) exception_record->ExceptionInformation[1];
2403 
2404     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2405       int page_size = os::vm_page_size();
2406 
2407       // Make sure the pc and the faulting address are sane.
2408       //
2409       // If an instruction spans a page boundary, and the page containing
2410       // the beginning of the instruction is executable but the following
2411       // page is not, the pc and the faulting address might be slightly
2412       // different - we still want to unguard the 2nd page in this case.
2413       //
2414       // 15 bytes seems to be a (very) safe value for max instruction size.
2415       bool pc_is_near_addr =
2416         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2417       bool instr_spans_page_boundary =
2418         (align_down((intptr_t) pc ^ (intptr_t) addr,
2419                          (intptr_t) page_size) > 0);
2420 
2421       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2422         static volatile address last_addr =
2423           (address) os::non_memory_address_word();
2424 
2425         // In conservative mode, don't unguard unless the address is in the VM
2426         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2427             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2428 
2429           // Set memory to RWX and retry
2430           address page_start = align_down(addr, page_size);
2431           bool res = os::protect_memory((char*) page_start, page_size,
2432                                         os::MEM_PROT_RWX);
2433 
2434           log_debug(os)("Execution protection violation "
2435                         "at " INTPTR_FORMAT
2436                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2437                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2438 
2439           // Set last_addr so if we fault again at the same address, we don't
2440           // end up in an endless loop.
2441           //
2442           // There are two potential complications here.  Two threads trapping
2443           // at the same address at the same time could cause one of the
2444           // threads to think the page was already unguarded, and abort the VM.  Likely
2445           // very rare.
2446           //
2447           // The other race involves two threads alternately trapping at
2448           // different addresses and failing to unguard the page, resulting in
2449           // an endless loop.  This condition is probably even more unlikely
2450           // than the first.
2451           //
2452           // Although both cases could be avoided by using locks or thread
2453           // local last_addr, these solutions are unnecessary complication:
2454           // this handler is a best-effort safety net, not a complete solution.
2455           // It is disabled by default and should only be used as a workaround
2456           // in case we missed any no-execute-unsafe VM code.
2457 
2458           last_addr = addr;
2459 
2460           return EXCEPTION_CONTINUE_EXECUTION;
2461         }
2462       }
2463 
2464       // Last unguard failed or not unguarding
2465       tty->print_raw_cr("Execution protection violation");
2466       report_error(t, exception_code, addr, exception_record,
2467                    exceptionInfo->ContextRecord);
2468       return EXCEPTION_CONTINUE_SEARCH;
2469     }
2470   }
2471 #endif // _WIN64
2472 
2473   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2474       VM_Version::is_cpuinfo_segv_addr(pc)) {
2475     // Verify that the OS saves/restores AVX registers.
2476     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2477   }
2478 
2479   if (t != NULL && t->is_Java_thread()) {
2480     JavaThread* thread = (JavaThread*) t;
2481     bool in_java = thread->thread_state() == _thread_in_Java;
2482     bool in_native = thread->thread_state() == _thread_in_native;
2483     bool in_vm = thread->thread_state() == _thread_in_vm;
2484 
2485     // Handle potential stack overflows up front.
2486     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2487       if (thread->stack_guards_enabled()) {
2488         if (in_java) {
2489           frame fr;
2490           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2491             assert(fr.is_java_frame(), "Must be a Java frame");
2492             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2493           }
2494         }
2495         // Yellow zone violation.  The o/s has unprotected the first yellow
2496         // zone page for us.  Note:  must call disable_stack_yellow_zone to
2497         // update the enabled status, even if the zone contains only one page.
2498         assert(!in_vm, "Undersized StackShadowPages");
2499         thread->disable_stack_yellow_reserved_zone();
2500         // If not in java code, return and hope for the best.
2501         return in_java
2502             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2503             :  EXCEPTION_CONTINUE_EXECUTION;
2504       } else {
2505         // Fatal red zone violation.
2506         thread->disable_stack_red_zone();
2507         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2508         report_error(t, exception_code, pc, exception_record,
2509                       exceptionInfo->ContextRecord);
2510         return EXCEPTION_CONTINUE_SEARCH;
2511       }
2512     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2513       if (in_java) {
2514         // Either stack overflow or null pointer exception.
2515         address addr = (address) exception_record->ExceptionInformation[1];
2516         address stack_end = thread->stack_end();
2517         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2518           // Stack overflow.
2519           assert(!os::uses_stack_guard_pages(),
2520                  "should be caught by red zone code above.");
2521           return Handle_Exception(exceptionInfo,
2522                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2523         }
2524         // Check for safepoint polling and implicit null
2525         // We only expect null pointers in the stubs (vtable);
2526         // the rest are checked explicitly now.
2527         CodeBlob* cb = CodeCache::find_blob(pc);
2528         if (cb != NULL) {
2529           if (SafepointMechanism::is_poll_address(addr)) {
2530             address stub = SharedRuntime::get_poll_stub(pc);
2531             return Handle_Exception(exceptionInfo, stub);
2532           }
2533         }
2534 #ifdef _WIN64
2535         // If it's a legal stack address, map the entire region in
2536         if (thread->is_in_usable_stack(addr)) {
2537           addr = (address)((uintptr_t)addr &
2538                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2539           os::commit_memory((char *)addr, thread->stack_base() - addr,
2540                             !ExecMem);
2541           return EXCEPTION_CONTINUE_EXECUTION;
2542         }
2543 #endif
2544         // Null pointer exception.
2545         if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2546           address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2547           if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2548         }
2549         report_error(t, exception_code, pc, exception_record,
2550                       exceptionInfo->ContextRecord);
2551         return EXCEPTION_CONTINUE_SEARCH;
2552       }
2553 
2554 #ifdef _WIN64
2555       // Special care for fast JNI field accessors.
2556       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2557       // in and the heap gets shrunk before the field access.
2558       address slowcase_pc = JNI_FastGetField::find_slowcase_pc(pc);
2559       if (slowcase_pc != (address)-1) {
2560         return Handle_Exception(exceptionInfo, slowcase_pc);
2561       }
2562 #endif
2563 
2564       // Stack overflow or null pointer exception in native code.
2565       report_error(t, exception_code, pc, exception_record,
2566                    exceptionInfo->ContextRecord);
2567       return EXCEPTION_CONTINUE_SEARCH;
2568     } // /EXCEPTION_ACCESS_VIOLATION
2569     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2570 
2571     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2572       CompiledMethod* nm = NULL;
2573       JavaThread* thread = (JavaThread*)t;
2574       if (in_java) {
2575         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2576         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2577       }
2578 
2579       bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2580       if (((in_vm || in_native || is_unsafe_arraycopy) && thread->doing_unsafe_access()) ||
2581           (nm != NULL && nm->has_unsafe_access())) {
2582         address next_pc =  Assembler::locate_next_instruction(pc);
2583         if (is_unsafe_arraycopy) {
2584           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2585         }
2586         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2587       }
2588     }
2589 
2590     if (in_java) {
2591       switch (exception_code) {
2592       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2593         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2594 
2595       case EXCEPTION_INT_OVERFLOW:
2596         return Handle_IDiv_Exception(exceptionInfo);
2597 
2598       } // switch
2599     }
2600     if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2601       LONG result = Handle_FLT_Exception(exceptionInfo);
2602       if (result == EXCEPTION_CONTINUE_EXECUTION) return result;
2603     }
2604   }
2605 
2606   if (exception_code != EXCEPTION_BREAKPOINT) {
2607     report_error(t, exception_code, pc, exception_record,
2608                  exceptionInfo->ContextRecord);
2609   }
2610   return EXCEPTION_CONTINUE_SEARCH;
2611 }
2612 
2613 #ifndef _WIN64
2614 // Special care for fast JNI accessors.
2615 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2616 // the heap gets shrunk before the field access.
2617 // Need to install our own structured exception handler since native code may
2618 // install its own.
2619 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2620   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2621   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2622     address pc = (address) exceptionInfo->ContextRecord->Eip;
2623     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2624     if (addr != (address)-1) {
2625       return Handle_Exception(exceptionInfo, addr);
2626     }
2627   }
2628   return EXCEPTION_CONTINUE_SEARCH;
2629 }
2630 
2631 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2632   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2633                                                      jobject obj,           \
2634                                                      jfieldID fieldID) {    \
2635     __try {                                                                 \
2636       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2637                                                                  obj,       \
2638                                                                  fieldID);  \
2639     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2640                                               _exception_info())) {         \
2641     }                                                                       \
2642     return 0;                                                               \
2643   }
2644 
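// Each expansion below defines a wrapper such as jni_fast_GetBooleanField_wrapper
// that calls the fast accessor inside an SEH __try/__except whose filter is
// fastJNIAccessorExceptionFilter() above, so a GC-induced fault is redirected to
// the accessor's slow-case pc.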
2645 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2646 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2647 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2648 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2649 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2650 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2651 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2652 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2653 
2654 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2655   switch (type) {
2656   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2657   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2658   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2659   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2660   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2661   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2662   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2663   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2664   default:        ShouldNotReachHere();
2665   }
2666   return (address)-1;
2667 }
2668 #endif
2669 
2670 // Virtual Memory
2671 
2672 int os::vm_page_size() { return os::win32::vm_page_size(); }
2673 int os::vm_allocation_granularity() {
2674   return os::win32::vm_allocation_granularity();
2675 }
2676 
2677 // Windows large page support is available on Windows 2003. In order to use
2678 // large page memory, the administrator must first assign additional privilege
2679 // to the user:
2680 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2681 //   + select Local Policies -> User Rights Assignment
2682 //   + double click "Lock pages in memory", add users and/or groups
2683 //   + reboot
2684 // Note the above steps are needed for administrator as well, as administrators
2685 // by default do not have the privilege to lock pages in memory.
2686 //
2687 // Note about Windows 2003: although the API supports committing large page
2688 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2689 // scenario, I found through experimentation that it only uses large pages if the entire
2690 // memory region is reserved and committed in a single VirtualAlloc() call.
2691 // This makes Windows large page support more or less like Solaris ISM, in
2692 // that the entire heap must be committed upfront. This will probably change
2693 // in the future; if so, the code below needs to be revisited.
2694 
2695 #ifndef MEM_LARGE_PAGES
2696   #define MEM_LARGE_PAGES 0x20000000
2697 #endif
2698 
2699 #define VirtualFreeChecked(mem, size, type)                       \
2700   do {                                                            \
2701     bool ret = VirtualFree(mem, size, type);                      \
2702     assert(ret, "Failed to free memory: " PTR_FORMAT, p2i(mem));  \
2703   } while (false)
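// The do { ... } while (false) wrapper makes VirtualFreeChecked expand to a
// single statement, so it composes safely with if/else at the call sites.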
2704 
2705 // The number of bytes is set up to match 1 pixel and 32 bits per pixel.
2706 static const int gdi_tiny_bitmap_width_bytes = 4;
2707 
2708 static HBITMAP gdi_create_tiny_bitmap(void* mem) {
2709   // The documentation for CreateBitmap states a word-alignment requirement.
2710   STATIC_ASSERT(is_aligned_(gdi_tiny_bitmap_width_bytes, sizeof(WORD)));
2711 
2712   // Some callers use this function to test if memory crossing separate memory
2713   // reservations can be used. Create a height of 2 to make sure that one pixel
2714   // ends up in the first reservation and the other in the second.
2715   int nHeight = 2;
2716 
2717   assert(is_aligned(mem, gdi_tiny_bitmap_width_bytes), "Incorrect alignment");
2718 
2719   // Width is one pixel and correlates with gdi_tiny_bitmap_width_bytes.
2720   int nWidth = 1;
2721 
2722   // Calculate bit count - will be 32.
2723   UINT nBitCount = gdi_tiny_bitmap_width_bytes / nWidth * BitsPerByte;
2724 
2725   return CreateBitmap(
2726       nWidth,
2727       nHeight,
2728       1,         // nPlanes
2729       nBitCount,
2730       mem);      // lpBits
2731 }
2732 
2733 // It has been found that some of the GDI functions fail under these two situations:
2734 //  1) When used with large pages
2735 //  2) When mem crosses the boundary between two separate memory reservations.
2736 //
2737 // This is a small test used to see if the current GDI implementation is
2738 // susceptible to any of these problems.
2739 static bool gdi_can_use_memory(void* mem) {
2740   HBITMAP bitmap = gdi_create_tiny_bitmap(mem);
2741   if (bitmap != NULL) {
2742     DeleteObject(bitmap);
2743     return true;
2744   }
2745 
2746   // Verify that the bitmap could be created with a normal page.
2747   // If this fails, the testing method above isn't reliable.
2748 #ifdef ASSERT
2749   void* verify_mem = ::malloc(4 * 1024);
2750   HBITMAP verify_bitmap = gdi_create_tiny_bitmap(verify_mem);
2751   if (verify_bitmap == NULL) {
2752     fatal("Couldn't create test bitmap with malloced memory");
2753   } else {
2754     DeleteObject(verify_bitmap);
2755   }
2756   ::free(verify_mem);
2757 #endif
2758 
2759   return false;
2760 }
2761 
2762 // Test if GDI functions work when memory spans
2763 // two adjacent memory reservations.
2764 static bool gdi_can_use_split_reservation_memory(bool use_large_pages, size_t granule) {
2765   DWORD mem_large_pages = use_large_pages ? MEM_LARGE_PAGES : 0;
2766 
2767   // Find virtual memory range. Two granules for regions and one for alignment.
2768   void* reserved = VirtualAlloc(NULL,
2769                                 granule * 3,
2770                                 MEM_RESERVE,
2771                                 PAGE_NOACCESS);
2772   if (reserved == NULL) {
2773     // Can't proceed with test - pessimistically report false
2774     return false;
2775   }
2776   VirtualFreeChecked(reserved, 0, MEM_RELEASE);
2777 
2778   // Ensure proper alignment
2779   void* res0 = align_up(reserved, granule);
2780   void* res1 = (char*)res0 + granule;
2781 
2782   // Reserve and commit the first part
2783   void* mem0 = VirtualAlloc(res0,
2784                             granule,
2785                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2786                             PAGE_READWRITE);
2787   if (mem0 != res0) {
2788     // Can't proceed with test - pessimistically report false
2789     return false;
2790   }
2791 
2792   // Reserve and commit the second part
2793   void* mem1 = VirtualAlloc(res1,
2794                             granule,
2795                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2796                             PAGE_READWRITE);
2797   if (mem1 != res1) {
2798     VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2799     // Can't proceed with test - pessimistically report false
2800     return false;
2801   }
2802 
2803   // Set the bitmap's bits to start one "width" worth of bytes before the
2804   // boundary, so that the bitmap extends across the reservation boundary.
2805   void* bitmapBits = (char*)mem1 - gdi_tiny_bitmap_width_bytes;
2806 
2807   bool success = gdi_can_use_memory(bitmapBits);
2808 
2809   VirtualFreeChecked(mem1, 0, MEM_RELEASE);
2810   VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2811 
2812   return success;
2813 }
2814 
2815 // Container for NUMA node list info
2816 class NUMANodeListHolder {
2817  private:
2818   int *_numa_used_node_list;  // allocated below
2819   int _numa_used_node_count;
2820 
2821   void free_node_list() {
2822     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2823   }
2824 
2825  public:
2826   NUMANodeListHolder() {
2827     _numa_used_node_count = 0;
2828     _numa_used_node_list = NULL;
2829     // do rest of initialization in build routine (after function pointers are set up)
2830   }
2831 
2832   ~NUMANodeListHolder() {
2833     free_node_list();
2834   }
2835 
2836   bool build() {
2837     DWORD_PTR proc_aff_mask;
2838     DWORD_PTR sys_aff_mask;
2839     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2840     ULONG highest_node_number;
2841     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2842     free_node_list();
2843     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2844     for (unsigned int i = 0; i <= highest_node_number; i++) {
2845       ULONGLONG proc_mask_numa_node;
2846       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2847       if ((proc_aff_mask & proc_mask_numa_node) != 0) {
2848         _numa_used_node_list[_numa_used_node_count++] = i;
2849       }
2850     }
2851     return (_numa_used_node_count > 1);
2852   }
2853 
2854   int get_count() { return _numa_used_node_count; }
2855   int get_node_list_entry(int n) {
2856     // for indexes out of range, returns -1
2857     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2858   }
2859 
2860 } numa_node_list_holder;
2861 
2862 static size_t _large_page_size = 0;
2863 
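// Enables the SeLockMemoryPrivilege ("Lock pages in memory") on the current
// process token. Large page allocations require this privilege; see the setup
// steps described in the large page comment above.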
2864 static bool request_lock_memory_privilege() {
2865   HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2866                                 os::current_process_id());
2867 
2868   bool success = false;
2869   HANDLE hToken = NULL;
2870   LUID luid;
2871   if (hProcess != NULL &&
2872       OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
2873       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2874 
2875     TOKEN_PRIVILEGES tp;
2876     tp.PrivilegeCount = 1;
2877     tp.Privileges[0].Luid = luid;
2878     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2879 
2880     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2881     // privilege. Check GetLastError() too. See MSDN document.
2882     if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2883         (GetLastError() == ERROR_SUCCESS)) {
2884       success = true;
2885     }
2886   }
2887 
2888   // Cleanup
2889   if (hProcess != NULL) {
2890     CloseHandle(hProcess);
2891   }
2892   if (hToken != NULL) {
2893     CloseHandle(hToken);
2894   }
2895 
2896   return success;
2897 }
2898 
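// Determines whether NUMA interleaving can be used: the process must span more
// than one NUMA node and Windows GDI must be able to handle memory that crosses
// reservation boundaries at the chosen interleave granularity.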
2899 static bool numa_interleaving_init() {
2900   bool success = false;
2901 
2902   // print a warning if UseNUMAInterleaving flag is specified on command line
2903   bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2904 
2905 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2906 
2907   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2908   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2909   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2910 
2911   if (!numa_node_list_holder.build()) {
2912     WARN("Process does not cover multiple NUMA nodes.");
2913     WARN("...Ignoring UseNUMAInterleaving flag.");
2914     return false;
2915   }
2916 
2917   if (!gdi_can_use_split_reservation_memory(UseLargePages, min_interleave_granularity)) {
2918     WARN("Windows GDI cannot handle split reservations.");
2919     WARN("...Ignoring UseNUMAInterleaving flag.");
2920     return false;
2921   }
2922 
2923   if (log_is_enabled(Debug, os, cpu)) {
2924     Log(os, cpu) log;
2925     log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2926     for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2927       log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2928     }
2929   }
2930 
2931 #undef WARN
2932 
2933   return true;
2934 }
2935 
2936 // This routine is used whenever we need to reserve a contiguous VA range,
2937 // but we need to make separate VirtualAlloc calls for each piece of the range.
2938 // Reasons for doing this:
2939 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it can be set otherwise)
2940 //  * UseNUMAInterleaving requires a separate node for each piece
2941 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2942                                          DWORD prot,
2943                                          bool should_inject_error = false) {
2944   char * p_buf;
2945   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2946   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2947   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2948 
2949   // First reserve enough address space in advance, since we want to be
2950   // able to break a single contiguous virtual address range into multiple
2951   // large page commits, but WS2003 does not allow reserving large page space.
2952   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2953   // address space. Then we deallocate that reservation and re-allocate
2954   // using large pages.
2955   const size_t size_of_reserve = bytes + chunk_size;
2956   if (bytes > size_of_reserve) {
2957     // Overflowed.
2958     return NULL;
2959   }
2960   p_buf = (char *) VirtualAlloc(addr,
2961                                 size_of_reserve,  // size of Reserve
2962                                 MEM_RESERVE,
2963                                 PAGE_READWRITE);
2964   // If reservation failed, return NULL
2965   if (p_buf == NULL) return NULL;
2966   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2967   os::release_memory(p_buf, bytes + chunk_size);
2968 
2969   // We still need to round up to a page boundary (in case we are using large pages),
2970   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with the page size);
2971   // instead we handle this in the bytes_to_rq computation below.
2972   p_buf = align_up(p_buf, page_size);
2973 
2974   // now go through and allocate one chunk at a time until all bytes are
2975   // allocated
2976   size_t  bytes_remaining = bytes;
2977   // An overflow of align_up() would have been caught above
2978   // in the calculation of size_of_reserve.
2979   char * next_alloc_addr = p_buf;
2980   HANDLE hProc = GetCurrentProcess();
2981 
2982 #ifdef ASSERT
2983   // Variable for the failure injection
2984   int ran_num = os::random();
2985   size_t fail_after = ran_num % bytes;
2986 #endif
2987 
2988   int count=0;
2989   while (bytes_remaining) {
2990     // select bytes_to_rq to get to the next chunk_size boundary
2991 
2992     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2993     // Note allocate and commit
2994     char * p_new;
2995 
2996 #ifdef ASSERT
2997     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2998 #else
2999     const bool inject_error_now = false;
3000 #endif
3001 
3002     if (inject_error_now) {
3003       p_new = NULL;
3004     } else {
3005       if (!UseNUMAInterleaving) {
3006         p_new = (char *) VirtualAlloc(next_alloc_addr,
3007                                       bytes_to_rq,
3008                                       flags,
3009                                       prot);
3010       } else {
3011         // get the next node to use from the used_node_list
3012         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3013         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3014         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3015       }
3016     }
3017 
3018     if (p_new == NULL) {
3019       // Free any allocated pages
3020       if (next_alloc_addr > p_buf) {
3021         // Some memory was committed so release it.
3022         size_t bytes_to_release = bytes - bytes_remaining;
3023         // NMT has yet to record any individual blocks, so it
3024         // needs to create a dummy 'reserve' record to match
3025         // the release.
3026         MemTracker::record_virtual_memory_reserve((address)p_buf,
3027                                                   bytes_to_release, CALLER_PC);
3028         os::release_memory(p_buf, bytes_to_release);
3029       }
3030 #ifdef ASSERT
3031       if (should_inject_error) {
3032         log_develop_debug(pagesize)("Reserving pages individually failed.");
3033       }
3034 #endif
3035       return NULL;
3036     }
3037 
3038     bytes_remaining -= bytes_to_rq;
3039     next_alloc_addr += bytes_to_rq;
3040     count++;
3041   }
3042   // Although the memory is allocated individually, it is returned as one.
3043   // NMT records it as one block.
3044   if ((flags & MEM_COMMIT) != 0) {
3045     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3046   } else {
3047     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3048   }
3049 
3050   // made it this far, success
3051   return p_buf;
3052 }
3053 
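// Decide which large page size to use. Requires the SeLockMemoryPrivilege, starts
// from GetLargePageMinimum(), honours LargePageSizeInBytes when it is a multiple
// of that minimum, and finally verifies that a single large page can be allocated
// and used by GDI. Returns 0 if large pages cannot be used.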
3054 static size_t large_page_init_decide_size() {
3055   // print a warning if any large page related flag is specified on command line
3056   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3057                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3058 
3059 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3060 
3061   if (!request_lock_memory_privilege()) {
3062     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3063     return 0;
3064   }
3065 
3066   size_t size = GetLargePageMinimum();
3067   if (size == 0) {
3068     WARN("Large page is not supported by the processor.");
3069     return 0;
3070   }
3071 
3072 #if defined(IA32) || defined(AMD64)
3073   if (size > 4*M || LargePageSizeInBytes > 4*M) {
3074     WARN("JVM cannot use large pages bigger than 4MB.");
3075     return 0;
3076   }
3077 #endif
3078 
3079   if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
3080     size = LargePageSizeInBytes;
3081   }
3082 
3083   // Now test allocating a page
3084   void* large_page = VirtualAlloc(NULL,
3085                                   size,
3086                                   MEM_RESERVE|MEM_COMMIT|MEM_LARGE_PAGES,
3087                                   PAGE_READWRITE);
3088   if (large_page == NULL) {
3089     WARN("JVM cannot allocate one single large page.");
3090     return 0;
3091   }
3092 
3093   // Detect if GDI can use memory backed by large pages
3094   if (!gdi_can_use_memory(large_page)) {
3095     WARN("JVM cannot use large pages because of bug in Windows GDI.");
3096     return 0;
3097   }
3098 
3099   // Release test page
3100   VirtualFreeChecked(large_page, 0, MEM_RELEASE);
3101 
3102 #undef WARN
3103 
3104   return size;
3105 }
3106 
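// Determine the large page size and publish it: updates the _page_sizes table and
// turns UseLargePages off again if no usable large page size was found.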
3107 void os::large_page_init() {
3108   if (!UseLargePages) {
3109     return;
3110   }
3111 
3112   _large_page_size = large_page_init_decide_size();
3113 
3114   const size_t default_page_size = (size_t) vm_page_size();
3115   if (_large_page_size > default_page_size) {
3116     _page_sizes[0] = _large_page_size;
3117     _page_sizes[1] = default_page_size;
3118     _page_sizes[2] = 0;
3119   }
3120 
3121   UseLargePages = _large_page_size != 0;
3122 
3123   if (UseLargePages && UseLargePagesIndividualAllocation) {
3124     if (!gdi_can_use_split_reservation_memory(true /* use_large_pages */, _large_page_size)) {
3125       if (FLAG_IS_CMDLINE(UseLargePagesIndividualAllocation)) {
3126         warning("Windows GDI cannot handle split reservations.");
3127         warning("...Ignoring UseLargePagesIndividualAllocation flag.");
3128       }
3129       UseLargePagesIndividualAllocation = false;
3130     }
3131   }
3132 }
3133 
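// Create a temporary file (jvmheap.XXXXXX) in the given directory to back the
// Java heap. The file is opened with O_TEMPORARY, so it is removed when the last
// descriptor is closed. Returns an open file descriptor, or -1 on failure.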
3134 int os::create_file_for_heap(const char* dir) {
3135 
3136   const char name_template[] = "/jvmheap.XXXXXX";
3137 
3138   size_t fullname_len = strlen(dir) + strlen(name_template);
3139   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3140   if (fullname == NULL) {
3141     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3142     return -1;
3143   }
3144   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3145   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3146 
3147   os::native_path(fullname);
3148 
3149   char *path = _mktemp(fullname);
3150   if (path == NULL) {
3151     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3152     os::free(fullname);
3153     return -1;
3154   }
3155 
3156   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3157 
3158   os::free(fullname);
3159   if (fd < 0) {
3160     warning("Problem opening file for heap (%s)", os::strerror(errno));
3161     return -1;
3162   }
3163   return fd;
3164 }
3165 
3166 // If 'base' is not NULL, the function will return NULL if it cannot map the memory at 'base'.
3167 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3168   assert(fd != -1, "File descriptor is not valid");
3169 
3170   HANDLE fh = (HANDLE)_get_osfhandle(fd);
3171 #ifdef _LP64
3172   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3173     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3174 #else
3175   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3176     0, (DWORD)size, NULL);
3177 #endif
3178   if (fileMapping == NULL) {
3179     if (GetLastError() == ERROR_DISK_FULL) {
3180       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3181     } else {
3183       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3184     }
3185 
3186     return NULL;
3187   }
3188 
3189   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3190 
3191   CloseHandle(fileMapping);
3192 
3193   return (char*)addr;
3194 }
3195 
3196 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3197   assert(fd != -1, "File descriptor is not valid");
3198   assert(base != NULL, "Base address cannot be NULL");
3199 
3200   release_memory(base, size);
3201   return map_memory_to_file(base, size, fd);
3202 }
3203 
3204 // On win32, one cannot release just a part of reserved memory, it's an
3205 // all or nothing deal.  When we split a reservation, we must break the
3206 // reservation into two reservations.
3207 void os::split_reserved_memory(char *base, size_t size, size_t split) {
3208 
3209   char* const split_address = base + split;
3210   assert(size > 0, "Sanity");
3211   assert(size > split, "Sanity");
3212   assert(split > 0, "Sanity");
3213   assert(is_aligned(base, os::vm_allocation_granularity()), "Sanity");
3214   assert(is_aligned(split_address, os::vm_allocation_granularity()), "Sanity");
3215 
3216   release_memory(base, size);
3217   reserve_memory(split, base);
3218   reserve_memory(size - split, split_address);
3219 
3220   // NMT: nothing to do here. Since Windows implements the split by
3221   //  releasing and re-reserving memory, the parts are already registered
3222   //  as individual mappings with NMT.
3223 
3224 }
3225 
3226 // Multiple threads can race in this code, but unlike POSIX-like OSes it is not possible to
3227 // unmap small sections of virtual space to get the requested alignment.
3228 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3229 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3230   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3231          "Alignment must be a multiple of allocation granularity (page size)");
3232   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3233 
3234   size_t extra_size = size + alignment;
3235   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3236 
3237   char* aligned_base = NULL;
3238 
3239   do {
3240     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3241     if (extra_base == NULL) {
3242       return NULL;
3243     }
3244     // Do manual alignment
3245     aligned_base = align_up(extra_base, alignment);
3246 
3247     if (file_desc != -1) {
3248       os::unmap_memory(extra_base, extra_size);
3249     } else {
3250       os::release_memory(extra_base, extra_size);
3251     }
3252 
3253     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3254 
3255   } while (aligned_base == NULL);
3256 
3257   return aligned_base;
3258 }
3259 
3260 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3261   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3262          "reserve alignment");
3263   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3264   char* res;
3265   // note that if UseLargePages is on, all the areas that require interleaving
3266   // will go thru reserve_memory_special rather than thru here.
3267   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3268   if (!use_individual) {
3269     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3270   } else {
3271     elapsedTimer reserveTimer;
3272     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3273     // in numa interleaving, we have to allocate pages individually
3274     // (well really chunks of NUMAInterleaveGranularity size)
3275     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3276     if (res == NULL) {
3277       warning("NUMA page allocation failed");
3278     }
3279     if (Verbose && PrintMiscellaneous) {
3280       reserveTimer.stop();
3281       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3282                     reserveTimer.milliseconds(), reserveTimer.ticks());
3283     }
3284   }
3285   assert(res == NULL || addr == NULL || addr == res,
3286          "Unexpected address from reserve.");
3287 
3288   return res;
3289 }
3290 
3291 // Reserve memory at an arbitrary address, only if that area is
3292 // available (and not reserved for something else).
3293 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3294   // Windows os::reserve_memory() fails if the requested address range is
3295   // not available.
3296   return reserve_memory(bytes, requested_addr);
3297 }
3298 
3299 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3300   assert(file_desc >= 0, "file_desc is not valid");
3301   return map_memory_to_file(requested_addr, bytes, file_desc);
3302 }
3303 
3304 size_t os::large_page_size() {
3305   return _large_page_size;
3306 }
3307 
3308 bool os::can_commit_large_page_memory() {
3309   // Windows only uses large page memory when the entire region is reserved
3310   // and committed in a single VirtualAlloc() call. This may change in the
3311   // future, but with Windows 2003 it's not possible to commit on demand.
3312   return false;
3313 }
3314 
3315 bool os::can_execute_large_page_memory() {
3316   return true;
3317 }
3318 
3319 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3320                                     bool exec) {
3321   assert(UseLargePages, "only for large pages");
3322 
3323   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3324     return NULL; // Fallback to small pages.
3325   }
3326 
3327   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3328   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3329 
3330   // With large pages, there are two cases where we need to use Individual Allocation:
3331   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3332   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3333   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3334     log_debug(pagesize)("Reserving large pages individually.");
3335 
3336     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3337     if (p_buf == NULL) {
3338       // give an appropriate warning message
3339       if (UseNUMAInterleaving) {
3340         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3341       }
3342       if (UseLargePagesIndividualAllocation) {
3343         warning("Individually allocated large pages failed, "
3344                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3345       }
3346       return NULL;
3347     }
3348 
3349     return p_buf;
3350 
3351   } else {
3352     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3353 
3354     // Normal policy: just allocate it all at once.
3355     char * res = (char *)VirtualAlloc(addr, bytes, flags, prot);
3357 
3358     return res;
3359   }
3360 }
3361 
3362 bool os::pd_release_memory_special(char* base, size_t bytes) {
3363   assert(base != NULL, "Sanity check");
3364   return pd_release_memory(base, bytes);
3365 }
3366 
3367 void os::print_statistics() {
3368 }
3369 
3370 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3371   int err = os::get_last_error();
3372   char buf[256];
3373   size_t buf_len = os::lasterror(buf, sizeof(buf));
3374   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3375           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3376           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3377 }
3378 
3379 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3380   if (bytes == 0) {
3381     // Don't bother the OS with noops.
3382     return true;
3383   }
3384   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3385   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3386   // Don't attempt to print anything if the OS call fails. We're
3387   // probably low on resources, so the print itself may cause crashes.
3388 
3389   // Unless NUMAInterleaving is enabled, the range of a commit is always
3390   // within a reserve covered by a single VirtualAlloc; in that case we can
3391   // just do a single commit for the requested size.
3392   if (!UseNUMAInterleaving) {
3393     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3394       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3395       return false;
3396     }
3397     if (exec) {
3398       DWORD oldprot;
3399       // Windows doc says to use VirtualProtect to get execute permissions
3400       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3401         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3402         return false;
3403       }
3404     }
3405     return true;
3406   } else {
3407 
3408     // when NUMAInterleaving is enabled, the commit might cover a range that
3409     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3410     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3411     // returns represents the number of bytes that can be committed in one step.
3412     size_t bytes_remaining = bytes;
3413     char * next_alloc_addr = addr;
3414     while (bytes_remaining > 0) {
3415       MEMORY_BASIC_INFORMATION alloc_info;
3416       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3417       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3418       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3419                        PAGE_READWRITE) == NULL) {
3420         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3421                                             exec);)
3422         return false;
3423       }
3424       if (exec) {
3425         DWORD oldprot;
3426         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3427                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3428           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3429                                               exec);)
3430           return false;
3431         }
3432       }
3433       bytes_remaining -= bytes_to_rq;
3434       next_alloc_addr += bytes_to_rq;
3435     }
3436   }
3437   // if we made it this far, return true
3438   return true;
3439 }
3440 
3441 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3442                           bool exec) {
3443   // alignment_hint is ignored on this OS
3444   return pd_commit_memory(addr, size, exec);
3445 }
3446 
3447 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3448                                   const char* mesg) {
3449   assert(mesg != NULL, "mesg must be specified");
3450   if (!pd_commit_memory(addr, size, exec)) {
3451     warn_fail_commit_memory(addr, size, exec);
3452     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3453   }
3454 }
3455 
3456 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3457                                   size_t alignment_hint, bool exec,
3458                                   const char* mesg) {
3459   // alignment_hint is ignored on this OS
3460   pd_commit_memory_or_exit(addr, size, exec, mesg);
3461 }
3462 
3463 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3464   if (bytes == 0) {
3465     // Don't bother the OS with noops.
3466     return true;
3467   }
3468   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3469   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3470   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3471 }
3472 
3473 bool os::pd_release_memory(char* addr, size_t bytes) {
3474   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3475 }
3476 
3477 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3478   return os::commit_memory(addr, size, !ExecMem);
3479 }
3480 
3481 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3482   return os::uncommit_memory(addr, size);
3483 }
3484 
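// With NUMA interleaving the range may consist of multiple VirtualAlloc
// allocations, so walk it region by region (as reported by VirtualQuery) and
// apply the requested protection to each piece separately.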
3485 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3486   uint count = 0;
3487   bool ret = false;
3488   size_t bytes_remaining = bytes;
3489   char * next_protect_addr = addr;
3490 
3491   // Use VirtualQuery() to get the chunk size.
3492   while (bytes_remaining) {
3493     MEMORY_BASIC_INFORMATION alloc_info;
3494     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3495       return false;
3496     }
3497 
3498     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3499     // We used different APIs in allocate_pages_individually() based on UseNUMAInterleaving,
3500     // but we don't need to distinguish here, as both cases are protected by the same API.
3501     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3502     if (!ret) {
3503       warning("Failed protecting pages individually for chunk #%u", count);
3504       return false;
3505     }
3506 
3507     bytes_remaining -= bytes_to_protect;
3508     next_protect_addr += bytes_to_protect;
3509     count++;
3510   }
3511   return ret;
3512 }
3513 
3514 // Set protections specified
3515 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3516                         bool is_committed) {
3517   unsigned int p = 0;
3518   switch (prot) {
3519   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3520   case MEM_PROT_READ: p = PAGE_READONLY; break;
3521   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3522   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3523   default:
3524     ShouldNotReachHere();
3525   }
3526 
3527   DWORD old_status;
3528 
3529   // Strangely enough, on Win32 one can change protection only for committed
3530   // memory. Not a big deal anyway, as bytes is less than or equal to 64K.
3531   if (!is_committed) {
3532     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3533                           "cannot commit protection page");
3534   }
3535   // One cannot use os::guard_memory() here, as on Win32 guard pages
3536   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3537   //
3538   // Pages in the region become guard pages. Any attempt to access a guard page
3539   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3540   // the guard page status. Guard pages thus act as a one-time access alarm.
3541   bool ret;
3542   if (UseNUMAInterleaving) {
3543     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3544     // so we must protect the chunks individually.
3545     ret = protect_pages_individually(addr, bytes, p, &old_status);
3546   } else {
3547     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3548   }
3549 #ifdef ASSERT
3550   if (!ret) {
3551     int err = os::get_last_error();
3552     char buf[256];
3553     size_t buf_len = os::lasterror(buf, sizeof(buf));
3554     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3555           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3556           buf_len != 0 ? buf : "<no_error_string>", err);
3557   }
3558 #endif
3559   return ret;
3560 }
3561 
3562 bool os::guard_memory(char* addr, size_t bytes) {
3563   DWORD old_status;
3564   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3565 }
3566 
3567 bool os::unguard_memory(char* addr, size_t bytes) {
3568   DWORD old_status;
3569   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3570 }
3571 
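// NUMA support on Windows is limited to interleaved allocations, so most of the
// NUMA callbacks below are no-ops or return UMA-style answers based on the node
// list built at interleaving init time.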
3572 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3573 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3574 void os::numa_make_global(char *addr, size_t bytes)    { }
3575 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3576 bool os::numa_topology_changed()                       { return false; }
3577 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3578 int os::numa_get_group_id()                            { return 0; }
3579 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3580   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3581     // Provide an answer for UMA systems
3582     ids[0] = 0;
3583     return 1;
3584   } else {
3585     // check for size bigger than actual groups_num
3586     size = MIN2(size, numa_get_groups_num());
3587     for (int i = 0; i < (int)size; i++) {
3588       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3589     }
3590     return size;
3591   }
3592 }
3593 
3594 int os::numa_get_group_id_for_address(const void* address) {
3595   return 0;
3596 }
3597 
3598 bool os::get_page_info(char *start, page_info* info) {
3599   return false;
3600 }
3601 
3602 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3603                      page_info* page_found) {
3604   return end;
3605 }
3606 
3607 char* os::non_memory_address_word() {
3608   // Must never look like an address returned by reserve_memory,
3609   // even in its subfields (as defined by the CPU immediate fields,
3610   // if the CPU splits constants across multiple instructions).
3611   return (char*)-1;
3612 }
3613 
3614 #define MAX_ERROR_COUNT 100
3615 #define SYS_THREAD_ERROR 0xffffffffUL
3616 
3617 void os::pd_start_thread(Thread* thread) {
3618   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3619   // Returns previous suspend state:
3620   // 0:  Thread was not suspended
3621   // 1:  Thread is running now
3622   // >1: Thread is still suspended.
3623   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3624 }
3625 
3626 
3627 // Short sleep, direct OS call.
3628 //
3629 // ms = 0, means allow others (if any) to run.
3630 //
3631 void os::naked_short_sleep(jlong ms) {
3632   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3633   Sleep(ms);
3634 }
3635 
3636 // Windows does not provide sleep functionality with nanosecond resolution, so we
3637 // try to approximate this with spinning combined with yielding if another thread
3638 // is ready to run on the current processor.
3639 void os::naked_short_nanosleep(jlong ns) {
3640   assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3641 
3642   int64_t start = os::javaTimeNanos();
3643   do {
3644     if (SwitchToThread() == 0) {
3645       // Nothing else is ready to run on this cpu, spin a little
3646       SpinPause();
3647     }
3648   } while (os::javaTimeNanos() - start < ns);
3649 }
3650 
3651 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3652 void os::infinite_sleep() {
3653   while (true) {    // sleep forever ...
3654     Sleep(100000);  // ... 100 seconds at a time
3655   }
3656 }
3657 
3658 typedef BOOL (WINAPI * STTSignature)(void);
3659 
3660 void os::naked_yield() {
3661   // Consider passing back the return value from SwitchToThread().
3662   SwitchToThread();
3663 }
3664 
3665 // Win32 only gives you access to seven real priorities at a time,
3666 // so we compress Java's ten down to seven.  It would be better
3667 // if we dynamically adjusted relative priorities.
3668 
3669 int os::java_to_os_priority[CriticalPriority + 1] = {
3670   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3671   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3672   THREAD_PRIORITY_LOWEST,                       // 2
3673   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3674   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3675   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3676   THREAD_PRIORITY_NORMAL,                       // 6
3677   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3678   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3679   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3680   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3681   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3682 };
3683 
3684 int prio_policy1[CriticalPriority + 1] = {
3685   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3686   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3687   THREAD_PRIORITY_LOWEST,                       // 2
3688   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3689   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3690   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3691   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3692   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3693   THREAD_PRIORITY_HIGHEST,                      // 8
3694   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3695   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3696   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3697 };
3698 
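// Select the thread priority table (called from os::init_2()). With
// ThreadPriorityPolicy == 1 the prio_policy1 table is installed, and with
// UseCriticalJavaThreadPriority, MaxPriority gets the same OS priority as
// CriticalPriority.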
3699 static int prio_init() {
3700   // If ThreadPriorityPolicy is 1, switch tables
3701   if (ThreadPriorityPolicy == 1) {
3702     int i;
3703     for (i = 0; i < CriticalPriority + 1; i++) {
3704       os::java_to_os_priority[i] = prio_policy1[i];
3705     }
3706   }
3707   if (UseCriticalJavaThreadPriority) {
3708     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3709   }
3710   return 0;
3711 }
3712 
3713 OSReturn os::set_native_priority(Thread* thread, int priority) {
3714   if (!UseThreadPriorities) return OS_OK;
3715   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3716   return ret ? OS_OK : OS_ERR;
3717 }
3718 
3719 OSReturn os::get_native_priority(const Thread* const thread,
3720                                  int* priority_ptr) {
3721   if (!UseThreadPriorities) {
3722     *priority_ptr = java_to_os_priority[NormPriority];
3723     return OS_OK;
3724   }
3725   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3726   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3727     assert(false, "GetThreadPriority failed");
3728     return OS_ERR;
3729   }
3730   *priority_ptr = os_prio;
3731   return OS_OK;
3732 }
3733 
3734 // GetCurrentThreadId() returns DWORD
3735 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3736 
3737 static int _initial_pid = 0;
3738 
3739 int os::current_process_id() {
3740   return (_initial_pid ? _initial_pid : _getpid());
3741 }
3742 
3743 int    os::win32::_vm_page_size              = 0;
3744 int    os::win32::_vm_allocation_granularity = 0;
3745 int    os::win32::_processor_type            = 0;
3746 // Processor level is not available on non-NT systems, use vm_version instead
3747 int    os::win32::_processor_level           = 0;
3748 julong os::win32::_physical_memory           = 0;
3749 size_t os::win32::_default_stack_size        = 0;
3750 
3751 intx          os::win32::_os_thread_limit    = 0;
3752 volatile intx os::win32::_os_thread_count    = 0;
3753 
3754 bool   os::win32::_is_windows_server         = false;
3755 
3756 // 6573254
3757 // Currently, the bug is observed across all the supported Windows releases,
3758 // including the latest one (as of this writing - Windows Server 2012 R2)
3759 bool   os::win32::_has_exit_bug              = true;
3760 
3761 void os::win32::initialize_system_info() {
3762   SYSTEM_INFO si;
3763   GetSystemInfo(&si);
3764   _vm_page_size    = si.dwPageSize;
3765   _vm_allocation_granularity = si.dwAllocationGranularity;
3766   _processor_type  = si.dwProcessorType;
3767   _processor_level = si.wProcessorLevel;
3768   set_processor_count(si.dwNumberOfProcessors);
3769 
3770   MEMORYSTATUSEX ms;
3771   ms.dwLength = sizeof(ms);
3772 
3773   // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3774   // dwMemoryLoad (% of memory in use)
3775   GlobalMemoryStatusEx(&ms);
3776   _physical_memory = ms.ullTotalPhys;
3777 
3778   if (FLAG_IS_DEFAULT(MaxRAM)) {
3779     // Adjust MaxRAM according to the maximum virtual address space available.
3780     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3781   }
3782 
3783   OSVERSIONINFOEX oi;
3784   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3785   GetVersionEx((OSVERSIONINFO*)&oi);
3786   switch (oi.dwPlatformId) {
3787   case VER_PLATFORM_WIN32_NT:
3788     {
3789       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3790       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3791           oi.wProductType == VER_NT_SERVER) {
3792         _is_windows_server = true;
3793       }
3794     }
3795     break;
3796   default: fatal("Unknown platform");
3797   }
3798 
3799   _default_stack_size = os::current_stack_size();
3800   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3801   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3802          "stack size not a multiple of page size");
3803 
3804   initialize_performance_counter();
3805 }
3806 
3807 
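// Load a system DLL given a bare file name. Path components are rejected, and the
// DLL is looked up in the system directory and then in the Windows directory only,
// never in the current directory or on the PATH.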
3808 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3809                                       int ebuflen) {
3810   char path[MAX_PATH];
3811   DWORD size;
3812   DWORD pathLen = (DWORD)sizeof(path);
3813   HINSTANCE result = NULL;
3814 
3815   // only allow library name without path component
3816   assert(strchr(name, '\\') == NULL, "path not allowed");
3817   assert(strchr(name, ':') == NULL, "path not allowed");
3818   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3819     jio_snprintf(ebuf, ebuflen,
3820                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3821     return NULL;
3822   }
3823 
3824   // search system directory
3825   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3826     if (size >= pathLen) {
3827       return NULL; // truncated
3828     }
3829     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3830       return NULL; // truncated
3831     }
3832     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3833       return result;
3834     }
3835   }
3836 
3837   // try Windows directory
3838   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3839     if (size >= pathLen) {
3840       return NULL; // truncated
3841     }
3842     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3843       return NULL; // truncated
3844     }
3845     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3846       return result;
3847     }
3848   }
3849 
3850   jio_snprintf(ebuf, ebuflen,
3851                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3852   return NULL;
3853 }
3854 
3855 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3856 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3857 
3858 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3859   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3860   return TRUE;
3861 }
3862 
3863 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3864   // Basic approach:
3865   //  - Each exiting thread registers its intent to exit and then does so.
3866   //  - A thread trying to terminate the process must wait for all
3867   //    threads currently exiting to complete their exit.
3868 
3869   if (os::win32::has_exit_bug()) {
3870     // The array holds handles of the threads that have started exiting by calling
3871     // _endthreadex().
3872     // Should be large enough to avoid blocking the exiting thread due to lack of
3873     // a free slot.
3874     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3875     static int handle_count = 0;
3876 
3877     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3878     static CRITICAL_SECTION crit_sect;
3879     static volatile DWORD process_exiting = 0;
3880     int i, j;
3881     DWORD res;
3882     HANDLE hproc, hthr;
3883 
3884     // We only attempt to register threads until a process exiting
3885     // thread manages to set the process_exiting flag. Any threads
3886     // that come through here after the process_exiting flag is set
3887     // are unregistered and will be caught in the SuspendThread()
3888     // infinite loop below.
3889     bool registered = false;
3890 
3891     // The first thread that reached this point, initializes the critical section.
3892     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3893       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3894     } else if (Atomic::load_acquire(&process_exiting) == 0) {
3895       if (what != EPT_THREAD) {
3896         // Atomically set process_exiting before the critical section
3897         // to increase the visibility between racing threads.
3898         Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
3899       }
3900       EnterCriticalSection(&crit_sect);
3901 
3902       if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
3903         // Remove from the array those handles of the threads that have completed exiting.
3904         for (i = 0, j = 0; i < handle_count; ++i) {
3905           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3906           if (res == WAIT_TIMEOUT) {
3907             handles[j++] = handles[i];
3908           } else {
3909             if (res == WAIT_FAILED) {
3910               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3911                       GetLastError(), __FILE__, __LINE__);
3912             }
3913             // Don't keep the handle, if we failed waiting for it.
3914             CloseHandle(handles[i]);
3915           }
3916         }
3917 
3918         // If there's no free slot in the array of the kept handles, we'll have to
3919         // wait until at least one thread completes exiting.
3920         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3921           // Raise the priority of the oldest exiting thread to increase its chances
3922           // to complete sooner.
3923           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3924           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3925           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3926             i = (res - WAIT_OBJECT_0);
3927             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3928             for (; i < handle_count; ++i) {
3929               handles[i] = handles[i + 1];
3930             }
3931           } else {
3932             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3933                     (res == WAIT_FAILED ? "failed" : "timed out"),
3934                     GetLastError(), __FILE__, __LINE__);
3935             // Don't keep handles, if we failed waiting for them.
3936             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3937               CloseHandle(handles[i]);
3938             }
3939             handle_count = 0;
3940           }
3941         }
3942 
3943         // Store a duplicate of the current thread handle in the array of handles.
3944         hproc = GetCurrentProcess();
3945         hthr = GetCurrentThread();
3946         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3947                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3948           warning("DuplicateHandle failed (%u) in %s: %d\n",
3949                   GetLastError(), __FILE__, __LINE__);
3950 
3951           // We can't register this thread (no more handles) so this thread
3952           // may be racing with a thread that is calling exit(). If the thread
3953           // that is calling exit() has managed to set the process_exiting
3954           // flag, then this thread will be caught in the SuspendThread()
3955           // infinite loop below which closes that race. A small timing
3956           // window remains before the process_exiting flag is set, but it
3957           // is only exposed when we are out of handles.
3958         } else {
3959           ++handle_count;
3960           registered = true;
3961 
3962           // The current exiting thread has stored its handle in the array, and now
3963           // should leave the critical section before calling _endthreadex().
3964         }
3965 
3966       } else if (what != EPT_THREAD && handle_count > 0) {
3967         jlong start_time, finish_time, timeout_left;
3968         // Before ending the process, make sure all the threads that had called
3969         // _endthreadex() completed.
3970 
3971         // Set the priority level of the current thread to the same value as
3972         // the priority level of exiting threads.
3973         // This is to ensure it will be given a fair chance to execute if
3974         // the timeout expires.
3975         hthr = GetCurrentThread();
3976         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3977         start_time = os::javaTimeNanos();
3978         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3979         for (i = 0; ; ) {
3980           int portion_count = handle_count - i;
3981           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3982             portion_count = MAXIMUM_WAIT_OBJECTS;
3983           }
3984           for (j = 0; j < portion_count; ++j) {
3985             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3986           }
3987           timeout_left = (finish_time - start_time) / 1000000L;
3988           if (timeout_left < 0) {
3989             timeout_left = 0;
3990           }
3991           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
3992           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
3993             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3994                     (res == WAIT_FAILED ? "failed" : "timed out"),
3995                     GetLastError(), __FILE__, __LINE__);
3996             // Reset portion_count so we close the remaining
3997             // handles due to this error.
3998             portion_count = handle_count - i;
3999           }
4000           for (j = 0; j < portion_count; ++j) {
4001             CloseHandle(handles[i + j]);
4002           }
4003           if ((i += portion_count) >= handle_count) {
4004             break;
4005           }
4006           start_time = os::javaTimeNanos();
4007         }
4008         handle_count = 0;
4009       }
4010 
4011       LeaveCriticalSection(&crit_sect);
4012     }
4013 
4014     if (!registered &&
4015         Atomic::load_acquire(&process_exiting) != 0 &&
4016         process_exiting != GetCurrentThreadId()) {
4017       // Some other thread is about to call exit(), so we don't let
4018       // the current unregistered thread proceed to exit() or _endthreadex()
4019       while (true) {
4020         SuspendThread(GetCurrentThread());
4021         // Avoid busy-wait loop, if SuspendThread() failed.
4022         Sleep(EXIT_TIMEOUT);
4023       }
4024     }
4025   }
4026 
4027   // We are here if either
4028   // - there's no 'race at exit' bug on this OS release;
4029   // - initialization of the critical section failed (unlikely);
4030   // - the current thread has registered itself and left the critical section;
4031   // - the process-exiting thread has raised the flag and left the critical section.
4032   if (what == EPT_THREAD) {
4033     _endthreadex((unsigned)exit_code);
4034   } else if (what == EPT_PROCESS) {
4035     ::exit(exit_code);
4036   } else {
4037     _exit(exit_code);
4038   }
4039 
4040   // Should not reach here
4041   return exit_code;
4042 }
4043 
4044 #undef EXIT_TIMEOUT
4045 
4046 void os::win32::setmode_streams() {
4047   _setmode(_fileno(stdin), _O_BINARY);
4048   _setmode(_fileno(stdout), _O_BINARY);
4049   _setmode(_fileno(stderr), _O_BINARY);
4050 }
4051 
4052 void os::wait_for_keypress_at_exit(void) {
4053   if (PauseAtExit) {
4054     fprintf(stderr, "Press any key to continue...\n");
4055     fgetc(stdin);
4056   }
4057 }
4058 
4059 
4060 bool os::message_box(const char* title, const char* message) {
4061   int result = MessageBox(NULL, message, title,
4062                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4063   return result == IDYES;
4064 }
4065 
4066 #ifndef PRODUCT
4067 #ifndef _WIN64
4068 // Helpers to check whether NX protection is enabled
4069 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4070   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4071       pex->ExceptionRecord->NumberParameters > 0 &&
4072       pex->ExceptionRecord->ExceptionInformation[0] ==
4073       EXCEPTION_INFO_EXEC_VIOLATION) {
4074     return EXCEPTION_EXECUTE_HANDLER;
4075   }
4076   return EXCEPTION_CONTINUE_SEARCH;
4077 }
4078 
4079 void nx_check_protection() {
4080   // If NX is enabled we'll get an exception calling into code on the stack
4081   char code[] = { (char)0xC3 }; // ret
4082   void *code_ptr = (void *)code;
4083   __try {
4084     __asm call code_ptr
4085   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4086     tty->print_raw_cr("NX protection detected.");
4087   }
4088 }
4089 #endif // _WIN64
4090 #endif // PRODUCT
4091 
4092 // This is called _before_ the global arguments have been parsed
4093 void os::init(void) {
4094   _initial_pid = _getpid();
4095 
4096   init_random(1234567);
4097 
4098   win32::initialize_system_info();
4099   win32::setmode_streams();
4100   init_page_sizes((size_t) win32::vm_page_size());
4101 
4102   // This may be overridden later when argument processing is done.
4103   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
4104 
4105   // Initialize main_process and main_thread
4106   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4107   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4108                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4109     fatal("DuplicateHandle failed\n");
4110   }
4111   main_thread_id = (int) GetCurrentThreadId();
4112 
4113   // initialize fast thread access - only used for 32-bit
4114   win32::initialize_thread_ptr_offset();
4115 }
4116 
4117 // To install functions for atexit processing
4118 extern "C" {
4119   static void perfMemory_exit_helper() {
4120     perfMemory_exit();
4121   }
4122 }
4123 
4124 static jint initSock();
4125 
4126 // this is called _after_ the global arguments have been parsed
4127 jint os::init_2(void) {
4128 
4129   // This could be set any time but all platforms
4130   // have to set it the same so we have to mirror Solaris.
4131   DEBUG_ONLY(os::set_mutex_init_done();)
4132 
4133   // Setup Windows Exceptions
4134 
4135 #if INCLUDE_AOT
4136   // If AOT is enabled we need to install a vectored exception handler
4137   // in order to forward implicit exceptions from code in AOT
4138   // generated DLLs.  This is necessary since these DLLs are not
4139   // registered for structured exceptions like codecache methods are.
4140   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4141     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4142   }
4143 #endif
4144 
4145   // for debugging float code generation bugs
4146   if (ForceFloatExceptions) {
4147 #ifndef  _WIN64
4148     static long fp_control_word = 0;
4149     __asm { fstcw fp_control_word }
4150     // see Intel PPro Manual, Vol. 2, p 7-16
4151     const long precision = 0x20;
4152     const long underflow = 0x10;
4153     const long overflow  = 0x08;
4154     const long zero_div  = 0x04;
4155     const long denorm    = 0x02;
4156     const long invalid   = 0x01;
4157     fp_control_word |= invalid;
4158     __asm { fldcw fp_control_word }
4159 #endif
4160   }
4161 
4162   // If stack_commit_size is 0, Windows will reserve the default size,
4163   // but only commit a small portion of it.
4164   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4165   size_t default_reserve_size = os::win32::default_stack_size();
4166   size_t actual_reserve_size = stack_commit_size;
4167   if (stack_commit_size < default_reserve_size) {
4168     // If stack_commit_size == 0, we want this too
4169     actual_reserve_size = default_reserve_size;
4170   }
4171 
4172   // Check minimum allowable stack size for thread creation and to initialize
4173   // the java system classes, including StackOverflowError - depends on page
4174   // size.  Add two 4K pages for compiler2 recursion in main thread.
4175   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4176   // class initialization depending on 32 or 64 bit VM.
4177   size_t min_stack_allowed =
4178             (size_t)(JavaThread::stack_guard_zone_size() +
4179                      JavaThread::stack_shadow_zone_size() +
4180                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4181 
4182   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4183 
4184   if (actual_reserve_size < min_stack_allowed) {
4185     tty->print_cr("\nThe Java thread stack size specified is too small. "
4186                   "Specify at least %dk",
4187                   min_stack_allowed / K);
4188     return JNI_ERR;
4189   }
4190 
4191   JavaThread::set_stack_size_at_create(stack_commit_size);
4192 
4193   // Calculate the theoretical max. size of threads to guard against artificial
4194   // out-of-memory situations, where all available address space has been
4195   // reserved by thread stacks.
4196   assert(actual_reserve_size != 0, "Must have a stack");
4197 
4198   // Calculate the thread limit when we should start doing Virtual Memory
4199   // banging. Currently this is when the threads have used all but 200MB of space.
4200   //
4201   // TODO: consider performing a similar calculation for commit size instead
4202   // as reserve size, since on a 64-bit platform we'll run into that more
4203   // often than running out of virtual memory space.  We can use the
4204   // lower value of the two calculations as the os_thread_limit.
4205   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4206   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4207 
4208   // at exit methods are called in the reverse order of their registration.
4209   // there is no limit to the number of functions registered. atexit does
4210   // not set errno.
4211 
4212   if (PerfAllowAtExitRegistration) {
4213     // only register atexit functions if PerfAllowAtExitRegistration is set.
4214     // atexit functions can be delayed until process exit time, which
4215     // can be problematic for embedded VM situations. Embedded VMs should
4216     // call DestroyJavaVM() to assure that VM resources are released.
4217 
4218     // note: perfMemory_exit_helper atexit function may be removed in
4219     // the future if the appropriate cleanup code can be added to the
4220     // VM_Exit VMOperation's doit method.
4221     if (atexit(perfMemory_exit_helper) != 0) {
4222       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4223     }
4224   }
4225 
4226 #ifndef _WIN64
4227   // Print something if NX is enabled (win32 on AMD64)
4228   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4229 #endif
4230 
4231   // initialize thread priority policy
4232   prio_init();
4233 
4234   if (UseNUMA && !ForceNUMA) {
4235     UseNUMA = false; // We don't fully support this yet
4236   }
4237 
4238   if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
4239     if (!numa_interleaving_init()) {
4240       FLAG_SET_ERGO(UseNUMAInterleaving, false);
4241     } else if (!UseNUMAInterleaving) {
4242       // When NUMA requested, not-NUMA-aware allocations default to interleaving.
4243       FLAG_SET_ERGO(UseNUMAInterleaving, true);
4244     }
4245   }
4246 
4247   if (initSock() != JNI_OK) {
4248     return JNI_ERR;
4249   }
4250 
4251   SymbolEngine::recalc_search_path();
4252 
4253   // Initialize data for jdk.internal.misc.Signal
4254   if (!ReduceSignalUsage) {
4255     jdk_misc_signal_init();
4256   }
4257 
4258   return JNI_OK;
4259 }
4260 
4261 // combine the high and low DWORD into a ULONGLONG
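// e.g. make_double_word(0x00000001, 0x00000002) == 0x0000000100000002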
4262 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4263   ULONGLONG value = high_word;
4264   value <<= sizeof(high_word) * 8;
4265   value |= low_word;
4266   return value;
4267 }
4268 
4269 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4270 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4271   ::memset((void*)sbuf, 0, sizeof(struct stat));
4272   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4273   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4274                                   file_data.ftLastWriteTime.dwLowDateTime);
4275   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4276                                   file_data.ftCreationTime.dwLowDateTime);
4277   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4278                                   file_data.ftLastAccessTime.dwLowDateTime);
4279   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4280     sbuf->st_mode |= S_IFDIR;
4281   } else {
4282     sbuf->st_mode |= S_IFREG;
4283   }
4284 }
4285 
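// Convert a narrow (ANSI code page) path to a newly allocated wide string using
// MultiByteToWideChar. On success *unicode_path is owned by the caller and must be
// freed with FREE_C_HEAP_ARRAY.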
4286 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
4287   // Get required buffer size to convert to Unicode
4288   int unicode_path_len = MultiByteToWideChar(CP_ACP,
4289                                              MB_ERR_INVALID_CHARS,
4290                                              char_path, -1,
4291                                              NULL, 0);
4292   if (unicode_path_len == 0) {
4293     return EINVAL;
4294   }
4295 
4296   *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, unicode_path_len, mtInternal);
4297 
4298   int result = MultiByteToWideChar(CP_ACP,
4299                                    MB_ERR_INVALID_CHARS,
4300                                    char_path, -1,
4301                                    *unicode_path, unicode_path_len);
4302   assert(result == unicode_path_len, "length already checked above");
4303 
4304   return ERROR_SUCCESS;
4305 }
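// Resolve a wide path into a newly allocated absolute path via GetFullPathNameW;
// the caller owns *full_path and must free it with FREE_C_HEAP_ARRAY.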
4306 
4307 static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
4308   // Get required buffer size to convert to full path. The return
4309   // value INCLUDES the terminating null character.
4310   DWORD full_path_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
4311   if (full_path_len == 0) {
4312     return EINVAL;
4313   }
4314 
4315   *full_path = NEW_C_HEAP_ARRAY(WCHAR, full_path_len, mtInternal);
4316 
4317   // When the buffer has sufficient size, the return value EXCLUDES the
4318   // terminating null character
4319   DWORD result = GetFullPathNameW(unicode_path, full_path_len, *full_path, NULL);
4320   assert(result <= full_path_len, "length already checked above");
4321 
4322   return ERROR_SUCCESS;
4323 }
4324 
4325 static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
4326   *prefix_off = 0;
4327   *needs_fullpath = true;
4328 
4329   if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
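         // Local absolute path with a drive letter (e.g. C:\dir): add the \\?\ long-path prefix.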
4330     *prefix = L"\\\\?\\";
4331   } else if (buf[0] == '\\' && buf[1] == '\\') {
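         // Path starts with two backslashes: either already \\?\-prefixed (checked below) or a UNC path (\\server\share).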
4332     if (buf[2] == '?' && buf[3] == '\\') {
4333       *prefix = L"";
4334       *needs_fullpath = false;
4335     } else {
4336       *prefix = L"\\\\?\\UNC";
4337       *prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4338     }
4339   } else {
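         // Relative (or other) path: it is expanded to a full path later and gets the \\?\ prefix.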
4340     *prefix = L"\\\\?\\";
4341   }
4342 }
4343 
4344 // Returns the given path as an absolute wide path in UNC format. The returned path is NULL
4345 // on error (with err being set accordingly) and should be freed via os::free() otherwise.
4346 // additional_space is the amount of extra space, in wchar_t units, the function will add to
4347 // the allocation of the return buffer (such that the size of the returned buffer is at least
4348 // wcslen(buf) + 1 + additional_space).
4349 static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
4350   if ((path == NULL) || (path[0] == '\0')) {
4351     err = ENOENT;
4352     return NULL;
4353   }
4354 
4355   // Need to allocate at least room for 3 characters, since os::native_path transforms C: to C:.
4356   size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
4357   char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
4358   strncpy(buf, path, buf_len);
4359   os::native_path(buf);
4360 
4361   LPWSTR prefix = NULL;
4362   int prefix_off = 0;
4363   bool needs_fullpath = true;
4364   set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);
4365 
4366   LPWSTR unicode_path = NULL;
4367   err = convert_to_unicode(buf, &unicode_path);
4368   FREE_C_HEAP_ARRAY(char, buf);
4369   if (err != ERROR_SUCCESS) {
4370     return NULL;
4371   }
4372 
4373   LPWSTR converted_path = NULL;
4374   if (needs_fullpath) {
4375     err = get_full_path(unicode_path, &converted_path);
4376   } else {
4377     converted_path = unicode_path;
4378   }
4379 
4380   LPWSTR result = NULL;
4381   if (converted_path != NULL) {
4382     size_t prefix_len = wcslen(prefix);
4383     size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
4384     result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
4385     _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);
4386 
4387     // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
4388     result_len = wcslen(result);
4389     if ((result[result_len - 1] == L'\\') &&
4390         !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
4391       result[result_len - 1] = L'\0';
4392     }
4393   }
4394 
4395   if (converted_path != unicode_path) {
4396     FREE_C_HEAP_ARRAY(WCHAR, converted_path);
4397   }
4398   FREE_C_HEAP_ARRAY(WCHAR, unicode_path);
4399 
4400   return static_cast<wchar_t*>(result); // LPWSTR and wchar_t* are the same type on Windows.
4401 }
4402 
4403 int os::stat(const char *path, struct stat *sbuf) {
4404   errno_t err;
4405   wchar_t* wide_path = wide_abs_unc_path(path, err);
4406 
4407   if (wide_path == NULL) {
4408     errno = err;
4409     return -1;
4410   }
4411 
4412   WIN32_FILE_ATTRIBUTE_DATA file_data;
4413   BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4414   os::free(wide_path);
4415 
4416   if (!bret) {
4417     errno = ::GetLastError();
4418     return -1;
4419   }
4420 
4421   file_attribute_data_to_stat(sbuf, file_data);
4422   return 0;
4423 }
4424 
4425 static HANDLE create_read_only_file_handle(const char* file) {
4426   errno_t err;
4427   wchar_t* wide_path = wide_abs_unc_path(file, err);
4428 
4429   if (wide_path == NULL) {
4430     errno = err;
4431     return INVALID_HANDLE_VALUE;
4432   }
4433 
4434   HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4435                                 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4436   os::free(wide_path);
4437 
4438   return handle;
4439 }
4440 
4441 bool os::same_files(const char* file1, const char* file2) {
4442 
4443   if (file1 == NULL && file2 == NULL) {
4444     return true;
4445   }
4446 
4447   if (file1 == NULL || file2 == NULL) {
4448     return false;
4449   }
4450 
4451   if (strcmp(file1, file2) == 0) {
4452     return true;
4453   }
4454 
4455   HANDLE handle1 = create_read_only_file_handle(file1);
4456   HANDLE handle2 = create_read_only_file_handle(file2);
4457   bool result = false;
4458 
4459   // if we could open both paths...
4460   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4461     BY_HANDLE_FILE_INFORMATION fileInfo1;
4462     BY_HANDLE_FILE_INFORMATION fileInfo2;
4463     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4464       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4465       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4466       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4467         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4468         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4469         result = true;
4470       }
4471     }
4472   }
4473 
4474   // free the handles
4475   if (handle1 != INVALID_HANDLE_VALUE) {
4476     ::CloseHandle(handle1);
4477   }
4478 
4479   if (handle2 != INVALID_HANDLE_VALUE) {
4480     ::CloseHandle(handle2);
4481   }
4482 
4483   return result;
4484 }
4485 
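     // Combines the two 32-bit halves of a FILETIME into one signed 64-bit value (in 100-nanosecond units).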
4486 #define FT2INT64(ft) \
4487   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4488 
4489 
4490 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4491 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4492 // of a thread.
4493 //
4494 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4495 // the fast estimate available on the platform.
4496 
4497 // current_thread_cpu_time() is not optimized for Windows yet
4498 jlong os::current_thread_cpu_time() {
4499   // return user + sys since the cost is the same
4500   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4501 }
4502 
4503 jlong os::thread_cpu_time(Thread* thread) {
4504   // consistent with what current_thread_cpu_time() returns.
4505   return os::thread_cpu_time(thread, true /* user+sys */);
4506 }
4507 
4508 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4509   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4510 }
4511 
4512 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4513   // This code is copied from the classic VM -> hpi::sysThreadCPUTime
4514   // If this function changes, os::is_thread_cpu_time_supported() should too
4515   FILETIME CreationTime;
4516   FILETIME ExitTime;
4517   FILETIME KernelTime;
4518   FILETIME UserTime;
4519 
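       // GetThreadTimes reports times in 100-nanosecond FILETIME units; multiply by 100 to return nanoseconds.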
4520   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4521                       &ExitTime, &KernelTime, &UserTime) == 0) {
4522     return -1;
4523   } else if (user_sys_cpu_time) {
4524     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4525   } else {
4526     return FT2INT64(UserTime) * 100;
4527   }
4528 }
4529 
4530 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4531   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4532   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4533   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4534   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4535 }
4536 
4537 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4538   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4539   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4540   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4541   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4542 }
4543 
4544 bool os::is_thread_cpu_time_supported() {
4545   // see os::thread_cpu_time
4546   FILETIME CreationTime;
4547   FILETIME ExitTime;
4548   FILETIME KernelTime;
4549   FILETIME UserTime;
4550 
4551   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4552                       &KernelTime, &UserTime) == 0) {
4553     return false;
4554   } else {
4555     return true;
4556   }
4557 }
4558 
4559 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4560 // It does have primitives (PDH API) to get CPU usage and run queue length.
4561 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4562 // If we wanted to implement loadavg on Windows, we have a few options:
4563 //
4564 // a) Query CPU usage and run queue length and "fake" an answer by
4565 //    returning the CPU usage if it's under 100%, and the run queue
4566 //    length otherwise.  It turns out that querying is pretty slow
4567 //    on Windows, on the order of 200 microseconds on a fast machine.
4568 //    Note that on Windows the CPU usage value is the % usage
4569 //    since the last time the API was called (and the first call
4570 //    returns 100%), so we'd have to deal with that as well.
4571 //
4572 // b) Sample the "fake" answer using a sampling thread and store
4573 //    the answer in a global variable.  The call to loadavg would
4574 //    just return the value of the global, avoiding the slow query.
4575 //
4576 // c) Sample a better answer using exponential decay to smooth the
4577 //    value.  This is basically the algorithm used by UNIX kernels.
4578 //
4579 // Note that sampling thread starvation could affect both (b) and (c).
4580 int os::loadavg(double loadavg[], int nelem) {
4581   return -1;
4582 }
4583 
4584 
4585 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4586 bool os::dont_yield() {
4587   return DontYieldALot;
4588 }
4589 
4590 int os::open(const char *path, int oflag, int mode) {
4591   errno_t err;
4592   wchar_t* wide_path = wide_abs_unc_path(path, err);
4593 
4594   if (wide_path == NULL) {
4595     errno = err;
4596     return -1;
4597   }
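       // Open in binary mode and mark the descriptor as not inheritable by child processes.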
4598   int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
4599   os::free(wide_path);
4600 
4601   if (fd == -1) {
4602     errno = ::GetLastError();
4603   }
4604 
4605   return fd;
4606 }
4607 
4608 FILE* os::open(int fd, const char* mode) {
4609   return ::_fdopen(fd, mode);
4610 }
4611 
4612 // Is a (classpath) directory empty?
4613 bool os::dir_is_empty(const char* path) {
4614   errno_t err;
4615   wchar_t* wide_path = wide_abs_unc_path(path, err, 2);
4616 
4617   if (wide_path == NULL) {
4618     errno = err;
4619     return false;
4620   }
4621 
4622   // Make sure we end with "\\*"
4623   if (wide_path[wcslen(wide_path) - 1] == L'\\') {
4624     wcscat(wide_path, L"*");
4625   } else {
4626     wcscat(wide_path, L"\\*");
4627   }
4628 
4629   WIN32_FIND_DATAW fd;
4630   HANDLE f = ::FindFirstFileW(wide_path, &fd);
4631   os::free(wide_path);
4632   bool is_empty = true;
4633 
4634   if (f != INVALID_HANDLE_VALUE) {
4635     while (is_empty && ::FindNextFileW(f, &fd)) {
4636       // An empty directory contains only the current directory entry (".")
4637       // and the parent directory entry ("..").
4638       if ((wcscmp(fd.cFileName, L".") != 0) &&
4639           (wcscmp(fd.cFileName, L"..") != 0)) {
4640         is_empty = false;
4641       }
4642     }
4643     FindClose(f);
4644   } else {
4645     errno = ::GetLastError();
4646   }
4647 
4648   return is_empty;
4649 }
4650 
4651 // create binary file, rewriting existing file if required
4652 int os::create_binary_file(const char* path, bool rewrite_existing) {
4653   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4654   if (!rewrite_existing) {
4655     oflags |= _O_EXCL;
4656   }
4657   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4658 }
4659 
4660 // return current position of file pointer
4661 jlong os::current_file_offset(int fd) {
4662   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4663 }
4664 
4665 // move file pointer to the specified offset
4666 jlong os::seek_to_file_offset(int fd, jlong offset) {
4667   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4668 }
4669 
4670 
4671 jlong os::lseek(int fd, jlong offset, int whence) {
4672   return (jlong) ::_lseeki64(fd, offset, whence);
4673 }
4674 
4675 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4676   OVERLAPPED ov;
4677   DWORD nread;
4678   BOOL result;
4679 
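       // Pass the read offset through an OVERLAPPED structure so the read starts at 'offset'.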
4680   ZeroMemory(&ov, sizeof(ov));
4681   ov.Offset = (DWORD)offset;
4682   ov.OffsetHigh = (DWORD)(offset >> 32);
4683 
4684   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4685 
4686   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4687 
4688   return result ? nread : 0;
4689 }
4690 
4691 
4692 // This method is a slightly reworked copy of JDK's sysNativePath
4693 // from src/windows/hpi/src/path_md.c
4694 
4695 // Convert a pathname to native format.  On win32, this involves forcing all
4696 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4697 // sometimes rejects '/') and removing redundant separators.  The input path is
4698 // assumed to have been converted into the character encoding used by the local
4699 // system.  Because this might be a double-byte encoding, care is taken to
4700 // treat double-byte lead characters correctly.
4701 //
4702 // This procedure modifies the given path in place, as the result is never
4703 // longer than the original.  There is no error return; this operation always
4704 // succeeds.
4705 char * os::native_path(char *path) {
4706   char *src = path, *dst = path, *end = path;
4707   char *colon = NULL;  // If a drive specifier is found, this will
4708                        // point to the colon following the drive letter
4709 
4710   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4711   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4712           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4713 
4714   // Check for leading separators
4715 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4716   while (isfilesep(*src)) {
4717     src++;
4718   }
4719 
4720   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4721     // Remove leading separators if followed by drive specifier.  This
4722     // hack is necessary to support file URLs containing drive
4723     // specifiers (e.g., "file://c:/path").  As a side effect,
4724     // "/c:/path" can be used as an alternative to "c:/path".
4725     *dst++ = *src++;
4726     colon = dst;
4727     *dst++ = ':';
4728     src++;
4729   } else {
4730     src = path;
4731     if (isfilesep(src[0]) && isfilesep(src[1])) {
4732       // UNC pathname: Retain first separator; leave src pointed at
4733       // second separator so that further separators will be collapsed
4734       // into the second separator.  The result will be a pathname
4735       // beginning with "\\\\" followed (most likely) by a host name.
4736       src = dst = path + 1;
4737       path[0] = '\\';     // Force first separator to '\\'
4738     }
4739   }
4740 
4741   end = dst;
4742 
4743   // Remove redundant separators from remainder of path, forcing all
4744   // separators to be '\\' rather than '/'. Also, single byte space
4745   // characters are removed from the end of the path because those
4746   // are not legal ending characters on this operating system.
4747   //
4748   while (*src != '\0') {
4749     if (isfilesep(*src)) {
4750       *dst++ = '\\'; src++;
4751       while (isfilesep(*src)) src++;
4752       if (*src == '\0') {
4753         // Check for trailing separator
4754         end = dst;
4755         if (colon == dst - 2) break;  // "z:\\"
4756         if (dst == path + 1) break;   // "\\"
4757         if (dst == path + 2 && isfilesep(path[0])) {
4758           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4759           // beginning of a UNC pathname.  Even though it is not, by
4760           // itself, a valid UNC pathname, we leave it as is in order
4761           // to be consistent with the path canonicalizer as well
4762           // as the win32 APIs, which treat this case as an invalid
4763           // UNC pathname rather than as an alias for the root
4764           // directory of the current drive.
4765           break;
4766         }
4767         end = --dst;  // Path does not denote a root directory, so
4768                       // remove trailing separator
4769         break;
4770       }
4771       end = dst;
4772     } else {
4773       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4774         *dst++ = *src++;
4775         if (*src) *dst++ = *src++;
4776         end = dst;
4777       } else {  // Copy a single-byte character
4778         char c = *src++;
4779         *dst++ = c;
4780         // Space is not a legal ending character
4781         if (c != ' ') end = dst;
4782       }
4783     }
4784   }
4785 
4786   *end = '\0';
4787 
4788   // For "z:", add "." to work around a bug in the C runtime library
4789   if (colon == dst - 1) {
4790     path[2] = '.';
4791     path[3] = '\0';
4792   }
4793 
4794   return path;
4795 }
4796 
4797 // This code is a copy of JDK's sysSetLength
4798 // from src/windows/hpi/src/sys_api_md.c
4799 
4800 int os::ftruncate(int fd, jlong length) {
4801   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4802   long high = (long)(length >> 32);
4803   DWORD ret;
4804 
4805   if (h == (HANDLE)(-1)) {
4806     return -1;
4807   }
4808 
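       // Position the file pointer at 'length' (upper 32 bits passed via 'high'), then truncate the file there.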
4809   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4810   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4811     return -1;
4812   }
4813 
4814   if (::SetEndOfFile(h) == FALSE) {
4815     return -1;
4816   }
4817 
4818   return 0;
4819 }
4820 
4821 int os::get_fileno(FILE* fp) {
4822   return _fileno(fp);
4823 }
4824 
4825 // This code is a copy of JDK's sysSync
4826 // from src/windows/hpi/src/sys_api_md.c
4827 // except for the legacy workaround for a bug in Win 98
4828 
4829 int os::fsync(int fd) {
4830   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4831 
4832   if ((!::FlushFileBuffers(handle)) &&
4833       (GetLastError() != ERROR_ACCESS_DENIED)) {
4834     // from winerror.h
4835     return -1;
4836   }
4837   return 0;
4838 }
4839 
4840 static int nonSeekAvailable(int, long *);
4841 static int stdinAvailable(int, long *);
4842 
4843 // This code is a copy of JDK's sysAvailable
4844 // from src/windows/hpi/src/sys_api_md.c
4845 
4846 int os::available(int fd, jlong *bytes) {
4847   jlong cur, end;
4848   struct _stati64 stbuf64;
4849 
4850   if (::_fstati64(fd, &stbuf64) >= 0) {
4851     int mode = stbuf64.st_mode;
4852     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4853       int ret;
4854       long lpbytes;
4855       if (fd == 0) {
4856         ret = stdinAvailable(fd, &lpbytes);
4857       } else {
4858         ret = nonSeekAvailable(fd, &lpbytes);
4859       }
4860       (*bytes) = (jlong)(lpbytes);
4861       return ret;
4862     }
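         // For seekable files, the number of available bytes is the file length minus the current position.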
4863     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4864       return FALSE;
4865     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4866       return FALSE;
4867     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4868       return FALSE;
4869     }
4870     *bytes = end - cur;
4871     return TRUE;
4872   } else {
4873     return FALSE;
4874   }
4875 }
4876 
4877 void os::flockfile(FILE* fp) {
4878   _lock_file(fp);
4879 }
4880 
4881 void os::funlockfile(FILE* fp) {
4882   _unlock_file(fp);
4883 }
4884 
4885 // This code is a copy of JDK's nonSeekAvailable
4886 // from src/windows/hpi/src/sys_api_md.c
4887 
4888 static int nonSeekAvailable(int fd, long *pbytes) {
4889   // This is used for available on non-seekable devices
4890   // (like both named and anonymous pipes, such as pipes
4891   //  connected to an exec'd process).
4892   // Standard Input is a special case.
4893   HANDLE han;
4894 
4895   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4896     return FALSE;
4897   }
4898 
4899   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4900     // PeekNamedPipe fails when at EOF.  In that case we
4901     // simply make *pbytes = 0 which is consistent with the
4902     // behavior we get on Solaris when an fd is at EOF.
4903     // The only alternative is to raise an Exception,
4904     // which isn't really warranted.
4905     //
4906     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4907       return FALSE;
4908     }
4909     *pbytes = 0;
4910   }
4911   return TRUE;
4912 }
4913 
4914 #define MAX_INPUT_EVENTS 2000
4915 
4916 // This code is a copy of JDK's stdinAvailable
4917 // from src/windows/hpi/src/sys_api_md.c
4918 
4919 static int stdinAvailable(int fd, long *pbytes) {
4920   HANDLE han;
4921   DWORD numEventsRead = 0;  // Number of events read from buffer
4922   DWORD numEvents = 0;      // Number of events in buffer
4923   DWORD i = 0;              // Loop index
4924   DWORD curLength = 0;      // Position marker
4925   DWORD actualLength = 0;   // Number of bytes readable
4926   BOOL error = FALSE;       // Error holder
4927   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4928 
4929   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4930     return FALSE;
4931   }
4932 
4933   // Construct an array of input records in the console buffer
4934   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4935   if (error == 0) {
4936     return nonSeekAvailable(fd, pbytes);
4937   }
4938 
4939   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4940   if (numEvents > MAX_INPUT_EVENTS) {
4941     numEvents = MAX_INPUT_EVENTS;
4942   }
4943 
4944   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4945   if (lpBuffer == NULL) {
4946     return FALSE;
4947   }
4948 
4949   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4950   if (error == 0) {
4951     os::free(lpBuffer);
4952     return FALSE;
4953   }
4954 
4955   // Examine input records for the number of bytes available
4956   for (i=0; i<numEvents; i++) {
4957     if (lpBuffer[i].EventType == KEY_EVENT) {
4958 
4959       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4960                                       &(lpBuffer[i].Event);
4961       if (keyRecord->bKeyDown == TRUE) {
4962         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4963         curLength++;
4964         if (*keyPressed == '\r') {
4965           actualLength = curLength;
4966         }
4967       }
4968     }
4969   }
4970 
4971   if (lpBuffer != NULL) {
4972     os::free(lpBuffer);
4973   }
4974 
4975   *pbytes = (long) actualLength;
4976   return TRUE;
4977 }
4978 
4979 // Map a block of memory.
4980 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4981                         char *addr, size_t bytes, bool read_only,
4982                         bool allow_exec) {
4983   HANDLE hFile;
4984   char* base;
4985 
4986   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4987                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4988   if (hFile == INVALID_HANDLE_VALUE) {
4989     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4990     return NULL;
4991   }
4992 
4993   if (allow_exec) {
4994     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4995     // unless it comes from a PE image (which the shared archive is not).
4996     // Even VirtualProtect refuses to give execute access to mapped memory
4997     // that was not previously executable.
4998     //
4999     // Instead, stick the executable region in anonymous memory.  Yuck.
5000     // Penalty is that ~4 pages will not be shareable - in the future
5001     // we might consider DLLizing the shared archive with a proper PE
5002     // header so that mapping executable + sharing is possible.
5003 
5004     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
5005                                 PAGE_READWRITE);
5006     if (base == NULL) {
5007       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
5008       CloseHandle(hFile);
5009       return NULL;
5010     }
5011 
5012     // Record virtual memory allocation
5013     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
5014 
5015     DWORD bytes_read;
5016     OVERLAPPED overlapped;
5017     overlapped.Offset = (DWORD)file_offset;
5018     overlapped.OffsetHigh = 0;
5019     overlapped.hEvent = NULL;
5020     // ReadFile guarantees that if the return value is true, the requested
5021     // number of bytes were read before returning.
5022     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
5023     if (!res) {
5024       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
5025       release_memory(base, bytes);
5026       CloseHandle(hFile);
5027       return NULL;
5028     }
5029   } else {
5030     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
5031                                     NULL /* file_name */);
5032     if (hMap == NULL) {
5033       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
5034       CloseHandle(hFile);
5035       return NULL;
5036     }
5037 
5038     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
5039     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
5040                                   (DWORD)bytes, addr);
5041     if (base == NULL) {
5042       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
5043       CloseHandle(hMap);
5044       CloseHandle(hFile);
5045       return NULL;
5046     }
5047 
5048     if (CloseHandle(hMap) == 0) {
5049       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
5050       CloseHandle(hFile);
5051       return base;
5052     }
5053   }
5054 
5055   if (allow_exec) {
5056     DWORD old_protect;
5057     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
5058     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
5059 
5060     if (!res) {
5061       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
5062       // Don't consider this a hard error; on IA32, even if
5063       // VirtualProtect fails, we should still be able to execute.
5064       CloseHandle(hFile);
5065       return base;
5066     }
5067   }
5068 
5069   if (CloseHandle(hFile) == 0) {
5070     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
5071     return base;
5072   }
5073 
5074   return base;
5075 }
5076 
5077 
5078 // Remap a block of memory.
5079 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5080                           char *addr, size_t bytes, bool read_only,
5081                           bool allow_exec) {
5082   // This OS does not allow existing memory maps to be remapped so we
5083   // would have to unmap the memory before we remap it.
5084 
5085   // Because there is a small window between unmapping memory and mapping
5086   // it in again with different protections, CDS archives are mapped RW
5087   // on Windows, so this function isn't called.
5088   ShouldNotReachHere();
5089   return NULL;
5090 }
5091 
5092 
5093 // Unmap a block of memory.
5094 // Returns true=success, otherwise false.
5095 
5096 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5097   MEMORY_BASIC_INFORMATION mem_info;
5098   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5099     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5100     return false;
5101   }
5102 
5103   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5104   // Instead, executable region was allocated using VirtualAlloc(). See
5105   // pd_map_memory() above.
5106   //
5107   // The following flags should match the 'exec_access' flags used for
5108   // VirtualProtect() in pd_map_memory().
5109   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5110       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5111     return pd_release_memory(addr, bytes);
5112   }
5113 
5114   BOOL result = UnmapViewOfFile(addr);
5115   if (result == 0) {
5116     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
5117     return false;
5118   }
5119   return true;
5120 }
5121 
5122 void os::pause() {
5123   char filename[MAX_PATH];
5124   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5125     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5126   } else {
5127     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5128   }
5129 
5130   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5131   if (fd != -1) {
5132     struct stat buf;
5133     ::close(fd);
5134     while (::stat(filename, &buf) == 0) {
5135       Sleep(100);
5136     }
5137   } else {
5138     jio_fprintf(stderr,
5139                 "Could not open pause file '%s', continuing immediately.\n", filename);
5140   }
5141 }
5142 
5143 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5144 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5145 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
5146 
5147 os::ThreadCrashProtection::ThreadCrashProtection() {
5148 }
5149 
5150 // See the caveats for this class in os_windows.hpp
5151 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5152 // into this method and returns false. If no OS EXCEPTION was raised, returns
5153 // true.
5154 // The callback is supposed to provide the method that should be protected.
5155 //
5156 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5157 
5158   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5159 
5160   _protected_thread = Thread::current_or_null();
5161   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5162 
5163   bool success = true;
5164   __try {
5165     _crash_protection = this;
5166     cb.call();
5167   } __except(EXCEPTION_EXECUTE_HANDLER) {
5168     // only for protection, nothing to do
5169     success = false;
5170   }
5171   _crash_protection = NULL;
5172   _protected_thread = NULL;
5173   Thread::muxRelease(&_crash_mux);
5174   return success;
5175 }
5176 
5177 
5178 class HighResolutionInterval : public CHeapObj<mtThread> {
5179   // The default timer resolution seems to be 10 milliseconds.
5180   // (Where is this written down?)
5181   // If someone wants to sleep for only a fraction of the default,
5182   // then we set the timer resolution down to 1 millisecond for
5183   // the duration of their interval.
5184   // We carefully set the resolution back, since otherwise we
5185   // seem to incur an overhead (3%?) that we don't need.
5186   // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
5187   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5188   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5189   // timeBeginPeriod() if the relative error exceeded some threshold.
5190   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5191   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5192   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5193   // resolution timers running.
5194  private:
5195   jlong resolution;
5196  public:
5197   HighResolutionInterval(jlong ms) {
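         // Only request 1 ms timer resolution when ms is not a multiple of the default 10 ms tick.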
5198     resolution = ms % 10L;
5199     if (resolution != 0) {
5200       MMRESULT result = timeBeginPeriod(1L);
5201     }
5202   }
5203   ~HighResolutionInterval() {
5204     if (resolution != 0) {
5205       MMRESULT result = timeEndPeriod(1L);
5206     }
5207     resolution = 0L;
5208   }
5209 };
5210 
5211 // An Event wraps a win32 "CreateEvent" kernel handle.
5212 //
5213 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5214 //
5215 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5216 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5217 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5218 //     In addition, an unpark() operation might fetch the handle field, but the
5219 //     event could recycle between the fetch and the SetEvent() operation.
5220 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5221 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5222 //     on a stale but recycled handle would be harmless, but in practice this might
5223 //     confuse other non-Sun code, so it's not a viable approach.
5224 //
5225 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5226 //     with the Event.  The event handle is never closed.  This could be construed
5227 //     as handle leakage, but only up to the maximum # of threads that have been extant
5228 //     at any one time.  This shouldn't be an issue, as Windows platforms typically
5229 //     permit a process to have hundreds of thousands of open handles.
5230 //
5231 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5232 //     and release unused handles.
5233 //
5234 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5235 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5236 //
5237 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5238 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5239 //
5240 // We use (2).
5241 //
5242 // TODO-FIXME:
5243 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5244 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5245 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5246 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5247 //     into a single win32 CreateEvent() handle.
5248 //
5249 // Assumption:
5250 //    Only one parker can exist on an event, which is why we allocate
5251 //    them per-thread. Multiple unparkers can coexist.
5252 //
5253 // _Event transitions in park()
5254 //   -1 => -1 : illegal
5255 //    1 =>  0 : pass - return immediately
5256 //    0 => -1 : block; then set _Event to 0 before returning
5257 //
5258 // _Event transitions in unpark()
5259 //    0 => 1 : just return
5260 //    1 => 1 : just return
5261 //   -1 => either 0 or 1; must signal target thread
5262 //         That is, we can safely transition _Event from -1 to either
5263 //         0 or 1.
5264 //
5265 // _Event serves as a restricted-range semaphore.
5266 //   -1 : thread is blocked, i.e. there is a waiter
5267 //    0 : neutral: thread is running or ready,
5268 //        could have been signaled after a wait started
5269 //    1 : signaled - thread is running or ready
5270 //
5271 // Another possible encoding of _Event would be with
5272 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5273 //
5274 
5275 int os::PlatformEvent::park(jlong Millis) {
5276   // Transitions for _Event:
5277   //   -1 => -1 : illegal
5278   //    1 =>  0 : pass - return immediately
5279   //    0 => -1 : block; then set _Event to 0 before returning
5280 
5281   guarantee(_ParkHandle != NULL , "Invariant");
5282   guarantee(Millis > 0          , "Invariant");
5283 
5284   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5285   // the initial park() operation.
5286   // Consider: use atomic decrement instead of CAS-loop
5287 
5288   int v;
5289   for (;;) {
5290     v = _Event;
5291     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5292   }
5293   guarantee((v == 0) || (v == 1), "invariant");
5294   if (v != 0) return OS_OK;
5295 
5296   // Do this the hard way by blocking ...
5297   // TODO: consider a brief spin here, gated on the success of recent
5298   // spin attempts by this thread.
5299   //
5300   // We decompose long timeouts into series of shorter timed waits.
5301   // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
5302   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5303   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5304   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5305   // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend
5306   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5307   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5308   // for the already waited time.  This policy does not admit any new outcomes.
5309   // In the future, however, we might want to track the accumulated wait time and
5310   // adjust Millis accordingly if we encounter a spurious wakeup.
5311 
5312   const int MAXTIMEOUT = 0x10000000;
5313   DWORD rv = WAIT_TIMEOUT;
5314   while (_Event < 0 && Millis > 0) {
5315     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5316     if (Millis > MAXTIMEOUT) {
5317       prd = MAXTIMEOUT;
5318     }
5319     HighResolutionInterval *phri = NULL;
5320     if (!ForceTimeHighResolution) {
5321       phri = new HighResolutionInterval(prd);
5322     }
5323     rv = ::WaitForSingleObject(_ParkHandle, prd);
5324     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5325     if (rv == WAIT_TIMEOUT) {
5326       Millis -= prd;
5327     }
5328     delete phri; // if it is NULL, harmless
5329   }
5330   v = _Event;
5331   _Event = 0;
5332   // see comment at end of os::PlatformEvent::park() below:
5333   OrderAccess::fence();
5334   // If we encounter a nearly simultaneous timeout expiry and unpark()
5335   // we return OS_OK indicating we awoke via unpark().
5336   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5337   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5338 }
5339 
5340 void os::PlatformEvent::park() {
5341   // Transitions for _Event:
5342   //   -1 => -1 : illegal
5343   //    1 =>  0 : pass - return immediately
5344   //    0 => -1 : block; then set _Event to 0 before returning
5345 
5346   guarantee(_ParkHandle != NULL, "Invariant");
5347   // Invariant: Only the thread associated with the Event/PlatformEvent
5348   // may call park().
5349   // Consider: use atomic decrement instead of CAS-loop
5350   int v;
5351   for (;;) {
5352     v = _Event;
5353     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5354   }
5355   guarantee((v == 0) || (v == 1), "invariant");
5356   if (v != 0) return;
5357 
5358   // Do this the hard way by blocking ...
5359   // TODO: consider a brief spin here, gated on the success of recent
5360   // spin attempts by this thread.
5361   while (_Event < 0) {
5362     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5363     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5364   }
5365 
5366   // Usually we'll find _Event == 0 at this point, but as
5367   // an optional optimization we clear it, just in case
5368   // multiple unpark() operations drove _Event up to 1.
5369   _Event = 0;
5370   OrderAccess::fence();
5371   guarantee(_Event >= 0, "invariant");
5372 }
5373 
5374 void os::PlatformEvent::unpark() {
5375   guarantee(_ParkHandle != NULL, "Invariant");
5376 
5377   // Transitions for _Event:
5378   //    0 => 1 : just return
5379   //    1 => 1 : just return
5380   //   -1 => either 0 or 1; must signal target thread
5381   //         That is, we can safely transition _Event from -1 to either
5382   //         0 or 1.
5383   // See also: "Semaphores in Plan 9" by Mullender & Cox
5384   //
5385   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5386   // that it will take two back-to-back park() calls for the owning
5387   // thread to block. This has the benefit of forcing a spurious return
5388   // from the first park() call after an unpark() call which will help
5389   // shake out uses of park() and unpark() without condition variables.
5390 
5391   if (Atomic::xchg(&_Event, 1) >= 0) return;
5392 
5393   ::SetEvent(_ParkHandle);
5394 }
5395 
5396 
5397 // JSR166
5398 // -------------------------------------------------------
5399 
5400 // The Windows implementation of Park is very straightforward: Basic
5401 // operations on Win32 Events turn out to have the right semantics to
5402 // use them directly. We opportunistically reuse the event inherited
5403 // from Monitor.
5404 
5405 void Parker::park(bool isAbsolute, jlong time) {
5406   guarantee(_ParkEvent != NULL, "invariant");
5407   // First, demultiplex/decode time arguments
5408   if (time < 0) { // don't wait
5409     return;
5410   } else if (time == 0 && !isAbsolute) {
5411     time = INFINITE;
5412   } else if (isAbsolute) {
5413     time -= os::javaTimeMillis(); // convert to relative time
5414     if (time <= 0) {  // already elapsed
5415       return;
5416     }
5417   } else { // relative
5418     time /= 1000000;  // Must coarsen from nanos to millis
5419     if (time == 0) {  // Wait for the minimal time unit if zero
5420       time = 1;
5421     }
5422   }
5423 
5424   JavaThread* thread = JavaThread::current();
5425 
5426   // Don't wait if interrupted or already triggered
5427   if (thread->is_interrupted(false) ||
5428       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5429     ResetEvent(_ParkEvent);
5430     return;
5431   } else {
5432     ThreadBlockInVM tbivm(thread);
5433     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5434     thread->set_suspend_equivalent();
5435 
5436     WaitForSingleObject(_ParkEvent, time);
5437     ResetEvent(_ParkEvent);
5438 
5439     // If externally suspended while waiting, re-suspend
5440     if (thread->handle_special_suspend_equivalent_condition()) {
5441       thread->java_suspend_self();
5442     }
5443   }
5444 }
5445 
5446 void Parker::unpark() {
5447   guarantee(_ParkEvent != NULL, "invariant");
5448   SetEvent(_ParkEvent);
5449 }
5450 
5451 // Platform Monitor implementation
5452 
5453 // Must already be locked
5454 int os::PlatformMonitor::wait(jlong millis) {
5455   assert(millis >= 0, "negative timeout");
5456   int ret = OS_TIMEOUT;
5457   int status = SleepConditionVariableCS(&_cond, &_mutex,
5458                                         millis == 0 ? INFINITE : millis);
5459   if (status != 0) {
5460     ret = OS_OK;
5461   }
5462   #ifndef PRODUCT
5463   else {
5464     DWORD err = GetLastError();
5465     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5466   }
5467   #endif
5468   return ret;
5469 }
5470 
5471 // Run the specified command in a separate process. Return its exit value,
5472 // or -1 on failure (e.g. can't create a new process).
5473 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5474   STARTUPINFO si;
5475   PROCESS_INFORMATION pi;
5476   DWORD exit_code;
5477 
5478   char * cmd_string;
5479   const char * cmd_prefix = "cmd /C ";
5480   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5481   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5482   if (cmd_string == NULL) {
5483     return -1;
5484   }
5485   cmd_string[0] = '\0';
5486   strcat(cmd_string, cmd_prefix);
5487   strcat(cmd_string, cmd);
5488 
5489   // now replace all '\n' with '&'
5490   char * substring = cmd_string;
5491   while ((substring = strchr(substring, '\n')) != NULL) {
5492     substring[0] = '&';
5493     substring++;
5494   }
5495   memset(&si, 0, sizeof(si));
5496   si.cb = sizeof(si);
5497   memset(&pi, 0, sizeof(pi));
5498   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5499                             cmd_string,    // command line
5500                             NULL,   // process security attribute
5501                             NULL,   // thread security attribute
5502                             TRUE,   // inherits system handles
5503                             0,      // no creation flags
5504                             NULL,   // use parent's environment block
5505                             NULL,   // use parent's starting directory
5506                             &si,    // (in) startup information
5507                             &pi);   // (out) process information
5508 
5509   if (rslt) {
5510     // Wait until child process exits.
5511     WaitForSingleObject(pi.hProcess, INFINITE);
5512 
5513     GetExitCodeProcess(pi.hProcess, &exit_code);
5514 
5515     // Close process and thread handles.
5516     CloseHandle(pi.hProcess);
5517     CloseHandle(pi.hThread);
5518   } else {
5519     exit_code = -1;
5520   }
5521 
5522   FREE_C_HEAP_ARRAY(char, cmd_string);
5523   return (int)exit_code;
5524 }
5525 
5526 bool os::find(address addr, outputStream* st) {
5527   int offset = -1;
5528   bool result = false;
5529   char buf[256];
5530   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5531     st->print(PTR_FORMAT " ", addr);
5532     if (strlen(buf) < sizeof(buf) - 1) {
5533       char* p = strrchr(buf, '\\');
5534       if (p) {
5535         st->print("%s", p + 1);
5536       } else {
5537         st->print("%s", buf);
5538       }
5539     } else {
5540       // The library name is probably truncated. Let's omit the library name.
5541       // See also JDK-8147512.
5542     }
5543     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5544       st->print("::%s + 0x%x", buf, offset);
5545     }
5546     st->cr();
5547     result = true;
5548   }
5549   return result;
5550 }
5551 
5552 static jint initSock() {
5553   WSADATA wsadata;
5554 
5555   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5556     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5557                 ::GetLastError());
5558     return JNI_ERR;
5559   }
5560   return JNI_OK;
5561 }
5562 
5563 struct hostent* os::get_host_by_name(char* name) {
5564   return (struct hostent*)gethostbyname(name);
5565 }
5566 
5567 int os::socket_close(int fd) {
5568   return ::closesocket(fd);
5569 }
5570 
5571 int os::socket(int domain, int type, int protocol) {
5572   return ::socket(domain, type, protocol);
5573 }
5574 
5575 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5576   return ::connect(fd, him, len);
5577 }
5578 
5579 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5580   return ::recv(fd, buf, (int)nBytes, flags);
5581 }
5582 
5583 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5584   return ::send(fd, buf, (int)nBytes, flags);
5585 }
5586 
5587 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5588   return ::send(fd, buf, (int)nBytes, flags);
5589 }
5590 
5591 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5592 #if defined(IA32)
5593   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5594 #elif defined (AMD64)
5595   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5596 #endif
5597 
5598 // returns true if thread could be suspended,
5599 // false otherwise
5600 static bool do_suspend(HANDLE* h) {
5601   if (h != NULL) {
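         // SuspendThread returns (DWORD)-1 on failure.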
5602     if (SuspendThread(*h) != ~0) {
5603       return true;
5604     }
5605   }
5606   return false;
5607 }
5608 
5609 // resume the thread
5610 // calling resume on an active thread is a no-op
5611 static void do_resume(HANDLE* h) {
5612   if (h != NULL) {
5613     ResumeThread(*h);
5614   }
5615 }
5616 
5617 // retrieve a suspend/resume context capable handle
5618 // from the tid. Caller validates handle return value.
5619 void get_thread_handle_for_extended_context(HANDLE* h,
5620                                             OSThread::thread_id_t tid) {
5621   if (h != NULL) {
5622     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5623   }
5624 }
5625 
5626 // Thread sampling implementation
5627 //
5628 void os::SuspendedThreadTask::internal_do_task() {
5629   CONTEXT    ctxt;
5630   HANDLE     h = NULL;
5631 
5632   // get context capable handle for thread
5633   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5634 
5635   // sanity
5636   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5637     return;
5638   }
5639 
5640   // suspend the thread
5641   if (do_suspend(&h)) {
5642     ctxt.ContextFlags = sampling_context_flags;
5643     // get thread context
5644     GetThreadContext(h, &ctxt);
5645     SuspendedThreadTaskContext context(_thread, &ctxt);
5646     // pass context to Thread Sampling impl
5647     do_task(context);
5648     // resume thread
5649     do_resume(&h);
5650   }
5651 
5652   // close handle
5653   CloseHandle(h);
5654 }
5655 
5656 bool os::start_debugging(char *buf, int buflen) {
5657   int len = (int)strlen(buf);
5658   char *p = &buf[len];
5659 
5660   jio_snprintf(p, buflen-len,
5661              "\n\n"
5662              "Do you want to debug the problem?\n\n"
5663              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5664              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5665              "Otherwise, select 'No' to abort...",
5666              os::current_process_id(), os::current_thread_id());
5667 
5668   bool yes = os::message_box("Unexpected Error", buf);
5669 
5670   if (yes) {
5671     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5672     // exception. If VM is running inside a debugger, the debugger will
5673     // catch the exception. Otherwise, the breakpoint exception will reach
5674     // the default windows exception handler, which can spawn a debugger and
5675     // automatically attach to the dying VM.
5676     os::breakpoint();
5677     yes = false;
5678   }
5679   return yes;
5680 }
5681 
5682 void* os::get_default_process_handle() {
5683   return (void*)GetModuleHandle(NULL);
5684 }
5685 
5686 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5687 // which is used to find statically linked in agents.
5688 // Additionally for Windows, takes into account __stdcall names.
5689 // Parameters:
5690 //            sym_name: Symbol in library we are looking for
5691 //            lib_name: Name of library to look in, NULL for shared libs.
5692 //            is_absolute_path == true if lib_name is absolute path to agent
5693 //                                     such as "C:/a/b/L.dll"
5694 //            == false if only the base name of the library is passed in
5695 //               such as "L"
5696 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5697                                     bool is_absolute_path) {
5698   char *agent_entry_name;
5699   size_t len;
5700   size_t name_len;
5701   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5702   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5703   const char *start;
5704 
5705   if (lib_name != NULL) {
5706     len = name_len = strlen(lib_name);
5707     if (is_absolute_path) {
5708       // Need to strip path, prefix and suffix
5709       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5710         lib_name = ++start;
5711       } else {
5712         // Need to check for drive prefix
5713         if ((start = strchr(lib_name, ':')) != NULL) {
5714           lib_name = ++start;
5715         }
5716       }
5717       if (len <= (prefix_len + suffix_len)) {
5718         return NULL;
5719       }
5720       lib_name += prefix_len;
5721       name_len = strlen(lib_name) - suffix_len;
5722     }
5723   }
5724   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5725   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5726   if (agent_entry_name == NULL) {
5727     return NULL;
5728   }
5729   if (lib_name != NULL) {
5730     const char *p = strrchr(sym_name, '@');
5731     if (p != NULL && p != sym_name) {
5732       // sym_name == _Agent_OnLoad@XX
5733       strncpy(agent_entry_name, sym_name, (p - sym_name));
5734       agent_entry_name[(p-sym_name)] = '\0';
5735       // agent_entry_name == _Agent_OnLoad
5736       strcat(agent_entry_name, "_");
5737       strncat(agent_entry_name, lib_name, name_len);
5738       strcat(agent_entry_name, p);
5739       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5740     } else {
5741       strcpy(agent_entry_name, sym_name);
5742       strcat(agent_entry_name, "_");
5743       strncat(agent_entry_name, lib_name, name_len);
5744     }
5745   } else {
5746     strcpy(agent_entry_name, sym_name);
5747   }
5748   return agent_entry_name;
5749 }
5750 
5751 #ifndef PRODUCT
5752 
5753 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5754 // contiguous memory block at a particular address.
5755 // The test first tries to find a good approximate address to allocate at by using the same
5756 // method to allocate some memory at any address. The test then tries to allocate memory in
5757 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5758 // This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5759 // the previously allocated memory is available for allocation. The only actual failure
5760 // that is reported is when the test tries to allocate at a particular location but gets a
5761 // different valid one. A NULL return value at this point is not considered an error but may
5762 // be legitimate.
5763 void TestReserveMemorySpecial_test() {
5764   if (!UseLargePages) {
5765     return;
5766   }
5767   // save current value of globals
5768   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5769   bool old_use_numa_interleaving = UseNUMAInterleaving;
5770 
5771   // set globals to make sure we hit the correct code path
5772   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5773 
5774   // do an allocation at an address selected by the OS to get a good one.
5775   const size_t large_allocation_size = os::large_page_size() * 4;
5776   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5777   if (result == NULL) {
5778   } else {
5779     os::release_memory_special(result, large_allocation_size);
5780 
5781     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5782     // we managed to get it once.
5783     const size_t expected_allocation_size = os::large_page_size();
5784     char* expected_location = result + os::large_page_size();
5785     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5786     if (actual_location == NULL) {
5787     } else {
5788       // release memory
5789       os::release_memory_special(actual_location, expected_allocation_size);
5790       // only now check, after releasing any memory to avoid any leaks.
5791       assert(actual_location == expected_location,
5792              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5793              expected_location, expected_allocation_size, actual_location);
5794     }
5795   }
5796 
5797   // restore globals
5798   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5799   UseNUMAInterleaving = old_use_numa_interleaving;
5800 }
5801 #endif // PRODUCT
5802 
5803 /*
5804   All the defined signal names for Windows.
5805 
5806   NOTE that not all of these names are accepted by FindSignal!
5807 
5808   For various reasons some of these may be rejected at runtime.
5809 
5810   Here are the names currently accepted by a user of sun.misc.Signal with
5811   1.4.1 (ignoring potential interaction with use of chaining, etc):
5812 
5813      (LIST TBD)
5814 
5815 */
5816 int os::get_signal_number(const char* name) {
5817   static const struct {
5818     const char* name;
5819     int         number;
5820   } siglabels [] =
5821     // derived from version 6.0 VC98/include/signal.h
5822   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort call
5823   "FPE",        SIGFPE,         // floating point exception
5824   "SEGV",       SIGSEGV,        // segment violation
5825   "INT",        SIGINT,         // interrupt
5826   "TERM",       SIGTERM,        // software term signal from kill
5827   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5828   "ILL",        SIGILL};        // illegal instruction
5829   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5830     if (strcmp(name, siglabels[i].name) == 0) {
5831       return siglabels[i].number;
5832     }
5833   }
5834   return -1;
5835 }
5836 
5837 // Fast current thread access
5838 
5839 int os::win32::_thread_ptr_offset = 0;
5840 
5841 static void call_wrapper_dummy() {}
5842 
5843 // We need to call the os_exception_wrapper once so that it sets
5844 // up the offset from FS of the thread pointer.
5845 void os::win32::initialize_thread_ptr_offset() {
5846   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5847                            NULL, methodHandle(), NULL, NULL);
5848 }
5849 
5850 bool os::supports_map_sync() {
5851   return false;
5852 }