1 /*
   2  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "logging/log.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "memory/allocation.inline.hpp"
  41 #include "memory/filemap.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/globals.hpp"
  50 #include "runtime/interfaceSupport.inline.hpp"
  51 #include "runtime/java.hpp"
  52 #include "runtime/javaCalls.hpp"
  53 #include "runtime/mutexLocker.hpp"
  54 #include "runtime/objectMonitor.hpp"
  55 #include "runtime/orderAccess.hpp"
  56 #include "runtime/osThread.hpp"
  57 #include "runtime/perfMemory.hpp"
  58 #include "runtime/safepointMechanism.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "services/attachListener.hpp"
  67 #include "services/memTracker.hpp"
  68 #include "services/runtimeService.hpp"
  69 #include "utilities/align.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/macros.hpp"
  74 #include "utilities/vmError.hpp"
  75 #include "symbolengine.hpp"
  76 #include "windbghelp.hpp"
  77 
  78 #ifdef _DEBUG
  79 #include <crtdbg.h>
  80 #endif
  81 
  82 #include <windows.h>
  83 #include <sys/types.h>
  84 #include <sys/stat.h>
  85 #include <sys/timeb.h>
  86 #include <objidl.h>
  87 #include <shlobj.h>
  88 
  89 #include <malloc.h>
  90 #include <signal.h>
  91 #include <direct.h>
  92 #include <errno.h>
  93 #include <fcntl.h>
  94 #include <io.h>
  95 #include <process.h>              // For _beginthreadex(), _endthreadex()
  96 #include <imagehlp.h>             // For os::dll_address_to_function_name
  97 // for enumerating dlls
  98 #include <vdmdbg.h>
  99 #include <psapi.h>
 100 #include <mmsystem.h>
 101 #include <winsock2.h>
 102 
 103 // for timer info max values which include all bits
 104 #define ALL_64_BITS CONST64(-1)
 105 
 106 // For DLL loading/load error detection
 107 // Values from the PE/COFF file format
 108 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 109 #define IMAGE_FILE_SIGNATURE_LENGTH 4
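// Note on the PE/COFF layout used by os::dll_load below: the DOS header stores,
// at file offset 0x3c, the 4-byte file offset of the "PE\0\0" signature. The
// COFF file header follows that 4-byte signature, and its first 16-bit field
// (Machine) identifies the architecture the DLL was built for, e.g.
// IMAGE_FILE_MACHINE_I386 or IMAGE_FILE_MACHINE_AMD64.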
 110 
 111 static HANDLE main_process;
 112 static HANDLE main_thread;
 113 static int    main_thread_id;
 114 
 115 static FILETIME process_creation_time;
 116 static FILETIME process_exit_time;
 117 static FILETIME process_user_time;
 118 static FILETIME process_kernel_time;
 119 
 120 #ifdef _M_AMD64
 121   #define __CPU__ amd64
 122 #else
 123   #define __CPU__ i486
 124 #endif
 125 
 126 #if INCLUDE_AOT
 127 PVOID  topLevelVectoredExceptionHandler = NULL;
 128 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 129 #endif
 130 
 131 // save DLL module handle, used by GetModuleFileName
 132 
 133 HINSTANCE vm_lib_handle;
 134 
 135 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 136   switch (reason) {
 137   case DLL_PROCESS_ATTACH:
 138     vm_lib_handle = hinst;
 139     if (ForceTimeHighResolution) {
 140       timeBeginPeriod(1L);
 141     }
 142     WindowsDbgHelp::pre_initialize();
 143     SymbolEngine::pre_initialize();
 144     break;
 145   case DLL_PROCESS_DETACH:
 146     if (ForceTimeHighResolution) {
 147       timeEndPeriod(1L);
 148     }
 149 #if INCLUDE_AOT
 150     if (topLevelVectoredExceptionHandler != NULL) {
 151       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 152       topLevelVectoredExceptionHandler = NULL;
 153     }
 154 #endif
 155     break;
 156   default:
 157     break;
 158   }
 159   return true;
 160 }
 161 
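// Converts a FILETIME (a 64-bit count of 100 ns intervals) to seconds as a
// double: the low 32-bit half contributes dwLowDateTime / 1e7 and the high
// half is scaled by (2^32 - 1) / 1e7, 1e7 being the number of 100 ns
// intervals per second.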
 162 static inline double fileTimeAsDouble(FILETIME* time) {
 163   const double high  = (double) ((unsigned int) ~0);
 164   const double split = 10000000.0;
 165   double result = (time->dwLowDateTime / split) +
 166                    time->dwHighDateTime * (high/split);
 167   return result;
 168 }
 169 
 170 // Implementation of os
 171 
 172 bool os::unsetenv(const char* name) {
 173   assert(name != NULL, "Null pointer");
 174   return (SetEnvironmentVariable(name, NULL) == TRUE);
 175 }
 176 
 177 // No setuid programs under Windows.
 178 bool os::have_special_privileges() {
 179   return false;
 180 }
 181 
 182 
 183 // This method is a periodic task to check for misbehaving JNI applications
 184 // under CheckJNI; we can add any periodic checks here.
 185 // On Windows it currently does nothing.
 186 void os::run_periodic_checks() {
 187   return;
 188 }
 189 
 190 // previous UnhandledExceptionFilter, if there is one
 191 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 192 
 193 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 194 
 195 void os::init_system_properties_values() {
 196   // sysclasspath, java_home, dll_dir
 197   {
 198     char *home_path;
 199     char *dll_path;
 200     char *pslash;
 201     const char *bin = "\\bin";
 202     char home_dir[MAX_PATH + 1];
 203     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 204 
 205     if (alt_home_dir != NULL)  {
 206       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 207       home_dir[MAX_PATH] = '\0';
 208     } else {
 209       os::jvm_path(home_dir, sizeof(home_dir));
 210       // Found the full path to jvm.dll.
 211       // Now cut the path to <java_home>/jre if we can.
 212       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 213       pslash = strrchr(home_dir, '\\');
 214       if (pslash != NULL) {
 215         *pslash = '\0';                   // get rid of \{client|server}
 216         pslash = strrchr(home_dir, '\\');
 217         if (pslash != NULL) {
 218           *pslash = '\0';                 // get rid of \bin
 219         }
 220       }
 221     }
 222 
 223     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 224     strcpy(home_path, home_dir);
 225     Arguments::set_java_home(home_path);
 226     FREE_C_HEAP_ARRAY(char, home_path);
 227 
 228     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 229                                 mtInternal);
 230     strcpy(dll_path, home_dir);
 231     strcat(dll_path, bin);
 232     Arguments::set_dll_dir(dll_path);
 233     FREE_C_HEAP_ARRAY(char, dll_path);
 234 
 235     if (!set_boot_path('\\', ';')) {
 236       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 237     }
 238   }
 239 
 240 // library_path
 241 #define EXT_DIR "\\lib\\ext"
 242 #define BIN_DIR "\\bin"
 243 #define PACKAGE_DIR "\\Sun\\Java"
 244   {
 245     // Win32 library search order (see the documentation for LoadLibrary):
 246     //
 247     // 1. The directory from which the application is loaded.
 248     // 2. The system-wide Java Extensions directory (Java only)
 249     // 3. System directory (GetSystemDirectory)
 250     // 4. Windows directory (GetWindowsDirectory)
 251     // 5. The PATH environment variable
 252     // 6. The current directory
 253 
 254     char *library_path;
 255     char tmp[MAX_PATH];
 256     char *path_str = ::getenv("PATH");
 257 
 258     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 259                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 260 
 261     library_path[0] = '\0';
 262 
 263     GetModuleFileName(NULL, tmp, sizeof(tmp));
 264     *(strrchr(tmp, '\\')) = '\0';
 265     strcat(library_path, tmp);
 266 
 267     GetWindowsDirectory(tmp, sizeof(tmp));
 268     strcat(library_path, ";");
 269     strcat(library_path, tmp);
 270     strcat(library_path, PACKAGE_DIR BIN_DIR);
 271 
 272     GetSystemDirectory(tmp, sizeof(tmp));
 273     strcat(library_path, ";");
 274     strcat(library_path, tmp);
 275 
 276     GetWindowsDirectory(tmp, sizeof(tmp));
 277     strcat(library_path, ";");
 278     strcat(library_path, tmp);
 279 
 280     if (path_str) {
 281       strcat(library_path, ";");
 282       strcat(library_path, path_str);
 283     }
 284 
 285     strcat(library_path, ";.");
 286 
 287     Arguments::set_library_path(library_path);
 288     FREE_C_HEAP_ARRAY(char, library_path);
 289   }
 290 
 291   // Default extensions directory
 292   {
 293     char path[MAX_PATH];
 294     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 295     GetWindowsDirectory(path, MAX_PATH);
 296     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 297             path, PACKAGE_DIR, EXT_DIR);
 298     Arguments::set_ext_dirs(buf);
 299   }
 300   #undef EXT_DIR
 301   #undef BIN_DIR
 302   #undef PACKAGE_DIR
 303 
 304 #ifndef _WIN64
 305   // set our UnhandledExceptionFilter and save any previous one
 306   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 307 #endif
 308 
 309   // Done
 310   return;
 311 }
 312 
 313 void os::breakpoint() {
 314   DebugBreak();
 315 }
 316 
 317 // Invoked from the BREAKPOINT Macro
 318 extern "C" void breakpoint() {
 319   os::breakpoint();
 320 }
 321 
 322 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 323 // So far, this method is only used by Native Memory Tracking, which is
 324 // only supported on Windows XP or later.
 325 //
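// Note: the 'toSkip + 1' below skips the caller-requested frames plus this
// function's own frame; any stack[] slots beyond the captured count are
// filled with NULL.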
 326 int os::get_native_stack(address* stack, int frames, int toSkip) {
 327   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 328   for (int index = captured; index < frames; index ++) {
 329     stack[index] = NULL;
 330   }
 331   return captured;
 332 }
 333 
 334 
 335 // os::current_stack_base()
 336 //
 337 //   Returns the base of the stack, which is the stack's
 338 //   starting address.  This function must be called
 339 //   while running on the stack of the thread being queried.
 340 
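// Querying the address of a local variable (&minfo) returns information about
// the memory region containing the current stack; its AllocationBase is the
// lowest address of the stack reservation. The loop below then sums all
// adjacent regions sharing that AllocationBase (reserved, guard and committed
// parts), so that base + size is the highest (starting) address of the
// downward-growing stack.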
 341 address os::current_stack_base() {
 342   MEMORY_BASIC_INFORMATION minfo;
 343   address stack_bottom;
 344   size_t stack_size;
 345 
 346   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 347   stack_bottom =  (address)minfo.AllocationBase;
 348   stack_size = minfo.RegionSize;
 349 
 350   // Add up the sizes of all the regions with the same
 351   // AllocationBase.
 352   while (1) {
 353     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 354     if (stack_bottom == (address)minfo.AllocationBase) {
 355       stack_size += minfo.RegionSize;
 356     } else {
 357       break;
 358     }
 359   }
 360   return stack_bottom + stack_size;
 361 }
 362 
 363 size_t os::current_stack_size() {
 364   size_t sz;
 365   MEMORY_BASIC_INFORMATION minfo;
 366   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 367   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 368   return sz;
 369 }
 370 
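// Walks [start, start + size) with VirtualQuery and reports the first
// contiguous committed run in that range: committed_start is set to its
// beginning and committed_size to its length (clipped to the queried range).
// Returns false if nothing in the range is committed.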
 371 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 372   MEMORY_BASIC_INFORMATION minfo;
 373   committed_start = NULL;
 374   committed_size = 0;
 375   address top = start + size;
 376   const address start_addr = start;
 377   while (start < top) {
 378     VirtualQuery(start, &minfo, sizeof(minfo));
 379     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 380       if (committed_start != NULL) {
 381         break;
 382       }
 383     } else {  // committed
 384       if (committed_start == NULL) {
 385         committed_start = start;
 386       }
 387       size_t offset = start - (address)minfo.BaseAddress;
 388       committed_size += minfo.RegionSize - offset;
 389     }
 390     start = (address)minfo.BaseAddress + minfo.RegionSize;
 391   }
 392 
 393   if (committed_start == NULL) {
 394     assert(committed_size == 0, "Sanity");
 395     return false;
 396   } else {
 397     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 398     // current region may go beyond the limit, trim to the limit
 399     committed_size = MIN2(committed_size, size_t(top - committed_start));
 400     return true;
 401   }
 402 }
 403 
 404 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 405   const struct tm* time_struct_ptr = localtime(clock);
 406   if (time_struct_ptr != NULL) {
 407     *res = *time_struct_ptr;
 408     return res;
 409   }
 410   return NULL;
 411 }
 412 
 413 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 414   const struct tm* time_struct_ptr = gmtime(clock);
 415   if (time_struct_ptr != NULL) {
 416     *res = *time_struct_ptr;
 417     return res;
 418   }
 419   return NULL;
 420 }
 421 
 422 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 423 
 424 // Thread start routine for all newly created threads
 425 static unsigned __stdcall thread_native_entry(Thread* thread) {
 426 
 427   thread->record_stack_base_and_size();
 428 
 429   // Try to randomize the cache line index of hot stack frames.
 430   // This helps when threads with the same stack traces evict each other's
 431   // cache lines. The threads can be either from the same JVM instance, or
 432   // from different JVM instances. The benefit is especially true for
 433   // processors with hyperthreading technology.
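  // The _alloca below shifts this thread's stack pointer down by one of eight
  // offsets (0..7 * 128 bytes, derived from the pid and a per-process counter)
  // before the thread body runs, so equivalent frames land on different
  // cache-line offsets.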
 434   static int counter = 0;
 435   int pid = os::current_process_id();
 436   _alloca(((pid ^ counter++) & 7) * 128);
 437 
 438   thread->initialize_thread_current();
 439 
 440   OSThread* osthr = thread->osthread();
 441   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 442 
 443   if (UseNUMA) {
 444     int lgrp_id = os::numa_get_group_id();
 445     if (lgrp_id != -1) {
 446       thread->set_lgrp_id(lgrp_id);
 447     }
 448   }
 449 
 450   // Diagnostic code to investigate JDK-6573254
 451   int res = 30115;  // non-java thread
 452   if (thread->is_Java_thread()) {
 453     res = 20115;    // java thread
 454   }
 455 
 456   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 457 
 458   // Install a win32 structured exception handler around every thread created
 459   // by the VM, so the VM can generate an error dump when an exception occurs
 460   // in a non-Java thread (e.g. the VM thread).
 461   __try {
 462     thread->call_run();
 463   } __except(topLevelExceptionFilter(
 464                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 465     // Nothing to do.
 466   }
 467 
 468   // Note: at this point the thread object may already have deleted itself.
 469   // Do not dereference it from here on out.
 470 
 471   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 472 
 473   // One less thread is executing
 474   // When the VMThread gets here, the main thread may have already exited,
 475   // which frees the CodeHeap containing the Atomic::add code
 476   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 477     Atomic::dec(&os::win32::_os_thread_count);
 478   }
 479 
 480   // Thread must not return from exit_process_or_thread(), but if it does,
 481   // let it proceed to exit normally
 482   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 483 }
 484 
 485 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 486                                   int thread_id) {
 487   // Allocate the OSThread object
 488   OSThread* osthread = new OSThread(NULL, NULL);
 489   if (osthread == NULL) return NULL;
 490 
 491   // Initialize the JDK library's interrupt event.
 492   // This should really be done when OSThread is constructed,
 493   // but there is no way for a constructor to report failure to
 494   // allocate the event.
 495   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 496   if (interrupt_event == NULL) {
 497     delete osthread;
 498     return NULL;
 499   }
 500   osthread->set_interrupt_event(interrupt_event);
 501 
 502   // Store info on the Win32 thread into the OSThread
 503   osthread->set_thread_handle(thread_handle);
 504   osthread->set_thread_id(thread_id);
 505 
 506   if (UseNUMA) {
 507     int lgrp_id = os::numa_get_group_id();
 508     if (lgrp_id != -1) {
 509       thread->set_lgrp_id(lgrp_id);
 510     }
 511   }
 512 
 513   // Initial thread state is INITIALIZED, not SUSPENDED
 514   osthread->set_state(INITIALIZED);
 515 
 516   return osthread;
 517 }
 518 
 519 
 520 bool os::create_attached_thread(JavaThread* thread) {
 521 #ifdef ASSERT
 522   thread->verify_not_published();
 523 #endif
 524   HANDLE thread_h;
 525   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 526                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 527     fatal("DuplicateHandle failed\n");
 528   }
 529   OSThread* osthread = create_os_thread(thread, thread_h,
 530                                         (int)current_thread_id());
 531   if (osthread == NULL) {
 532     return false;
 533   }
 534 
 535   // Initial thread state is RUNNABLE
 536   osthread->set_state(RUNNABLE);
 537 
 538   thread->set_osthread(osthread);
 539 
 540   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 541     os::current_thread_id());
 542 
 543   return true;
 544 }
 545 
 546 bool os::create_main_thread(JavaThread* thread) {
 547 #ifdef ASSERT
 548   thread->verify_not_published();
 549 #endif
 550   if (_starting_thread == NULL) {
 551     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 552     if (_starting_thread == NULL) {
 553       return false;
 554     }
 555   }
 556 
 557   // The primordial thread is runnable from the start.
 558   _starting_thread->set_state(RUNNABLE);
 559 
 560   thread->set_osthread(_starting_thread);
 561   return true;
 562 }
 563 
 564 // Helper function to trace _beginthreadex attributes,
 565 //  similar to os::Posix::describe_pthread_attr()
 566 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 567                                                size_t stacksize, unsigned initflag) {
 568   stringStream ss(buf, buflen);
 569   if (stacksize == 0) {
 570     ss.print("stacksize: default, ");
 571   } else {
 572     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 573   }
 574   ss.print("flags: ");
 575   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 576   #define ALL(X) \
 577     X(CREATE_SUSPENDED) \
 578     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 579   ALL(PRINT_FLAG)
 580   #undef ALL
 581   #undef PRINT_FLAG
 582   return buf;
 583 }
 584 
 585 // Allocate and initialize a new OSThread
 586 bool os::create_thread(Thread* thread, ThreadType thr_type,
 587                        size_t stack_size) {
 588   unsigned thread_id;
 589 
 590   // Allocate the OSThread object
 591   OSThread* osthread = new OSThread(NULL, NULL);
 592   if (osthread == NULL) {
 593     return false;
 594   }
 595 
 596   // Initialize the JDK library's interrupt event.
 597   // This should really be done when OSThread is constructed,
 598   // but there is no way for a constructor to report failure to
 599   // allocate the event.
 600   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 601   if (interrupt_event == NULL) {
 602     delete osthread;
 603     return false;
 604   }
 605   osthread->set_interrupt_event(interrupt_event);
 606   // We don't call set_interrupted(false) as it would trip the assert in there,
 607   // since we are not operating on the current thread. We don't need to call it
 608   // because the initial state is already correct.
 609 
 610   thread->set_osthread(osthread);
 611 
 612   if (stack_size == 0) {
 613     switch (thr_type) {
 614     case os::java_thread:
 615       // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
 616       if (JavaThread::stack_size_at_create() > 0) {
 617         stack_size = JavaThread::stack_size_at_create();
 618       }
 619       break;
 620     case os::compiler_thread:
 621       if (CompilerThreadStackSize > 0) {
 622         stack_size = (size_t)(CompilerThreadStackSize * K);
 623         break;
 624       } // else fall through:
 625         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 626     case os::vm_thread:
 627     case os::pgc_thread:
 628     case os::cgc_thread:
 629     case os::watcher_thread:
 630       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 631       break;
 632     }
 633   }
 634 
 635   // Create the Win32 thread
 636   //
 637   // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
 638   // does not specify the stack size. Instead, it specifies the size of the
 639   // initially committed space. The stack size is determined by the
 640   // PE header in the executable. If the committed "stack_size" is larger
 641   // than the default value in the PE header, the stack is rounded up to the
 642   // nearest multiple of 1MB. For example, if the launcher has a default
 643   // stack size of 320k, specifying any size less than 320k does not
 644   // affect the actual stack size at all; it only affects the initial
 645   // commitment. On the other hand, specifying a 'stack_size' larger than the
 646   // default value may cause a significant increase in memory usage, because
 647   // not only will the stack space be rounded up to a multiple of 1MB, but the
 648   // entire space will also be committed upfront.
 649   //
 650   // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
 651   // for CreateThread() that makes it treat 'stack_size' as the stack size. However,
 652   // we are not supposed to call CreateThread() directly according to the MSDN
 653   // documentation, because the JVM uses the C runtime library. The good news is
 654   // that the flag appears to work with _beginthreadex() as well.
 655 
 656   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 657   HANDLE thread_handle =
 658     (HANDLE)_beginthreadex(NULL,
 659                            (unsigned)stack_size,
 660                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 661                            thread,
 662                            initflag,
 663                            &thread_id);
 664 
 665   char buf[64];
 666   if (thread_handle != NULL) {
 667     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 668       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 669   } else {
 670     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 671       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 672     // Log some OS information which might explain why creating the thread failed.
 673     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 674     LogStream st(Log(os, thread)::info());
 675     os::print_memory_info(&st);
 676   }
 677 
 678   if (thread_handle == NULL) {
 679     // Need to clean up stuff we've allocated so far
 680     thread->set_osthread(NULL);
 681     delete osthread;
 682     return false;
 683   }
 684 
 685   Atomic::inc(&os::win32::_os_thread_count);
 686 
 687   // Store info on the Win32 thread into the OSThread
 688   osthread->set_thread_handle(thread_handle);
 689   osthread->set_thread_id(thread_id);
 690 
 691   // Initial thread state is INITIALIZED, not SUSPENDED
 692   osthread->set_state(INITIALIZED);
 693 
 694   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 695   return true;
 696 }
 697 
 698 
 699 // Free Win32 resources related to the OSThread
 700 void os::free_thread(OSThread* osthread) {
 701   assert(osthread != NULL, "osthread not set");
 702 
 703   // We are told to free resources of the argument thread,
 704   // but we can only really operate on the current thread.
 705   assert(Thread::current()->osthread() == osthread,
 706          "os::free_thread but not current thread");
 707 
 708   CloseHandle(osthread->thread_handle());
 709   delete osthread;
 710 }
 711 
 712 static jlong first_filetime;
 713 static jlong initial_performance_count;
 714 static jlong performance_frequency;
 715 
 716 
 717 jlong as_long(LARGE_INTEGER x) {
 718   jlong result = 0; // initialization to avoid warning
 719   set_high(&result, x.HighPart);
 720   set_low(&result, x.LowPart);
 721   return result;
 722 }
 723 
 724 
 725 jlong os::elapsed_counter() {
 726   LARGE_INTEGER count;
 727   QueryPerformanceCounter(&count);
 728   return as_long(count) - initial_performance_count;
 729 }
 730 
 731 
 732 jlong os::elapsed_frequency() {
 733   return performance_frequency;
 734 }
 735 
 736 
 737 julong os::available_memory() {
 738   return win32::available_memory();
 739 }
 740 
 741 julong os::win32::available_memory() {
 742   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
 743   // incorrect value if total memory is larger than 4GB
 744   MEMORYSTATUSEX ms;
 745   ms.dwLength = sizeof(ms);
 746   GlobalMemoryStatusEx(&ms);
 747 
 748   return (julong)ms.ullAvailPhys;
 749 }
 750 
 751 julong os::physical_memory() {
 752   return win32::physical_memory();
 753 }
 754 
 755 bool os::has_allocatable_memory_limit(julong* limit) {
 756   MEMORYSTATUSEX ms;
 757   ms.dwLength = sizeof(ms);
 758   GlobalMemoryStatusEx(&ms);
 759 #ifdef _LP64
 760   *limit = (julong)ms.ullAvailVirtual;
 761   return true;
 762 #else
 763   // Limit to 1400m because of the 2gb address space wall
 764   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 765   return true;
 766 #endif
 767 }
 768 
 769 int os::active_processor_count() {
 770   // User has overridden the number of active processors
 771   if (ActiveProcessorCount > 0) {
 772     log_trace(os)("active_processor_count: "
 773                   "active processor count set by user : %d",
 774                   ActiveProcessorCount);
 775     return ActiveProcessorCount;
 776   }
 777 
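  // GetProcessAffinityMask reports affinity as a single machine-word bitmask,
  // so it can describe at most sizeof(UINT_PTR) * BitsPerByte logical
  // processors; on machines with more logical processors than that, the code
  // falls back to processor_count().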
 778   DWORD_PTR lpProcessAffinityMask = 0;
 779   DWORD_PTR lpSystemAffinityMask = 0;
 780   int proc_count = processor_count();
 781   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 782       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 783     // The number of active processors is the number of bits set in the process affinity mask
 784     int bitcount = 0;
 785     while (lpProcessAffinityMask != 0) {
 786       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 787       bitcount++;
 788     }
 789     return bitcount;
 790   } else {
 791     return proc_count;
 792   }
 793 }
 794 
 795 uint os::processor_id() {
 796   return (uint)GetCurrentProcessorNumber();
 797 }
 798 
 799 void os::set_native_thread_name(const char *name) {
 800 
 801   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 802   //
 803   // Note that unfortunately this only works if the process
 804   // is already attached to a debugger; the debugger must observe
 805   // the exception below to show the correct name.
 806 
 807   // If there is no debugger attached skip raising the exception
 808   if (!IsDebuggerPresent()) {
 809     return;
 810   }
 811 
 812   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 813   struct {
 814     DWORD dwType;     // must be 0x1000
 815     LPCSTR szName;    // pointer to name (in user addr space)
 816     DWORD dwThreadID; // thread ID (-1=caller thread)
 817     DWORD dwFlags;    // reserved for future use, must be zero
 818   } info;
 819 
 820   info.dwType = 0x1000;
 821   info.szName = name;
 822   info.dwThreadID = -1;
 823   info.dwFlags = 0;
 824 
 825   __try {
 826     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 827   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 828 }
 829 
 830 bool os::bind_to_processor(uint processor_id) {
 831   // Not yet implemented.
 832   return false;
 833 }
 834 
 835 void os::win32::initialize_performance_counter() {
 836   LARGE_INTEGER count;
 837   QueryPerformanceFrequency(&count);
 838   performance_frequency = as_long(count);
 839   QueryPerformanceCounter(&count);
 840   initial_performance_count = as_long(count);
 841 }
 842 
 843 
 844 double os::elapsedTime() {
 845   return (double) elapsed_counter() / (double) elapsed_frequency();
 846 }
 847 
 848 
 849 // Windows format:
 850 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 851 // Java format:
 852 //   Java standards require the number of milliseconds since 1/1/1970
 853 
 854 // Constant offset - calculated using offset()
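// The constant below is the number of 100 ns intervals between the Windows
// epoch (1601-01-01) and the Java epoch (1970-01-01): 369 years containing
// 89 leap days give 134774 days = 11644473600 seconds = 116444736000000000
// 100 ns units.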
 855 static jlong  _offset   = 116444736000000000;
 856 // Fake time counter for reproducible results when debugging
 857 static jlong  fake_time = 0;
 858 
 859 #ifdef ASSERT
 860 // Just to be safe, recalculate the offset in debug mode
 861 static jlong _calculated_offset = 0;
 862 static int   _has_calculated_offset = 0;
 863 
 864 jlong offset() {
 865   if (_has_calculated_offset) return _calculated_offset;
 866   SYSTEMTIME java_origin;
 867   java_origin.wYear          = 1970;
 868   java_origin.wMonth         = 1;
 869   java_origin.wDayOfWeek     = 0; // ignored
 870   java_origin.wDay           = 1;
 871   java_origin.wHour          = 0;
 872   java_origin.wMinute        = 0;
 873   java_origin.wSecond        = 0;
 874   java_origin.wMilliseconds  = 0;
 875   FILETIME jot;
 876   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 877     fatal("Error = %d\nWindows error", GetLastError());
 878   }
 879   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 880   _has_calculated_offset = 1;
 881   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 882   return _calculated_offset;
 883 }
 884 #else
 885 jlong offset() {
 886   return _offset;
 887 }
 888 #endif
 889 
 890 jlong windows_to_java_time(FILETIME wt) {
 891   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 892   return (a - offset()) / 10000;
 893 }
 894 
 895 // Returns time ticks in tenths of microseconds (i.e. 100 ns units)
 896 jlong windows_to_time_ticks(FILETIME wt) {
 897   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 898   return (a - offset());
 899 }
 900 
 901 FILETIME java_to_windows_time(jlong l) {
 902   jlong a = (l * 10000) + offset();
 903   FILETIME result;
 904   result.dwHighDateTime = high(a);
 905   result.dwLowDateTime  = low(a);
 906   return result;
 907 }
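// Note on the conversions above: windows_to_java_time() divides the 100 ns
// tick count by 10000 to get milliseconds, so converting a FILETIME to Java
// time and back discards any sub-millisecond part.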
 908 
 909 bool os::supports_vtime() { return true; }
 910 
 911 double os::elapsedVTime() {
 912   FILETIME created;
 913   FILETIME exited;
 914   FILETIME kernel;
 915   FILETIME user;
 916   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 917     // the resolution of windows_to_java_time() should be sufficient (ms)
 918     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 919   } else {
 920     return elapsedTime();
 921   }
 922 }
 923 
 924 jlong os::javaTimeMillis() {
 925   FILETIME wt;
 926   GetSystemTimeAsFileTime(&wt);
 927   return windows_to_java_time(wt);
 928 }
 929 
 930 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 931   FILETIME wt;
 932   GetSystemTimeAsFileTime(&wt);
 933   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 934   jlong secs = jlong(ticks / 10000000); // 10,000,000 100-ns ticks per second
 935   seconds = secs;
 936   nanos = jlong(ticks - (secs*10000000)) * 100;
 937 }
 938 
 939 jlong os::javaTimeNanos() {
 940   LARGE_INTEGER current_count;
 941   QueryPerformanceCounter(&current_count);
 942   double current = as_long(current_count);
 943   double freq = performance_frequency;
 944   jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 945   return time;
 946 }
 947 
 948 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 949   jlong freq = performance_frequency;
 950   if (freq < NANOSECS_PER_SEC) {
 951     // the performance counter is 64 bits and we will
 952     // be multiplying it -- so no wrap in 64 bits
 953     info_ptr->max_value = ALL_64_BITS;
 954   } else if (freq > NANOSECS_PER_SEC) {
 955     // use the max value the counter can reach to
 956     // determine the max value which could be returned
 957     julong max_counter = (julong)ALL_64_BITS;
 958     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 959   } else {
 960     // the performance counter is 64 bits and we will
 961     // be using it directly -- so no wrap in 64 bits
 962     info_ptr->max_value = ALL_64_BITS;
 963   }
 964 
 965   // using a counter, so no skipping
 966   info_ptr->may_skip_backward = false;
 967   info_ptr->may_skip_forward = false;
 968 
 969   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 970 }
 971 
 972 char* os::local_time_string(char *buf, size_t buflen) {
 973   SYSTEMTIME st;
 974   GetLocalTime(&st);
 975   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 976                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 977   return buf;
 978 }
 979 
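// GetProcessTimes reports kernel and user time as FILETIMEs counting 100 ns
// intervals; dividing the combined 64-bit value by 10 * MICROUNITS (1e7)
// below converts it to seconds.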
 980 bool os::getTimesSecs(double* process_real_time,
 981                       double* process_user_time,
 982                       double* process_system_time) {
 983   HANDLE h_process = GetCurrentProcess();
 984   FILETIME create_time, exit_time, kernel_time, user_time;
 985   BOOL result = GetProcessTimes(h_process,
 986                                 &create_time,
 987                                 &exit_time,
 988                                 &kernel_time,
 989                                 &user_time);
 990   if (result != 0) {
 991     FILETIME wt;
 992     GetSystemTimeAsFileTime(&wt);
 993     jlong rtc_millis = windows_to_java_time(wt);
 994     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 995     *process_user_time =
 996       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 997     *process_system_time =
 998       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
 999     return true;
1000   } else {
1001     return false;
1002   }
1003 }
1004 
1005 void os::shutdown() {
1006   // allow PerfMemory to attempt cleanup of any persistent resources
1007   perfMemory_exit();
1008 
1009   // flush buffered output, finish log files
1010   ostream_abort();
1011 
1012   // Check for abort hook
1013   abort_hook_t abort_hook = Arguments::abort_hook();
1014   if (abort_hook != NULL) {
1015     abort_hook();
1016   }
1017 }
1018 
1019 
1020 static HANDLE dumpFile = NULL;
1021 
1022 // Check if dump file can be created.
1023 void os::check_dump_limit(char* buffer, size_t buffsz) {
1024   bool status = true;
1025   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1026     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1027     status = false;
1028   }
1029 
1030 #ifndef ASSERT
1031   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1032     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1033     status = false;
1034   }
1035 #endif
1036 
1037   if (status) {
1038     const char* cwd = get_current_directory(NULL, 0);
1039     int pid = current_process_id();
1040     if (cwd != NULL) {
1041       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1042     } else {
1043       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1044     }
1045 
1046     if (dumpFile == NULL &&
1047        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1048                  == INVALID_HANDLE_VALUE) {
1049       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1050       status = false;
1051     }
1052   }
1053   VMError::record_coredump_status(buffer, status);
1054 }
1055 
1056 void os::abort(bool dump_core, void* siginfo, const void* context) {
1057   EXCEPTION_POINTERS ep;
1058   MINIDUMP_EXCEPTION_INFORMATION mei;
1059   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1060 
1061   HANDLE hProcess = GetCurrentProcess();
1062   DWORD processId = GetCurrentProcessId();
1063   MINIDUMP_TYPE dumpType;
1064 
1065   shutdown();
1066   if (!dump_core || dumpFile == NULL) {
1067     if (dumpFile != NULL) {
1068       CloseHandle(dumpFile);
1069     }
1070     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1071   }
1072 
1073   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1074     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1075 
1076   if (siginfo != NULL && context != NULL) {
1077     ep.ContextRecord = (PCONTEXT) context;
1078     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1079 
1080     mei.ThreadId = GetCurrentThreadId();
1081     mei.ExceptionPointers = &ep;
1082     pmei = &mei;
1083   } else {
1084     pmei = NULL;
1085   }
1086 
1087   // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
1088   // the dump types we really want. If the first call fails, fall back to just using MiniDumpWithFullMemory.
1089   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1090       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1091     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1092   }
1093   CloseHandle(dumpFile);
1094   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1095 }
1096 
1097 // Die immediately, no exit hook, no abort hook, no cleanup.
1098 void os::die() {
1099   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1100 }
1101 
1102 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1103 //  * dirent_md.c       1.15 00/02/02
1104 //
1105 // The declarations for DIR and struct dirent are in jvm_win32.h.
1106 
1107 // Caller must have already run dirname through JVM_NativePath, which removes
1108 // duplicate slashes and converts all instances of '/' into '\\'.
1109 
1110 DIR * os::opendir(const char *dirname) {
1111   assert(dirname != NULL, "just checking");   // hotspot change
1112   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1113   DWORD fattr;                                // hotspot change
1114   char alt_dirname[4] = { 0, 0, 0, 0 };
1115 
1116   if (dirp == 0) {
1117     errno = ENOMEM;
1118     return 0;
1119   }
1120 
1121   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1122   // as a directory in FindFirstFile().  We detect this case here and
1123   // prepend the current drive name.
1124   //
1125   if (dirname[1] == '\0' && dirname[0] == '\\') {
1126     alt_dirname[0] = _getdrive() + 'A' - 1;
1127     alt_dirname[1] = ':';
1128     alt_dirname[2] = '\\';
1129     alt_dirname[3] = '\0';
1130     dirname = alt_dirname;
1131   }
1132 
1133   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1134   if (dirp->path == 0) {
1135     free(dirp);
1136     errno = ENOMEM;
1137     return 0;
1138   }
1139   strcpy(dirp->path, dirname);
1140 
1141   fattr = GetFileAttributes(dirp->path);
1142   if (fattr == 0xffffffff) {
1143     free(dirp->path);
1144     free(dirp);
1145     errno = ENOENT;
1146     return 0;
1147   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1148     free(dirp->path);
1149     free(dirp);
1150     errno = ENOTDIR;
1151     return 0;
1152   }
1153 
1154   // Append "*.*", or possibly "\\*.*", to path
1155   if (dirp->path[1] == ':' &&
1156       (dirp->path[2] == '\0' ||
1157       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1158     // No '\\' needed for cases like "Z:" or "Z:\"
1159     strcat(dirp->path, "*.*");
1160   } else {
1161     strcat(dirp->path, "\\*.*");
1162   }
1163 
1164   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1165   if (dirp->handle == INVALID_HANDLE_VALUE) {
1166     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1167       free(dirp->path);
1168       free(dirp);
1169       errno = EACCES;
1170       return 0;
1171     }
1172   }
1173   return dirp;
1174 }
1175 
1176 struct dirent * os::readdir(DIR *dirp) {
1177   assert(dirp != NULL, "just checking");      // hotspot change
1178   if (dirp->handle == INVALID_HANDLE_VALUE) {
1179     return NULL;
1180   }
1181 
1182   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1183 
1184   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1185     if (GetLastError() == ERROR_INVALID_HANDLE) {
1186       errno = EBADF;
1187       return NULL;
1188     }
1189     FindClose(dirp->handle);
1190     dirp->handle = INVALID_HANDLE_VALUE;
1191   }
1192 
1193   return &dirp->dirent;
1194 }
1195 
1196 int os::closedir(DIR *dirp) {
1197   assert(dirp != NULL, "just checking");      // hotspot change
1198   if (dirp->handle != INVALID_HANDLE_VALUE) {
1199     if (!FindClose(dirp->handle)) {
1200       errno = EBADF;
1201       return -1;
1202     }
1203     dirp->handle = INVALID_HANDLE_VALUE;
1204   }
1205   free(dirp->path);
1206   free(dirp);
1207   return 0;
1208 }
1209 
1210 // This must be hard-coded because it's the system's temporary
1211 // directory, not the java application's temp directory, a la java.io.tmpdir.
1212 const char* os::get_temp_directory() {
1213   static char path_buf[MAX_PATH];
1214   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1215     return path_buf;
1216   } else {
1217     path_buf[0] = '\0';
1218     return path_buf;
1219   }
1220 }
1221 
1222 // Needs to be in the OS-specific directory because Windows requires another
1223 // header file, <direct.h>
1224 const char* os::get_current_directory(char *buf, size_t buflen) {
1225   int n = static_cast<int>(buflen);
1226   if (buflen > INT_MAX)  n = INT_MAX;
1227   return _getcwd(buf, n);
1228 }
1229 
1230 //-----------------------------------------------------------
1231 // Helper functions for fatal error handler
1232 #ifdef _WIN64
1233 // Helper routine which returns true if the address is
1234 // within the NTDLL address space.
1235 //
1236 static bool _addr_in_ntdll(address addr) {
1237   HMODULE hmod;
1238   MODULEINFO minfo;
1239 
1240   hmod = GetModuleHandle("NTDLL.DLL");
1241   if (hmod == NULL) return false;
1242   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1243                                           &minfo, sizeof(MODULEINFO))) {
1244     return false;
1245   }
1246 
1247   if ((addr >= minfo.lpBaseOfDll) &&
1248       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1249     return true;
1250   } else {
1251     return false;
1252   }
1253 }
1254 #endif
1255 
1256 struct _modinfo {
1257   address addr;
1258   char*   full_path;   // point to a char buffer
1259   int     buflen;      // size of the buffer
1260   address base_addr;
1261 };
1262 
1263 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1264                                   address top_address, void * param) {
1265   struct _modinfo *pmod = (struct _modinfo *)param;
1266   if (!pmod) return -1;
1267 
1268   if (base_addr   <= pmod->addr &&
1269       top_address > pmod->addr) {
1270     // if a buffer is provided, copy path name to the buffer
1271     if (pmod->full_path) {
1272       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1273     }
1274     pmod->base_addr = base_addr;
1275     return 1;
1276   }
1277   return 0;
1278 }
1279 
1280 bool os::dll_address_to_library_name(address addr, char* buf,
1281                                      int buflen, int* offset) {
1282   // buf is not optional, but offset is optional
1283   assert(buf != NULL, "sanity check");
1284 
1285 // NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
1286 //       return the full path to the DLL file; sometimes it returns the path
1287 //       to the corresponding PDB file (debug info), and sometimes it only
1288 //       returns a partial path, which makes life painful.
1289 
1290   struct _modinfo mi;
1291   mi.addr      = addr;
1292   mi.full_path = buf;
1293   mi.buflen    = buflen;
1294   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1295     // buf already contains path name
1296     if (offset) *offset = addr - mi.base_addr;
1297     return true;
1298   }
1299 
1300   buf[0] = '\0';
1301   if (offset) *offset = -1;
1302   return false;
1303 }
1304 
1305 bool os::dll_address_to_function_name(address addr, char *buf,
1306                                       int buflen, int *offset,
1307                                       bool demangle) {
1308   // buf is not optional, but offset is optional
1309   assert(buf != NULL, "sanity check");
1310 
1311   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1312     return true;
1313   }
1314   if (offset != NULL)  *offset  = -1;
1315   buf[0] = '\0';
1316   return false;
1317 }
1318 
1319 // save the start and end address of jvm.dll into param[0] and param[1]
1320 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1321                            address top_address, void * param) {
1322   if (!param) return -1;
1323 
1324   if (base_addr   <= (address)_locate_jvm_dll &&
1325       top_address > (address)_locate_jvm_dll) {
1326     ((address*)param)[0] = base_addr;
1327     ((address*)param)[1] = top_address;
1328     return 1;
1329   }
1330   return 0;
1331 }
1332 
1333 address vm_lib_location[2];    // start and end address of jvm.dll
1334 
1335 // check if addr is inside jvm.dll
1336 bool os::address_is_in_vm(address addr) {
1337   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1338     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1339       assert(false, "Can't find jvm module.");
1340       return false;
1341     }
1342   }
1343 
1344   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1345 }
1346 
1347 // print module info; param is outputStream*
1348 static int _print_module(const char* fname, address base_address,
1349                          address top_address, void* param) {
1350   if (!param) return -1;
1351 
1352   outputStream* st = (outputStream*)param;
1353 
1354   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1355   return 0;
1356 }
1357 
1358 // Loads a .dll/.so and,
1359 // in case of error, checks whether the .dll/.so was built for the
1360 // same architecture as the one Hotspot is running on
1361 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1362   log_info(os)("attempting shared library load of %s", name);
1363 
1364   void * result = LoadLibrary(name);
1365   if (result != NULL) {
1366     Events::log(NULL, "Loaded shared library %s", name);
1367     // Recalculate pdb search path if a DLL was loaded successfully.
1368     SymbolEngine::recalc_search_path();
1369     log_info(os)("shared library load of %s was successful", name);
1370     return result;
1371   }
1372   DWORD errcode = GetLastError();
1373   // Read system error message into ebuf
1374   // It may or may not be overwritten below (in the for loop and just above)
1375   lasterror(ebuf, (size_t) ebuflen);
1376   ebuf[ebuflen - 1] = '\0';
1377   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1378   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1379 
1380   if (errcode == ERROR_MOD_NOT_FOUND) {
1381     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1382     ebuf[ebuflen - 1] = '\0';
1383     return NULL;
1384   }
1385 
1386   // Parsing the dll below
1387   // If we can read the dll info and find that the dll was built
1388   // for an architecture other than the one Hotspot is running on,
1389   // then print "DLL was built for a different architecture" to the buffer;
1390   // else call os::lasterror to obtain the system error message
1391   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1392   if (fd < 0) {
1393     return NULL;
1394   }
1395 
1396   uint32_t signature_offset;
1397   uint16_t lib_arch = 0;
1398   bool failed_to_get_lib_arch =
1399     ( // Go to position 3c in the dll
1400      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1401      ||
1402      // Read location of signature
1403      (sizeof(signature_offset) !=
1404      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1405      ||
1406      // Go to COFF File Header in dll
1407      // that is located after "signature" (4 bytes long)
1408      (os::seek_to_file_offset(fd,
1409      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1410      ||
1411      // Read field that contains code of architecture
1412      // that dll was built for
1413      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1414     );
1415 
1416   ::close(fd);
1417   if (failed_to_get_lib_arch) {
1418     // file i/o error - report os::lasterror(...) msg
1419     return NULL;
1420   }
1421 
1422   typedef struct {
1423     uint16_t arch_code;
1424     char* arch_name;
1425   } arch_t;
1426 
1427   static const arch_t arch_array[] = {
1428     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1429     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
1430   };
1431 #if (defined _M_AMD64)
1432   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1433 #elif (defined _M_IX86)
1434   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1435 #else
1436   #error Method os::dll_load requires that one of following \
1437          is defined :_M_AMD64 or _M_IX86
1438 #endif
1439 
1440 
1441   // Obtain strings for the printf operation:
1442   // lib_arch_str shall contain the platform this .dll was built for,
1443   // running_arch_str shall contain the platform Hotspot was built for
1444   char *running_arch_str = NULL, *lib_arch_str = NULL;
1445   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1446     if (lib_arch == arch_array[i].arch_code) {
1447       lib_arch_str = arch_array[i].arch_name;
1448     }
1449     if (running_arch == arch_array[i].arch_code) {
1450       running_arch_str = arch_array[i].arch_name;
1451     }
1452   }
1453 
1454   assert(running_arch_str,
1455          "Didn't find running architecture code in arch_array");
1456 
1457   // If the architecture is right
1458   // but some other error took place - report the os::lasterror(...) msg
1459   if (lib_arch == running_arch) {
1460     return NULL;
1461   }
1462 
1463   if (lib_arch_str != NULL) {
1464     ::_snprintf(ebuf, ebuflen - 1,
1465                 "Can't load %s-bit .dll on a %s-bit platform",
1466                 lib_arch_str, running_arch_str);
1467   } else {
1468     // don't know what architecture this dll was built for
1469     ::_snprintf(ebuf, ebuflen - 1,
1470                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1471                 lib_arch, running_arch_str);
1472   }
1473 
1474   return NULL;
1475 }
1476 
1477 void os::print_dll_info(outputStream *st) {
1478   st->print_cr("Dynamic libraries:");
1479   get_loaded_modules_info(_print_module, (void *)st);
1480 }
1481 
1482 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1483   HANDLE   hProcess;
1484 
1485 # define MAX_NUM_MODULES 128
1486   HMODULE     modules[MAX_NUM_MODULES];
1487   static char filename[MAX_PATH];
1488   int         result = 0;
1489 
1490   int pid = os::current_process_id();
1491   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1492                          FALSE, pid);
1493   if (hProcess == NULL) return 0;
1494 
1495   DWORD size_needed;
1496   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1497     CloseHandle(hProcess);
1498     return 0;
1499   }
1500 
1501   // number of modules that are currently loaded
1502   int num_modules = size_needed / sizeof(HMODULE);
1503 
1504   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1505     // Get Full pathname:
1506     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1507       filename[0] = '\0';
1508     }
1509 
1510     MODULEINFO modinfo;
1511     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1512       modinfo.lpBaseOfDll = NULL;
1513       modinfo.SizeOfImage = 0;
1514     }
1515 
1516     // Invoke callback function
1517     result = callback(filename, (address)modinfo.lpBaseOfDll,
1518                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1519     if (result) break;
1520   }
1521 
1522   CloseHandle(hProcess);
1523   return result;
1524 }
1525 
1526 bool os::get_host_name(char* buf, size_t buflen) {
1527   DWORD size = (DWORD)buflen;
1528   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1529 }
1530 
1531 void os::get_summary_os_info(char* buf, size_t buflen) {
1532   stringStream sst(buf, buflen);
1533   os::win32::print_windows_version(&sst);
1534   // chop off newline character
1535   char* nl = strchr(buf, '\n');
1536   if (nl != NULL) *nl = '\0';
1537 }
1538 
1539 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1540 #if _MSC_VER >= 1900
1541   // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1542   int result = ::vsnprintf(buf, len, fmt, args);
1543   // If an encoding error occurred (result < 0) then it's not clear
1544   // whether the buffer is NUL terminated, so ensure it is.
1545   if ((result < 0) && (len > 0)) {
1546     buf[len - 1] = '\0';
1547   }
1548   return result;
1549 #else
1550   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1551   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1552   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1553   // go straight to _vscprintf.  The output is going to be truncated in
1554   // that case, except in the unusual case of empty output.  More
1555   // importantly, the documentation for various versions of Visual Studio
1556   // are inconsistent about the behavior of _vsnprintf when len == 0,
1557   // including it possibly being an error.
1558   int result = -1;
1559   if (len > 0) {
1560     result = _vsnprintf(buf, len, fmt, args);
1561     // If output (including NUL terminator) is truncated, the buffer
1562     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1563     if ((result < 0) || ((size_t)result >= len)) {
1564       buf[len - 1] = '\0';
1565     }
1566   }
1567   if (result < 0) {
1568     result = _vscprintf(fmt, args);
1569   }
1570   return result;
1571 #endif // _MSC_VER dispatch
1572 }
1573 
1574 static inline time_t get_mtime(const char* filename) {
1575   struct stat st;
1576   int ret = os::stat(filename, &st);
1577   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1578   return st.st_mtime;
1579 }
1580 
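     // Returns a value whose sign indicates which file was modified more recently:
     // positive if file1 is newer than file2, negative if file2 is newer, and zero
     // if the modification times are equal (the time_t difference is narrowed to int).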
1581 int os::compare_file_modified_times(const char* file1, const char* file2) {
1582   time_t t1 = get_mtime(file1);
1583   time_t t2 = get_mtime(file2);
1584   return t1 - t2;
1585 }
1586 
1587 void os::print_os_info_brief(outputStream* st) {
1588   os::print_os_info(st);
1589 }
1590 
1591 void os::win32::print_uptime_info(outputStream* st) {
1592   unsigned long long ticks = GetTickCount64();
1593   os::print_dhm(st, "OS uptime:", ticks/1000);
1594 }
1595 
1596 void os::print_os_info(outputStream* st) {
1597 #ifdef ASSERT
1598   char buffer[1024];
1599   st->print("HostName: ");
1600   if (get_host_name(buffer, sizeof(buffer))) {
1601     st->print("%s ", buffer);
1602   } else {
1603     st->print("N/A ");
1604   }
1605 #endif
1606   st->print_cr("OS:");
1607   os::win32::print_windows_version(st);
1608 
1609   os::win32::print_uptime_info(st);
1610 
1611 #ifdef _LP64
1612   VM_Version::print_platform_virtualization_info(st);
1613 #endif
1614 }
1615 
1616 void os::win32::print_windows_version(outputStream* st) {
1617   OSVERSIONINFOEX osvi;
1618   VS_FIXEDFILEINFO *file_info;
1619   TCHAR kernel32_path[MAX_PATH];
1620   UINT len, ret;
1621 
1622   // Use the GetVersionEx information to see if we're on a server or
1623   // workstation edition of Windows. Starting with Windows 8.1 we can't
1624   // trust the OS version information returned by this API.
1625   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1626   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1627   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1628     st->print_cr("Call to GetVersionEx failed");
1629     return;
1630   }
1631   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1632 
1633   // Get the full path to \Windows\System32\kernel32.dll and use that for
1634   // determining what version of Windows we're running on.
1635   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1636   ret = GetSystemDirectory(kernel32_path, len);
1637   if (ret == 0 || ret > len) {
1638     st->print_cr("Call to GetSystemDirectory failed");
1639     return;
1640   }
1641   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1642 
1643   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1644   if (version_size == 0) {
1645     st->print_cr("Call to GetFileVersionInfoSize failed");
1646     return;
1647   }
1648 
1649   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1650   if (version_info == NULL) {
1651     st->print_cr("Failed to allocate version_info");
1652     return;
1653   }
1654 
1655   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1656     os::free(version_info);
1657     st->print_cr("Call to GetFileVersionInfo failed");
1658     return;
1659   }
1660 
1661   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1662     os::free(version_info);
1663     st->print_cr("Call to VerQueryValue failed");
1664     return;
1665   }
1666 
1667   int major_version = HIWORD(file_info->dwProductVersionMS);
1668   int minor_version = LOWORD(file_info->dwProductVersionMS);
1669   int build_number = HIWORD(file_info->dwProductVersionLS);
1670   int build_minor = LOWORD(file_info->dwProductVersionLS);
1671   int os_vers = major_version * 1000 + minor_version;
1672   os::free(version_info);
1673 
1674   st->print(" Windows ");
1675   switch (os_vers) {
1676 
1677   case 6000:
1678     if (is_workstation) {
1679       st->print("Vista");
1680     } else {
1681       st->print("Server 2008");
1682     }
1683     break;
1684 
1685   case 6001:
1686     if (is_workstation) {
1687       st->print("7");
1688     } else {
1689       st->print("Server 2008 R2");
1690     }
1691     break;
1692 
1693   case 6002:
1694     if (is_workstation) {
1695       st->print("8");
1696     } else {
1697       st->print("Server 2012");
1698     }
1699     break;
1700 
1701   case 6003:
1702     if (is_workstation) {
1703       st->print("8.1");
1704     } else {
1705       st->print("Server 2012 R2");
1706     }
1707     break;
1708 
1709   case 10000:
1710     if (is_workstation) {
1711       st->print("10");
1712     } else {
1713       // Distinguish Windows Server 2016 from 2019 by build number;
1714       // Windows Server 2019 (GA 10/2018) has build number 17763.
1715       if (build_number > 17762) {
1716         st->print("Server 2019");
1717       } else {
1718         st->print("Server 2016");
1719       }
1720     }
1721     break;
1722 
1723   default:
1724     // Unrecognized Windows version; print out its major and minor versions.
1725     st->print("%d.%d", major_version, minor_version);
1726     break;
1727   }
1728 
1729   // Retrieve SYSTEM_INFO from a GetNativeSystemInfo call so that we can
1730   // find out whether we are running on a 64-bit processor or not.
1731   SYSTEM_INFO si;
1732   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1733   GetNativeSystemInfo(&si);
1734   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1735     st->print(" , 64 bit");
1736   }
1737 
1738   st->print(" Build %d", build_number);
1739   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1740   st->cr();
1741 }
1742 
1743 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1744   // Nothing to do for now.
1745 }
1746 
1747 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1748   HKEY key;
1749   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1750                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1751   if (status == ERROR_SUCCESS) {
1752     DWORD size = (DWORD)buflen;
1753     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1754     if (status != ERROR_SUCCESS) {
1755         strncpy(buf, "## __CPU__", buflen);
1756     }
1757     RegCloseKey(key);
1758   } else {
1759     // Fall back to generic CPU info.
1760     strncpy(buf, "## __CPU__", buflen);
1761   }
1762 }
1763 
1764 void os::print_memory_info(outputStream* st) {
1765   st->print("Memory:");
1766   st->print(" %dk page", os::vm_page_size()>>10);
1767 
1768   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
1769   // value if total memory is larger than 4GB.
1770   MEMORYSTATUSEX ms;
1771   ms.dwLength = sizeof(ms);
1772   int r1 = GlobalMemoryStatusEx(&ms);
1773 
1774   if (r1 != 0) {
1775     st->print(", system-wide physical " INT64_FORMAT "M ",
1776              (int64_t) ms.ullTotalPhys >> 20);
1777     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1778 
1779     st->print("TotalPageFile size " INT64_FORMAT "M ",
1780              (int64_t) ms.ullTotalPageFile >> 20);
1781     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1782              (int64_t) ms.ullAvailPageFile >> 20);
1783 
1784     // on 32bit Total/AvailVirtual are interesting (show us how close we get to 2-4 GB per process borders)
1785 #if defined(_M_IX86)
1786     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1787              (int64_t) ms.ullTotalVirtual >> 20);
1788     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1789 #endif
1790   } else {
1791     st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
1792   }
1793 
1794   // extended memory statistics for a process
1795   PROCESS_MEMORY_COUNTERS_EX pmex;
1796   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1797   pmex.cb = sizeof(pmex);
1798   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1799 
1800   if (r2 != 0) {
1801     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1802              (int64_t) pmex.WorkingSetSize >> 20);
1803     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1804 
1805     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1806              (int64_t) pmex.PrivateUsage >> 20);
1807     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1808   } else {
1809     st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
1810   }
1811 
1812   st->cr();
1813 }
1814 
1815 bool os::signal_sent_by_kill(const void* siginfo) {
1816   // TODO: Is this possible?
1817   return false;
1818 }
1819 
1820 void os::print_siginfo(outputStream *st, const void* siginfo) {
1821   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1822   st->print("siginfo:");
1823 
1824   char tmp[64];
1825   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1826     strcpy(tmp, "EXCEPTION_??");
1827   }
1828   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1829 
1830   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1831        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1832        er->NumberParameters >= 2) {
1833     switch (er->ExceptionInformation[0]) {
1834     case 0: st->print(", reading address"); break;
1835     case 1: st->print(", writing address"); break;
1836     case 8: st->print(", data execution prevention violation at address"); break;
1837     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1838                        er->ExceptionInformation[0]);
1839     }
1840     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1841   } else {
1842     int num = er->NumberParameters;
1843     if (num > 0) {
1844       st->print(", ExceptionInformation=");
1845       for (int i = 0; i < num; i++) {
1846         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1847       }
1848     }
1849   }
1850   st->cr();
1851 }
1852 
1853 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1854   // TODO: Can we kill thread?
1855   return false;
1856 }
1857 
1858 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1859   // do nothing
1860 }
1861 
1862 static char saved_jvm_path[MAX_PATH] = {0};
1863 
1864 // Find the full path to the current module, jvm.dll
1865 void os::jvm_path(char *buf, jint buflen) {
1866   // Error checking.
1867   if (buflen < MAX_PATH) {
1868     assert(false, "must use a large-enough buffer");
1869     buf[0] = '\0';
1870     return;
1871   }
1872   // Lazy resolve the path to current module.
1873   if (saved_jvm_path[0] != 0) {
1874     strcpy(buf, saved_jvm_path);
1875     return;
1876   }
1877 
1878   buf[0] = '\0';
1879   if (Arguments::sun_java_launcher_is_altjvm()) {
1880     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1881     // for a JAVA_HOME environment variable and fix up the path so it
1882     // looks like jvm.dll is installed there (append a fake suffix
1883     // hotspot/jvm.dll).
1884     char* java_home_var = ::getenv("JAVA_HOME");
1885     if (java_home_var != NULL && java_home_var[0] != 0 &&
1886         strlen(java_home_var) < (size_t)buflen) {
1887       strncpy(buf, java_home_var, buflen);
1888 
1889       // Determine if this is a legacy image or a modules image;
1890       // a modules image doesn't have a "jre" subdirectory.
1891       size_t len = strlen(buf);
1892       char* jrebin_p = buf + len;
1893       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1894       if (0 != _access(buf, 0)) {
1895         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1896       }
1897       len = strlen(buf);
1898       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1899     }
1900   }
1901 
1902   if (buf[0] == '\0') {
1903     GetModuleFileName(vm_lib_handle, buf, buflen);
1904   }
1905   strncpy(saved_jvm_path, buf, MAX_PATH);
1906   saved_jvm_path[MAX_PATH - 1] = '\0';
1907 }
1908 
1909 
1910 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1911 #ifndef _WIN64
1912   st->print("_");
1913 #endif
1914 }
1915 
1916 
1917 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1918 #ifndef _WIN64
1919   st->print("@%d", args_size  * sizeof(int));
1920 #endif
1921 }
1922 
1923 // This method is a copy of JDK's sysGetLastErrorString
1924 // from src/windows/hpi/src/system_md.c
1925 
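     // Copies a human-readable description of the most recent Win32 or C runtime
     // error into buf and returns its length, or 0 if no error is pending.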
1926 size_t os::lasterror(char* buf, size_t len) {
1927   DWORD errval;
1928 
1929   if ((errval = GetLastError()) != 0) {
1930     // DOS error
1931     size_t n = (size_t)FormatMessage(
1932                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1933                                      NULL,
1934                                      errval,
1935                                      0,
1936                                      buf,
1937                                      (DWORD)len,
1938                                      NULL);
1939     if (n > 3) {
1940       // Drop final '.', CR, LF
1941       if (buf[n - 1] == '\n') n--;
1942       if (buf[n - 1] == '\r') n--;
1943       if (buf[n - 1] == '.') n--;
1944       buf[n] = '\0';
1945     }
1946     return n;
1947   }
1948 
1949   if (errno != 0) {
1950     // C runtime error that has no corresponding DOS error code
1951     const char* s = os::strerror(errno);
1952     size_t n = strlen(s);
1953     if (n >= len) n = len - 1;
1954     strncpy(buf, s, n);
1955     buf[n] = '\0';
1956     return n;
1957   }
1958 
1959   return 0;
1960 }
1961 
1962 int os::get_last_error() {
1963   DWORD error = GetLastError();
1964   if (error == 0) {
1965     error = errno;
1966   }
1967   return (int)error;
1968 }
1969 
1970 // sun.misc.Signal
1971 // NOTE that this is a workaround for an apparent kernel bug where if
1972 // a signal handler for SIGBREAK is installed then that signal handler
1973 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1974 // See bug 4416763.
1975 static void (*sigbreakHandler)(int) = NULL;
1976 
1977 static void UserHandler(int sig, void *siginfo, void *context) {
1978   os::signal_notify(sig);
1979   // We need to reinstate the signal handler each time...
1980   os::signal(sig, (void*)UserHandler);
1981 }
1982 
1983 void* os::user_handler() {
1984   return (void*) UserHandler;
1985 }
1986 
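     // Install 'handler' for 'signal_number' and return the previously installed
     // handler. SIGBREAK is tracked separately (and dispatched from consoleHandler
     // below) unless -Xrs/ReduceSignalUsage is in effect.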
1987 void* os::signal(int signal_number, void* handler) {
1988   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1989     void (*oldHandler)(int) = sigbreakHandler;
1990     sigbreakHandler = (void (*)(int)) handler;
1991     return (void*) oldHandler;
1992   } else {
1993     return (void*)::signal(signal_number, (void (*)(int))handler);
1994   }
1995 }
1996 
1997 void os::signal_raise(int signal_number) {
1998   raise(signal_number);
1999 }
2000 
2001 // The Win32 C runtime library maps all console control events other than ^C
2002 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2003 // logoff, and shutdown events.  We therefore install our own console handler
2004 // that raises SIGTERM for the latter cases.
2005 //
2006 static BOOL WINAPI consoleHandler(DWORD event) {
2007   switch (event) {
2008   case CTRL_C_EVENT:
2009     if (VMError::is_error_reported()) {
2010       // Ctrl-C is pressed during error reporting, likely because the error
2011       // handler fails to abort. Let VM die immediately.
2012       os::die();
2013     }
2014 
2015     os::signal_raise(SIGINT);
2016     return TRUE;
2017     break;
2018   case CTRL_BREAK_EVENT:
2019     if (sigbreakHandler != NULL) {
2020       (*sigbreakHandler)(SIGBREAK);
2021     }
2022     return TRUE;
2023     break;
2024   case CTRL_LOGOFF_EVENT: {
2025     // Don't terminate JVM if it is running in a non-interactive session,
2026     // such as a service process.
2027     USEROBJECTFLAGS flags;
2028     HANDLE handle = GetProcessWindowStation();
2029     if (handle != NULL &&
2030         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2031         sizeof(USEROBJECTFLAGS), NULL)) {
2032       // If it is a non-interactive session, let the next handler deal
2033       // with it.
2034       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2035         return FALSE;
2036       }
2037     }
2038   }
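       // Fall through: an interactive logoff is treated like close/shutdown below.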
2039   case CTRL_CLOSE_EVENT:
2040   case CTRL_SHUTDOWN_EVENT:
2041     os::signal_raise(SIGTERM);
2042     return TRUE;
2043     break;
2044   default:
2045     break;
2046   }
2047   return FALSE;
2048 }
2049 
2050 // The following code was moved from os.cpp to make this
2051 // code platform specific, which it is by its very nature.
2052 
2053 // Return maximum OS signal used + 1 for internal use only
2054 // Used as exit signal for signal_thread
2055 int os::sigexitnum_pd() {
2056   return NSIG;
2057 }
2058 
2059 // a counter for each possible signal value, including signal_thread exit signal
2060 static volatile jint pending_signals[NSIG+1] = { 0 };
2061 static Semaphore* sig_sem = NULL;
2062 
2063 static void jdk_misc_signal_init() {
2064   // Initialize signal structures
2065   memset((void*)pending_signals, 0, sizeof(pending_signals));
2066 
2067   // Initialize signal semaphore
2068   sig_sem = new Semaphore();
2069 
2070   // Programs embedding the VM do not want it to attempt to receive
2071   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2072   // shutdown hooks mechanism introduced in 1.3.  For example, when
2073   // the VM is run as part of a Windows NT service (i.e., a servlet
2074   // engine in a web server), the correct behavior is for any console
2075   // control handler to return FALSE, not TRUE, because the OS's
2076   // "final" handler for such events allows the process to continue if
2077   // it is a service (while terminating it if it is not a service).
2078   // To make this behavior uniform and the mechanism simpler, we
2079   // completely disable the VM's usage of these console events if -Xrs
2080   // (=ReduceSignalUsage) is specified.  This means, for example, that
2081   // the CTRL-BREAK thread dump mechanism is also disabled in this
2082   // case.  See bugs 4323062, 4345157, and related bugs.
2083 
2084   // Add a CTRL-C handler
2085   SetConsoleCtrlHandler(consoleHandler, TRUE);
2086 }
2087 
2088 void os::signal_notify(int sig) {
2089   if (sig_sem != NULL) {
2090     Atomic::inc(&pending_signals[sig]);
2091     sig_sem->signal();
2092   } else {
2093     // With ReduceSignalUsage the signal thread is not created and
2094     // jdk_misc_signal_init() is never called.
2095     assert(ReduceSignalUsage, "signal semaphore should be created");
2096   }
2097 }
2098 
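     // Wait on the signal semaphore until some signal has been posted by
     // os::signal_notify(), then atomically decrement that signal's pending count
     // and return its number. The loop cooperates with the suspend-equivalent
     // protocol so the waiting thread can be safely suspended while blocked.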
2099 static int check_pending_signals() {
2100   while (true) {
2101     for (int i = 0; i < NSIG + 1; i++) {
2102       jint n = pending_signals[i];
2103       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
2104         return i;
2105       }
2106     }
2107     JavaThread *thread = JavaThread::current();
2108 
2109     ThreadBlockInVM tbivm(thread);
2110 
2111     bool threadIsSuspended;
2112     do {
2113       thread->set_suspend_equivalent();
2114       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2115       sig_sem->wait();
2116 
2117       // were we externally suspended while we were waiting?
2118       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2119       if (threadIsSuspended) {
2120         // The semaphore has been incremented, but while we were waiting
2121         // another thread suspended us. We don't want to continue running
2122         // while suspended because that would surprise the thread that
2123         // suspended us.
2124         sig_sem->signal();
2125 
2126         thread->java_suspend_self();
2127       }
2128     } while (threadIsSuspended);
2129   }
2130 }
2131 
2132 int os::signal_wait() {
2133   return check_pending_signals();
2134 }
2135 
2136 // Implicit OS exception handling
2137 
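     // Redirect the faulting thread to 'handler': record the original pc in the
     // thread (so the target stub can retrieve it), rewrite the context's
     // instruction pointer, and then tell the OS to continue execution there.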
2138 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2139                       address handler) {
2140   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2141   // Save pc in thread
2142 #ifdef _M_AMD64
2143   // Do not blow up if no thread info available.
2144   if (thread) {
2145     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2146   }
2147   // Set pc to handler
2148   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2149 #else
2150   // Do not blow up if no thread info available.
2151   if (thread) {
2152     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2153   }
2154   // Set pc to handler
2155   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2156 #endif
2157 
2158   // Continue the execution
2159   return EXCEPTION_CONTINUE_EXECUTION;
2160 }
2161 
2162 
2163 // Used for PostMortemDump
2164 extern "C" void safepoints();
2165 extern "C" void find(int x);
2166 extern "C" void events();
2167 
2168 // According to Windows API documentation, an illegal instruction sequence should generate
2169 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2170 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2171 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2172 
2173 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2174 
2175 // From "Execution Protection in the Windows Operating System" draft 0.35
2176 // Once a system header becomes available, the "real" define should be
2177 // included or copied here.
2178 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2179 
2180 // Windows Vista/2008 heap corruption check
2181 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2182 
2183 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2184 // C++ compiler contain this error code. Because this is a compiler-generated
2185 // error, the code is not listed in the Win32 API header files.
2186 // The code is actually a cryptic mnemonic device, with the initial "E"
2187 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2188 // ASCII values of "msc".
2189 
2190 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2191 
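     // def_excpt(X) expands to { "X", X }, pairing the stringized name with its
     // numeric code for the exception-name lookup table below.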
2192 #define def_excpt(val) { #val, (val) }
2193 
2194 static const struct { const char* name; uint number; } exceptlabels[] = {
2195     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2196     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2197     def_excpt(EXCEPTION_BREAKPOINT),
2198     def_excpt(EXCEPTION_SINGLE_STEP),
2199     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2200     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2201     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2202     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2203     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2204     def_excpt(EXCEPTION_FLT_OVERFLOW),
2205     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2206     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2207     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2208     def_excpt(EXCEPTION_INT_OVERFLOW),
2209     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2210     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2211     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2212     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2213     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2214     def_excpt(EXCEPTION_STACK_OVERFLOW),
2215     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2216     def_excpt(EXCEPTION_GUARD_PAGE),
2217     def_excpt(EXCEPTION_INVALID_HANDLE),
2218     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2219     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2220 };
2221 
2222 #undef def_excpt
2223 
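     // Map a Windows exception code to its symbolic name. Returns buf on success,
     // or NULL (leaving buf untouched) if the code is not in the table above.
     // For example, exception_name(EXCEPTION_ACCESS_VIOLATION, buf, sizeof(buf))
     // writes "EXCEPTION_ACCESS_VIOLATION" into buf.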
2224 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2225   uint code = static_cast<uint>(exception_code);
2226   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2227     if (exceptlabels[i].number == code) {
2228       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2229       return buf;
2230     }
2231   }
2232 
2233   return NULL;
2234 }
2235 
2236 //-----------------------------------------------------------------------------
2237 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2238   // handle exception caused by idiv; should only happen for -MinInt/-1
2239   // (division by zero is handled explicitly)
2240 #ifdef  _M_AMD64
2241   PCONTEXT ctx = exceptionInfo->ContextRecord;
2242   address pc = (address)ctx->Rip;
2243   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2244   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2245   if (pc[0] == 0xF7) {
2246     // set correct result values and continue after idiv instruction
2247     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2248   } else {
2249     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2250   }
2251   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2252   // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2253   // idiv opcode (0xF7).
2254   ctx->Rdx = (DWORD)0;             // remainder
2255   // Continue the execution
2256 #else
2257   PCONTEXT ctx = exceptionInfo->ContextRecord;
2258   address pc = (address)ctx->Eip;
2259   assert(pc[0] == 0xF7, "not an idiv opcode");
2260   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2261   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2262   // set correct result values and continue after idiv instruction
2263   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2264   ctx->Eax = (DWORD)min_jint;      // result
2265   ctx->Edx = (DWORD)0;             // remainder
2266   // Continue the execution
2267 #endif
2268   return EXCEPTION_CONTINUE_EXECUTION;
2269 }
2270 
2271 //-----------------------------------------------------------------------------
2272 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2273   PCONTEXT ctx = exceptionInfo->ContextRecord;
2274 #ifndef  _WIN64
2275   // handle exception caused by native method modifying control word
2276   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2277 
2278   switch (exception_code) {
2279   case EXCEPTION_FLT_DENORMAL_OPERAND:
2280   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2281   case EXCEPTION_FLT_INEXACT_RESULT:
2282   case EXCEPTION_FLT_INVALID_OPERATION:
2283   case EXCEPTION_FLT_OVERFLOW:
2284   case EXCEPTION_FLT_STACK_CHECK:
2285   case EXCEPTION_FLT_UNDERFLOW:
2286     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2287     if (fp_control_word != ctx->FloatSave.ControlWord) {
2288       // Restore FPCW and mask out FLT exceptions
2289       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2290       // Mask out pending FLT exceptions
2291       ctx->FloatSave.StatusWord &=  0xffffff00;
2292       return EXCEPTION_CONTINUE_EXECUTION;
2293     }
2294   }
2295 
2296   if (prev_uef_handler != NULL) {
2297     // We didn't handle this exception so pass it to the previous
2298     // UnhandledExceptionFilter.
2299     return (prev_uef_handler)(exceptionInfo);
2300   }
2301 #else // !_WIN64
2302   // On Windows, the mxcsr control bits are non-volatile across calls
2303   // See also CR 6192333
2304   //
2305   jint MxCsr = INITIAL_MXCSR;
2306   // we can't use StubRoutines::addr_mxcsr_std()
2307   // because in Win64 mxcsr is not saved there
2308   if (MxCsr != ctx->MxCsr) {
2309     ctx->MxCsr = MxCsr;
2310     return EXCEPTION_CONTINUE_EXECUTION;
2311   }
2312 #endif // !_WIN64
2313 
2314   return EXCEPTION_CONTINUE_SEARCH;
2315 }
2316 
2317 static inline void report_error(Thread* t, DWORD exception_code,
2318                                 address addr, void* siginfo, void* context) {
2319   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2320 
2321   // If UseOsErrorReporting, this will return here and save the error file
2322   // somewhere where we can find it in the minidump.
2323 }
2324 
2325 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2326         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2327   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2328   address addr = (address) exceptionRecord->ExceptionInformation[1];
2329   if (Interpreter::contains(pc)) {
2330     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2331     if (!fr->is_first_java_frame()) {
2332       // get_frame_at_stack_banging_point() is only called when we
2333       // have well defined stacks so java_sender() calls do not need
2334       // to assert safe_for_sender() first.
2335       *fr = fr->java_sender();
2336     }
2337   } else {
2338     // more complex code with compiled code
2339     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2340     CodeBlob* cb = CodeCache::find_blob(pc);
2341     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2342       // Not sure where the pc points to, fallback to default
2343       // stack overflow handling
2344       return false;
2345     } else {
2346       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2347       // in compiled code, the stack banging is performed just after the return pc
2348       // has been pushed on the stack
2349       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2350       if (!fr->is_java_frame()) {
2351         // See java_sender() comment above.
2352         *fr = fr->java_sender();
2353       }
2354     }
2355   }
2356   assert(fr->is_java_frame(), "Safety check");
2357   return true;
2358 }
2359 
2360 #if INCLUDE_AOT
2361 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2362   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2363   address addr = (address) exceptionRecord->ExceptionInformation[1];
2364   address pc = (address) exceptionInfo->ContextRecord->Rip;
2365 
2366   // Handle the case where we get an implicit exception in AOT generated
2367   // code.  Loaded AOT DLLs are not registered for structured exceptions.
2368   // If the exception occurred in the codeCache or AOT code, pass control
2369   // to our normal exception handler.
2370   CodeBlob* cb = CodeCache::find_blob(pc);
2371   if (cb != NULL) {
2372     return topLevelExceptionFilter(exceptionInfo);
2373   }
2374 
2375   return EXCEPTION_CONTINUE_SEARCH;
2376 }
2377 #endif
2378 
2379 //-----------------------------------------------------------------------------
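     // The VM's top-level structured exception filter. It classifies hardware
     // exceptions raised in Java, native or VM code and either dispatches them to
     // the appropriate HotSpot continuation stub (safepoint polls, implicit null
     // checks, stack overflows, integer-division traps, ...) or reports an error.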
2380 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2381   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2382   PEXCEPTION_RECORD exception_record = exceptionInfo->ExceptionRecord;
2383   DWORD exception_code = exception_record->ExceptionCode;
2384 #ifdef _M_AMD64
2385   address pc = (address) exceptionInfo->ContextRecord->Rip;
2386 #else
2387   address pc = (address) exceptionInfo->ContextRecord->Eip;
2388 #endif
2389   Thread* t = Thread::current_or_null_safe();
2390 
2391   // Handle SafeFetch32 and SafeFetchN exceptions.
2392   if (StubRoutines::is_safefetch_fault(pc)) {
2393     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2394   }
2395 
2396 #ifndef _WIN64
2397   // Execution protection violation - win32 running on AMD64 only
2398   // Handled first to avoid misdiagnosis as a "normal" access violation;
2399   // This is safe to do because we have a new/unique ExceptionInformation
2400   // code for this condition.
2401   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2402     int exception_subcode = (int) exception_record->ExceptionInformation[0];
2403     address addr = (address) exception_record->ExceptionInformation[1];
2404 
2405     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2406       int page_size = os::vm_page_size();
2407 
2408       // Make sure the pc and the faulting address are sane.
2409       //
2410       // If an instruction spans a page boundary, and the page containing
2411       // the beginning of the instruction is executable but the following
2412       // page is not, the pc and the faulting address might be slightly
2413       // different - we still want to unguard the 2nd page in this case.
2414       //
2415       // 15 bytes seems to be a (very) safe value for max instruction size.
2416       bool pc_is_near_addr =
2417         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2418       bool instr_spans_page_boundary =
2419         (align_down((intptr_t) pc ^ (intptr_t) addr,
2420                          (intptr_t) page_size) > 0);
2421 
2422       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2423         static volatile address last_addr =
2424           (address) os::non_memory_address_word();
2425 
2426         // In conservative mode, don't unguard unless the address is in the VM
2427         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2428             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2429 
2430           // Set memory to RWX and retry
2431           address page_start = align_down(addr, page_size);
2432           bool res = os::protect_memory((char*) page_start, page_size,
2433                                         os::MEM_PROT_RWX);
2434 
2435           log_debug(os)("Execution protection violation "
2436                         "at " INTPTR_FORMAT
2437                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2438                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2439 
2440           // Set last_addr so if we fault again at the same address, we don't
2441           // end up in an endless loop.
2442           //
2443           // There are two potential complications here.  Two threads trapping
2444           // at the same address at the same time could cause one of the
2445           // threads to think it already unguarded, and abort the VM.  Likely
2446           // very rare.
2447           //
2448           // The other race involves two threads alternately trapping at
2449           // different addresses and failing to unguard the page, resulting in
2450           // an endless loop.  This condition is probably even more unlikely
2451           // than the first.
2452           //
2453           // Although both cases could be avoided by using locks or thread
2454           // local last_addr, these solutions are unnecessary complication:
2455           // this handler is a best-effort safety net, not a complete solution.
2456           // It is disabled by default and should only be used as a workaround
2457           // in case we missed any no-execute-unsafe VM code.
2458 
2459           last_addr = addr;
2460 
2461           return EXCEPTION_CONTINUE_EXECUTION;
2462         }
2463       }
2464 
2465       // Last unguard failed or not unguarding
2466       tty->print_raw_cr("Execution protection violation");
2467       report_error(t, exception_code, addr, exception_record,
2468                    exceptionInfo->ContextRecord);
2469       return EXCEPTION_CONTINUE_SEARCH;
2470     }
2471   }
2472 #endif // _WIN64
2473 
2474   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2475       VM_Version::is_cpuinfo_segv_addr(pc)) {
2476     // Verify that the OS saves/restores AVX registers.
2477     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2478   }
2479 
2480   if (t != NULL && t->is_Java_thread()) {
2481     JavaThread* thread = (JavaThread*) t;
2482     bool in_java = thread->thread_state() == _thread_in_Java;
2483     bool in_native = thread->thread_state() == _thread_in_native;
2484     bool in_vm = thread->thread_state() == _thread_in_vm;
2485 
2486     // Handle potential stack overflows up front.
2487     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2488       if (thread->stack_guards_enabled()) {
2489         if (in_java) {
2490           frame fr;
2491           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2492             assert(fr.is_java_frame(), "Must be a Java frame");
2493             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2494           }
2495         }
2496         // Yellow zone violation.  The OS has unprotected the first yellow
2497         // zone page for us.  Note: must call disable_stack_yellow_reserved_zone()
2498         // to update the enabled status, even if the zone contains only one page.
2499         assert(!in_vm, "Undersized StackShadowPages");
2500         thread->disable_stack_yellow_reserved_zone();
2501         // If not in java code, return and hope for the best.
2502         return in_java
2503             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2504             :  EXCEPTION_CONTINUE_EXECUTION;
2505       } else {
2506         // Fatal red zone violation.
2507         thread->disable_stack_red_zone();
2508         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2509         report_error(t, exception_code, pc, exception_record,
2510                       exceptionInfo->ContextRecord);
2511         return EXCEPTION_CONTINUE_SEARCH;
2512       }
2513     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2514       if (in_java) {
2515         // Either stack overflow or null pointer exception.
2516         address addr = (address) exception_record->ExceptionInformation[1];
2517         address stack_end = thread->stack_end();
2518         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2519           // Stack overflow.
2520           assert(!os::uses_stack_guard_pages(),
2521                  "should be caught by red zone code above.");
2522           return Handle_Exception(exceptionInfo,
2523                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2524         }
2525         // Check for safepoint polling and implicit null.
2526         // We only expect null pointers in the stubs (vtable);
2527         // the rest are checked explicitly now.
2528         CodeBlob* cb = CodeCache::find_blob(pc);
2529         if (cb != NULL) {
2530           if (SafepointMechanism::is_poll_address(addr)) {
2531             address stub = SharedRuntime::get_poll_stub(pc);
2532             return Handle_Exception(exceptionInfo, stub);
2533           }
2534         }
2535 #ifdef _WIN64
2536         // If it's a legal stack address, map the entire region in.
2537         if (thread->is_in_usable_stack(addr)) {
2538           addr = (address)((uintptr_t)addr &
2539                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2540           os::commit_memory((char *)addr, thread->stack_base() - addr,
2541                             !ExecMem);
2542           return EXCEPTION_CONTINUE_EXECUTION;
2543         }
2544 #endif
2545         // Null pointer exception.
2546         if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2547           address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2548           if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2549         }
2550         report_error(t, exception_code, pc, exception_record,
2551                       exceptionInfo->ContextRecord);
2552         return EXCEPTION_CONTINUE_SEARCH;
2553       }
2554 
2555 #ifdef _WIN64
2556       // Special care for fast JNI field accessors.
2557       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2558       // in and the heap gets shrunk before the field access.
2559       address slowcase_pc = JNI_FastGetField::find_slowcase_pc(pc);
2560       if (slowcase_pc != (address)-1) {
2561         return Handle_Exception(exceptionInfo, slowcase_pc);
2562       }
2563 #endif
2564 
2565       // Stack overflow or null pointer exception in native code.
2566       report_error(t, exception_code, pc, exception_record,
2567                    exceptionInfo->ContextRecord);
2568       return EXCEPTION_CONTINUE_SEARCH;
2569     } // /EXCEPTION_ACCESS_VIOLATION
2570     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2571 
2572     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2573       CompiledMethod* nm = NULL;
2574       JavaThread* thread = (JavaThread*)t;
2575       if (in_java) {
2576         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2577         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2578       }
2579 
2580       bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2581       if (((in_vm || in_native || is_unsafe_arraycopy) && thread->doing_unsafe_access()) ||
2582           (nm != NULL && nm->has_unsafe_access())) {
2583         address next_pc =  Assembler::locate_next_instruction(pc);
2584         if (is_unsafe_arraycopy) {
2585           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2586         }
2587         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2588       }
2589     }
2590 
2591     if (in_java) {
2592       switch (exception_code) {
2593       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2594         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2595 
2596       case EXCEPTION_INT_OVERFLOW:
2597         return Handle_IDiv_Exception(exceptionInfo);
2598 
2599       } // switch
2600     }
2601     if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2602       LONG result=Handle_FLT_Exception(exceptionInfo);
2603       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2604     }
2605   }
2606 
2607   if (exception_code != EXCEPTION_BREAKPOINT) {
2608     report_error(t, exception_code, pc, exception_record,
2609                  exceptionInfo->ContextRecord);
2610   }
2611   return EXCEPTION_CONTINUE_SEARCH;
2612 }
2613 
2614 #ifndef _WIN64
2615 // Special care for fast JNI accessors.
2616 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2617 // the heap gets shrunk before the field access.
2618 // Need to install our own structured exception handler since native code may
2619 // install its own.
2620 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2621   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2622   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2623     address pc = (address) exceptionInfo->ContextRecord->Eip;
2624     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2625     if (addr != (address)-1) {
2626       return Handle_Exception(exceptionInfo, addr);
2627     }
2628   }
2629   return EXCEPTION_CONTINUE_SEARCH;
2630 }
2631 
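     // Wrap each fast JNI field accessor in a Win32 __try/__except block so that
     // an access violation (e.g. after the heap shrinks under a concurrent GC) is
     // handled by fastJNIAccessorExceptionFilter above, which redirects execution
     // to the slow-path accessor.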
2632 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2633   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2634                                                      jobject obj,           \
2635                                                      jfieldID fieldID) {    \
2636     __try {                                                                 \
2637       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2638                                                                  obj,       \
2639                                                                  fieldID);  \
2640     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2641                                               _exception_info())) {         \
2642     }                                                                       \
2643     return 0;                                                               \
2644   }
2645 
2646 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2647 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2648 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2649 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2650 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2651 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2652 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2653 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2654 
2655 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2656   switch (type) {
2657   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2658   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2659   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2660   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2661   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2662   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2663   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2664   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2665   default:        ShouldNotReachHere();
2666   }
2667   return (address)-1;
2668 }
2669 #endif
2670 
2671 // Virtual Memory
2672 
2673 int os::vm_page_size() { return os::win32::vm_page_size(); }
2674 int os::vm_allocation_granularity() {
2675   return os::win32::vm_allocation_granularity();
2676 }
2677 
2678 // Windows large page support is available on Windows 2003. In order to use
2679 // large page memory, the administrator must first assign additional privilege
2680 // to the user:
2681 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2682 //   + select Local Policies -> User Rights Assignment
2683 //   + double click "Lock pages in memory", add users and/or groups
2684 //   + reboot
2685 // Note the above steps are needed for administrator as well, as administrators
2686 // by default do not have the privilege to lock pages in memory.
2687 //
2688 // Note about Windows 2003: although the API supports committing large page
2689 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2690 // scenario, I found through experiment that it only uses large pages if the entire
2691 // memory region is reserved and committed in a single VirtualAlloc() call.
2692 // This makes Windows large page support more or less like Solaris ISM, in
2693 // that the entire heap must be committed upfront. This probably will change
2694 // in the future, if so the code below needs to be revisited.
2695 
2696 #ifndef MEM_LARGE_PAGES
2697   #define MEM_LARGE_PAGES 0x20000000
2698 #endif
2699 
2700 #define VirtualFreeChecked(mem, size, type)                       \
2701   do {                                                            \
2702     bool ret = VirtualFree(mem, size, type);                      \
2703     assert(ret, "Failed to free memory: " PTR_FORMAT, p2i(mem));  \
2704   } while (false)
2705 
2706 // The number of bytes is set up to match 1 pixel at 32 bits per pixel.
2707 static const int gdi_tiny_bitmap_width_bytes = 4;
2708 
2709 static HBITMAP gdi_create_tiny_bitmap(void* mem) {
2710   // The documentation for CreateBitmap states a word-alignment requirement.
2711   STATIC_ASSERT(is_aligned_(gdi_tiny_bitmap_width_bytes, sizeof(WORD)));
2712 
2713   // Some callers use this function to test if memory crossing separate memory
2714   // reservations can be used. Create a height of 2 to make sure that one pixel
2715   // ends up in the first reservation and the other in the second.
2716   int nHeight = 2;
2717 
2718   assert(is_aligned(mem, gdi_tiny_bitmap_width_bytes), "Incorrect alignment");
2719 
2720   // Width is one pixel and correlates with gdi_tiny_bitmap_width_bytes.
2721   int nWidth = 1;
2722 
2723   // Calculate bit count - will be 32.
2724   UINT nBitCount = gdi_tiny_bitmap_width_bytes / nWidth * BitsPerByte;
2725 
2726   return CreateBitmap(
2727       nWidth,
2728       nHeight,
2729       1,         // nPlanes
2730       nBitCount,
2731       mem);      // lpBits
2732 }
2733 
2734 // It has been found that some of the GDI functions fail under these two situations:
2735 //  1) When used with large pages
2736 //  2) When mem crosses the boundary between two separate memory reservations.
2737 //
2738 // This is a small test used to see if the current GDI implementation is
2739 // susceptible to any of these problems.
2740 static bool gdi_can_use_memory(void* mem) {
2741   HBITMAP bitmap = gdi_create_tiny_bitmap(mem);
2742   if (bitmap != NULL) {
2743     DeleteObject(bitmap);
2744     return true;
2745   }
2746 
2747   // Verify that the bitmap could be created with a normal page.
2748   // If this fails, the testing method above isn't reliable.
2749 #ifdef ASSERT
2750   void* verify_mem = ::malloc(4 * 1024);
2751   HBITMAP verify_bitmap = gdi_create_tiny_bitmap(verify_mem);
2752   if (verify_bitmap == NULL) {
2753     fatal("Couldn't create test bitmap with malloced memory");
2754   } else {
2755     DeleteObject(verify_bitmap);
2756   }
2757   ::free(verify_mem);
2758 #endif
2759 
2760   return false;
2761 }
2762 
2763 // Test if GDI functions work when memory spans
2764 // two adjacent memory reservations.
2765 static bool gdi_can_use_split_reservation_memory(bool use_large_pages, size_t granule) {
2766   DWORD mem_large_pages = use_large_pages ? MEM_LARGE_PAGES : 0;
2767 
2768   // Find virtual memory range. Two granules for regions and one for alignment.
2769   void* reserved = VirtualAlloc(NULL,
2770                                 granule * 3,
2771                                 MEM_RESERVE,
2772                                 PAGE_NOACCESS);
2773   if (reserved == NULL) {
2774     // Can't proceed with test - pessimistically report false
2775     return false;
2776   }
2777   VirtualFreeChecked(reserved, 0, MEM_RELEASE);
2778 
2779   // Ensure proper alignment
2780   void* res0 = align_up(reserved, granule);
2781   void* res1 = (char*)res0 + granule;
2782 
2783   // Reserve and commit the first part
2784   void* mem0 = VirtualAlloc(res0,
2785                             granule,
2786                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2787                             PAGE_READWRITE);
2788   if (mem0 != res0) {
2789     // Can't proceed with test - pessimistically report false
2790     return false;
2791   }
2792 
2793   // Reserve and commit the second part
2794   void* mem1 = VirtualAlloc(res1,
2795                             granule,
2796                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2797                             PAGE_READWRITE);
2798   if (mem1 != res1) {
2799     VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2800     // Can't proceed with test - pessimistically report false
2801     return false;
2802   }
2803 
2804   // Point the bitmap's bits one bitmap width (gdi_tiny_bitmap_width_bytes)
2805   // before mem1, so that the bitmap extends across the reservation boundary.
2806   void* bitmapBits = (char*)mem1 - gdi_tiny_bitmap_width_bytes;
2807 
2808   bool success = gdi_can_use_memory(bitmapBits);
2809 
2810   VirtualFreeChecked(mem1, 0, MEM_RELEASE);
2811   VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2812 
2813   return success;
2814 }
2815 
2816 // Container for NUMA node list info
2817 class NUMANodeListHolder {
2818  private:
2819   int *_numa_used_node_list;  // allocated below
2820   int _numa_used_node_count;
2821 
2822   void free_node_list() {
2823     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2824   }
2825 
2826  public:
2827   NUMANodeListHolder() {
2828     _numa_used_node_count = 0;
2829     _numa_used_node_list = NULL;
2830     // Do the rest of the initialization in build() (after the function pointers are set up).
2831   }
2832 
2833   ~NUMANodeListHolder() {
2834     free_node_list();
2835   }
2836 
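       // Record every NUMA node whose processor mask intersects our process
       // affinity mask. Returns true only if more than one such node was found,
       // since interleaving over a single node would be pointless.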
2837   bool build() {
2838     DWORD_PTR proc_aff_mask;
2839     DWORD_PTR sys_aff_mask;
2840     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2841     ULONG highest_node_number;
2842     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2843     free_node_list();
2844     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2845     for (unsigned int i = 0; i <= highest_node_number; i++) {
2846       ULONGLONG proc_mask_numa_node;
2847       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2848       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2849         _numa_used_node_list[_numa_used_node_count++] = i;
2850       }
2851     }
2852     return (_numa_used_node_count > 1);
2853   }
2854 
2855   int get_count() { return _numa_used_node_count; }
2856   int get_node_list_entry(int n) {
2857     // for indexes out of range, returns -1
2858     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2859   }
2860 
2861 } numa_node_list_holder;
2862 
2863 static size_t _large_page_size = 0;
2864 
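     // Try to enable SeLockMemoryPrivilege ("Lock pages in memory") on the current
     // process token; Windows generally requires this privilege before
     // MEM_LARGE_PAGES allocations will succeed.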
2865 static bool request_lock_memory_privilege() {
2866   HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2867                                 os::current_process_id());
2868 
2869   bool success = false;
2870   HANDLE hToken = NULL;
2871   LUID luid;
2872   if (hProcess != NULL &&
2873       OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
2874       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2875 
2876     TOKEN_PRIVILEGES tp;
2877     tp.PrivilegeCount = 1;
2878     tp.Privileges[0].Luid = luid;
2879     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2880 
2881     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2882     // privilege. Check GetLastError() too. See the MSDN documentation.
2883     if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2884         (GetLastError() == ERROR_SUCCESS)) {
2885       success = true;
2886     }
2887   }
2888 
2889   // Cleanup
2890   if (hProcess != NULL) {
2891     CloseHandle(hProcess);
2892   }
2893   if (hToken != NULL) {
2894     CloseHandle(hToken);
2895   }
2896 
2897   return success;
2898 }
2899 
2900 static bool numa_interleaving_init() {
2901   bool success = false;
2902 
2903   // Print a warning if the UseNUMAInterleaving flag is specified on the command line.
2904   bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2905 
2906 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2907 
2908   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2909   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2910   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2911 
2912   if (!numa_node_list_holder.build()) {
2913     WARN("Process does not cover multiple NUMA nodes.");
2914     WARN("...Ignoring UseNUMAInterleaving flag.");
2915     return false;
2916   }
2917 
2918   if (!gdi_can_use_split_reservation_memory(UseLargePages, min_interleave_granularity)) {
2919     WARN("Windows GDI cannot handle split reservations.");
2920     WARN("...Ignoring UseNUMAInterleaving flag.");
2921     return false;
2922   }
2923 
2924   if (log_is_enabled(Debug, os, cpu)) {
2925     Log(os, cpu) log;
2926     log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2927     for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2928       log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2929     }
2930   }
2931 
2932 #undef WARN
2933 
2934   return true;
2935 }
2936 
2937 // This routine is used whenever we need to reserve a contiguous VA range
2938 // but must make separate VirtualAlloc calls for each piece of the range.
2939 // Reasons for doing this:
2940 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but possible to be set otherwise)
2941 //  * UseNUMAInterleaving requires each piece to be allocated on a separate node
2942 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2943                                          DWORD prot,
2944                                          bool should_inject_error = false) {
2945   char * p_buf;
2946   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2947   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2948   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2949 
  // First reserve enough address space in advance, since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits, but WS2003 does not allow reserving large page space.
  // So we just use 4K pages for the reserve; this gives us a legal contiguous
  // address space. Then we deallocate that reservation and re-allocate
  // using large pages.
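  // The extra chunk_size in the reservation leaves room to align p_buf up to a
  // page boundary below without running past the end of the reserved range.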
2956   const size_t size_of_reserve = bytes + chunk_size;
2957   if (bytes > size_of_reserve) {
2958     // Overflowed.
2959     return NULL;
2960   }
2961   p_buf = (char *) VirtualAlloc(addr,
2962                                 size_of_reserve,  // size of Reserve
2963                                 MEM_RESERVE,
2964                                 PAGE_READWRITE);
2965   // If reservation failed, return NULL
2966   if (p_buf == NULL) return NULL;
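  // Tell NMT about the probe reservation and release it again right away; the
  // VirtualAlloc above was only used to find a suitable contiguous address range,
  // which is re-reserved piece by piece below.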
2967   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2968   os::release_memory(p_buf, bytes + chunk_size);
2969 
  // We still need to round up to a page boundary (in case we are using large pages),
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with the page size);
  // instead we handle this in the bytes_to_rq computation below.
2973   p_buf = align_up(p_buf, page_size);
2974 
2975   // now go through and allocate one chunk at a time until all bytes are
2976   // allocated
2977   size_t  bytes_remaining = bytes;
2978   // An overflow of align_up() would have been caught above
2979   // in the calculation of size_of_reserve.
2980   char * next_alloc_addr = p_buf;
2981   HANDLE hProc = GetCurrentProcess();
2982 
2983 #ifdef ASSERT
2984   // Variable for the failure injection
2985   int ran_num = os::random();
2986   size_t fail_after = ran_num % bytes;
2987 #endif
2988 
2989   int count=0;
2990   while (bytes_remaining) {
2991     // select bytes_to_rq to get to the next chunk_size boundary
2992 
2993     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2994     // Note allocate and commit
2995     char * p_new;
2996 
2997 #ifdef ASSERT
2998     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2999 #else
3000     const bool inject_error_now = false;
3001 #endif
3002 
3003     if (inject_error_now) {
3004       p_new = NULL;
3005     } else {
3006       if (!UseNUMAInterleaving) {
3007         p_new = (char *) VirtualAlloc(next_alloc_addr,
3008                                       bytes_to_rq,
3009                                       flags,
3010                                       prot);
3011       } else {
3012         // get the next node to use from the used_node_list
3013         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
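        // Chunks are handed out round-robin across the configured NUMA nodes.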
3014         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3015         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3016       }
3017     }
3018 
3019     if (p_new == NULL) {
3020       // Free any allocated pages
3021       if (next_alloc_addr > p_buf) {
3022         // Some memory was committed so release it.
3023         size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // needs to create a dummy 'reserve' record to match
        // the release.
3027         MemTracker::record_virtual_memory_reserve((address)p_buf,
3028                                                   bytes_to_release, CALLER_PC);
3029         os::release_memory(p_buf, bytes_to_release);
3030       }
3031 #ifdef ASSERT
3032       if (should_inject_error) {
3033         log_develop_debug(pagesize)("Reserving pages individually failed.");
3034       }
3035 #endif
3036       return NULL;
3037     }
3038 
3039     bytes_remaining -= bytes_to_rq;
3040     next_alloc_addr += bytes_to_rq;
3041     count++;
3042   }
3043   // Although the memory is allocated individually, it is returned as one.
3044   // NMT records it as one block.
3045   if ((flags & MEM_COMMIT) != 0) {
3046     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3047   } else {
3048     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3049   }
3050 
3051   // made it this far, success
3052   return p_buf;
3053 }
3054 
3055 static size_t large_page_init_decide_size() {
3056   // print a warning if any large page related flag is specified on command line
3057   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3058                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3059 
3060 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3061 
3062   if (!request_lock_memory_privilege()) {
3063     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3064     return 0;
3065   }
3066 
3067   size_t size = GetLargePageMinimum();
3068   if (size == 0) {
3069     WARN("Large page is not supported by the processor.");
3070     return 0;
3071   }
3072 
3073 #if defined(IA32) || defined(AMD64)
3074   if (size > 4*M || LargePageSizeInBytes > 4*M) {
3075     WARN("JVM cannot use large pages bigger than 4mb.");
3076     return 0;
3077   }
3078 #endif
3079 
3080   if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
3081     size = LargePageSizeInBytes;
3082   }
3083 
3084   // Now test allocating a page
3085   void* large_page = VirtualAlloc(NULL,
3086                                   size,
3087                                   MEM_RESERVE|MEM_COMMIT|MEM_LARGE_PAGES,
3088                                   PAGE_READWRITE);
3089   if (large_page == NULL) {
3090     WARN("JVM cannot allocate one single large page.");
3091     return 0;
3092   }
3093 
3094   // Detect if GDI can use memory backed by large pages
3095   if (!gdi_can_use_memory(large_page)) {
3096     WARN("JVM cannot use large pages because of bug in Windows GDI.");
3097     return 0;
3098   }
3099 
3100   // Release test page
3101   VirtualFreeChecked(large_page, 0, MEM_RELEASE);
3102 
3103 #undef WARN
3104 
3105   return size;
3106 }
3107 
3108 void os::large_page_init() {
3109   if (!UseLargePages) {
3110     return;
3111   }
3112 
3113   _large_page_size = large_page_init_decide_size();
3114 
3115   const size_t default_page_size = (size_t) vm_page_size();
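  // _page_sizes is a 0-terminated list with the largest supported page size first.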
3116   if (_large_page_size > default_page_size) {
3117     _page_sizes[0] = _large_page_size;
3118     _page_sizes[1] = default_page_size;
3119     _page_sizes[2] = 0;
3120   }
3121 
3122   UseLargePages = _large_page_size != 0;
3123 
3124   if (UseLargePages && UseLargePagesIndividualAllocation) {
3125     if (!gdi_can_use_split_reservation_memory(true /* use_large_pages */, _large_page_size)) {
3126       if (FLAG_IS_CMDLINE(UseLargePagesIndividualAllocation)) {
3127         warning("Windows GDI cannot handle split reservations.");
3128         warning("...Ignoring UseLargePagesIndividualAllocation flag.");
3129       }
3130       UseLargePagesIndividualAllocation = false;
3131     }
3132   }
3133 }
3134 
3135 int os::create_file_for_heap(const char* dir) {
3136 
3137   const char name_template[] = "/jvmheap.XXXXXX";
3138 
3139   size_t fullname_len = strlen(dir) + strlen(name_template);
3140   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3141   if (fullname == NULL) {
3142     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3143     return -1;
3144   }
3145   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3146   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3147 
3148   os::native_path(fullname);
3149 
3150   char *path = _mktemp(fullname);
3151   if (path == NULL) {
3152     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3153     os::free(fullname);
3154     return -1;
3155   }
3156 
3157   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3158 
3159   os::free(fullname);
3160   if (fd < 0) {
3161     warning("Problem opening file for heap (%s)", os::strerror(errno));
3162     return -1;
3163   }
3164   return fd;
3165 }
3166 
3167 // If 'base' is not NULL, function will return NULL if it cannot get 'base'
3168 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3169   assert(fd != -1, "File descriptor is not valid");
3170 
3171   HANDLE fh = (HANDLE)_get_osfhandle(fd);
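  // CreateFileMapping takes the maximum size of the mapping as two DWORDs (high and low word).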
3172 #ifdef _LP64
3173   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3174     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3175 #else
3176   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3177     0, (DWORD)size, NULL);
3178 #endif
3179   if (fileMapping == NULL) {
3180     if (GetLastError() == ERROR_DISK_FULL) {
3181       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3182     }
3183     else {
3184       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3185     }
3186 
3187     return NULL;
3188   }
3189 
3190   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
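  // With a non-NULL base, MapViewOfFileEx fails (returns NULL) if the view cannot be
  // placed at that address, which gives the behavior documented above.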
3191 
3192   CloseHandle(fileMapping);
3193 
3194   return (char*)addr;
3195 }
3196 
3197 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3198   assert(fd != -1, "File descriptor is not valid");
3199   assert(base != NULL, "Base address cannot be NULL");
3200 
3201   release_memory(base, size);
3202   return map_memory_to_file(base, size, fd);
3203 }
3204 
3205 // On win32, one cannot release just a part of reserved memory, it's an
3206 // all or nothing deal.  When we split a reservation, we must break the
3207 // reservation into two reservations.
3208 void os::split_reserved_memory(char *base, size_t size, size_t split) {
3209 
3210   char* const split_address = base + split;
3211   assert(size > 0, "Sanity");
3212   assert(size > split, "Sanity");
3213   assert(split > 0, "Sanity");
3214   assert(is_aligned(base, os::vm_allocation_granularity()), "Sanity");
3215   assert(is_aligned(split_address, os::vm_allocation_granularity()), "Sanity");
3216 
3217   release_memory(base, size);
3218   reserve_memory(split, base);
3219   reserve_memory(size - split, split_address);
3220 
3221   // NMT: nothing to do here. Since Windows implements the split by
3222   //  releasing and re-reserving memory, the parts are already registered
3223   //  as individual mappings with NMT.
3224 
3225 }
3226 
// Multiple threads can race in this code, but it's not possible to unmap small sections of
// virtual space to get the requested alignment, as POSIX-like OSes can.
// Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3230 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3231   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3232          "Alignment must be a multiple of allocation granularity (page size)");
3233   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3234 
3235   size_t extra_size = size + alignment;
3236   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3237 
3238   char* aligned_base = NULL;
3239 
3240   do {
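    // Over-reserve by 'alignment' bytes, compute an aligned address inside that
    // reservation, release the whole reservation, and then try to re-reserve exactly
    // at the aligned address. Retry if another thread grabbed the range in between.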
3241     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3242     if (extra_base == NULL) {
3243       return NULL;
3244     }
3245     // Do manual alignment
3246     aligned_base = align_up(extra_base, alignment);
3247 
3248     if (file_desc != -1) {
3249       os::unmap_memory(extra_base, extra_size);
3250     } else {
3251       os::release_memory(extra_base, extra_size);
3252     }
3253 
3254     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3255 
3256   } while (aligned_base == NULL);
3257 
3258   return aligned_base;
3259 }
3260 
3261 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3262   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3263          "reserve alignment");
3264   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3265   char* res;
3266   // note that if UseLargePages is on, all the areas that require interleaving
3267   // will go thru reserve_memory_special rather than thru here.
3268   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3269   if (!use_individual) {
3270     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3271   } else {
3272     elapsedTimer reserveTimer;
3273     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3274     // in numa interleaving, we have to allocate pages individually
3275     // (well really chunks of NUMAInterleaveGranularity size)
3276     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3277     if (res == NULL) {
3278       warning("NUMA page allocation failed");
3279     }
3280     if (Verbose && PrintMiscellaneous) {
3281       reserveTimer.stop();
3282       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3283                     reserveTimer.milliseconds(), reserveTimer.ticks());
3284     }
3285   }
3286   assert(res == NULL || addr == NULL || addr == res,
3287          "Unexpected address from reserve.");
3288 
3289   return res;
3290 }
3291 
3292 // Reserve memory at an arbitrary address, only if that area is
3293 // available (and not reserved for something else).
3294 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
3297   return reserve_memory(bytes, requested_addr);
3298 }
3299 
3300 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3301   assert(file_desc >= 0, "file_desc is not valid");
3302   return map_memory_to_file(requested_addr, bytes, file_desc);
3303 }
3304 
3305 size_t os::large_page_size() {
3306   return _large_page_size;
3307 }
3308 
3309 bool os::can_commit_large_page_memory() {
3310   // Windows only uses large page memory when the entire region is reserved
3311   // and committed in a single VirtualAlloc() call. This may change in the
3312   // future, but with Windows 2003 it's not possible to commit on demand.
3313   return false;
3314 }
3315 
3316 bool os::can_execute_large_page_memory() {
3317   return true;
3318 }
3319 
3320 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3321                                     bool exec) {
3322   assert(UseLargePages, "only for large pages");
3323 
3324   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3325     return NULL; // Fallback to small pages.
3326   }
3327 
3328   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3329   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3330 
3331   // with large pages, there are two cases where we need to use Individual Allocation
3332   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3333   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3334   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3335     log_debug(pagesize)("Reserving large pages individually.");
3336 
3337     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3338     if (p_buf == NULL) {
3339       // give an appropriate warning message
3340       if (UseNUMAInterleaving) {
3341         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3342       }
3343       if (UseLargePagesIndividualAllocation) {
3344         warning("Individually allocated large pages failed, "
3345                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3346       }
3347       return NULL;
3348     }
3349 
3350     return p_buf;
3351 
3352   } else {
3353     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3354 
3355     // normal policy just allocate it all at once
3356     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3357     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3358 
3359     return res;
3360   }
3361 }
3362 
3363 bool os::pd_release_memory_special(char* base, size_t bytes) {
3364   assert(base != NULL, "Sanity check");
3365   return pd_release_memory(base, bytes);
3366 }
3367 
3368 void os::print_statistics() {
3369 }
3370 
3371 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3372   int err = os::get_last_error();
3373   char buf[256];
3374   size_t buf_len = os::lasterror(buf, sizeof(buf));
3375   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3376           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3377           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3378 }
3379 
3380 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3381   if (bytes == 0) {
3382     // Don't bother the OS with noops.
3383     return true;
3384   }
3385   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3386   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3387   // Don't attempt to print anything if the OS call fails. We're
3388   // probably low on resources, so the print itself may cause crashes.
3389 
  // Unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc.
  // In that case we can just do a single commit for the requested size.
3393   if (!UseNUMAInterleaving) {
3394     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3395       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3396       return false;
3397     }
3398     if (exec) {
3399       DWORD oldprot;
3400       // Windows doc says to use VirtualProtect to get execute permissions
3401       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3402         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3403         return false;
3404       }
3405     }
3406     return true;
3407   } else {
3408 
3409     // when NUMAInterleaving is enabled, the commit might cover a range that
3410     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3411     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3412     // returns represents the number of bytes that can be committed in one step.
3413     size_t bytes_remaining = bytes;
3414     char * next_alloc_addr = addr;
3415     while (bytes_remaining > 0) {
3416       MEMORY_BASIC_INFORMATION alloc_info;
3417       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3418       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3419       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3420                        PAGE_READWRITE) == NULL) {
3421         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3422                                             exec);)
3423         return false;
3424       }
3425       if (exec) {
3426         DWORD oldprot;
3427         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3428                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3429           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3430                                               exec);)
3431           return false;
3432         }
3433       }
3434       bytes_remaining -= bytes_to_rq;
3435       next_alloc_addr += bytes_to_rq;
3436     }
3437   }
3438   // if we made it this far, return true
3439   return true;
3440 }
3441 
3442 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3443                           bool exec) {
3444   // alignment_hint is ignored on this OS
3445   return pd_commit_memory(addr, size, exec);
3446 }
3447 
3448 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3449                                   const char* mesg) {
3450   assert(mesg != NULL, "mesg must be specified");
3451   if (!pd_commit_memory(addr, size, exec)) {
3452     warn_fail_commit_memory(addr, size, exec);
3453     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3454   }
3455 }
3456 
3457 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3458                                   size_t alignment_hint, bool exec,
3459                                   const char* mesg) {
3460   // alignment_hint is ignored on this OS
3461   pd_commit_memory_or_exit(addr, size, exec, mesg);
3462 }
3463 
3464 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3465   if (bytes == 0) {
3466     // Don't bother the OS with noops.
3467     return true;
3468   }
3469   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3470   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3471   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3472 }
3473 
3474 bool os::pd_release_memory(char* addr, size_t bytes) {
3475   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3476 }
3477 
3478 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3479   return os::commit_memory(addr, size, !ExecMem);
3480 }
3481 
3482 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3483   return os::uncommit_memory(addr, size);
3484 }
3485 
3486 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3487   uint count = 0;
3488   bool ret = false;
3489   size_t bytes_remaining = bytes;
3490   char * next_protect_addr = addr;
3491 
3492   // Use VirtualQuery() to get the chunk size.
3493   while (bytes_remaining) {
3494     MEMORY_BASIC_INFORMATION alloc_info;
3495     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3496       return false;
3497     }
3498 
3499     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3500     // We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
3501     // but we don't distinguish here as both cases are protected by same API.
    ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
    if (!ret) {
      warning("Failed protecting pages individually for chunk #%u", count);
      return false;
    }
3507 
3508     bytes_remaining -= bytes_to_protect;
3509     next_protect_addr += bytes_to_protect;
3510     count++;
3511   }
3512   return ret;
3513 }
3514 
3515 // Set protections specified
3516 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3517                         bool is_committed) {
3518   unsigned int p = 0;
3519   switch (prot) {
3520   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3521   case MEM_PROT_READ: p = PAGE_READONLY; break;
3522   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3523   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3524   default:
3525     ShouldNotReachHere();
3526   }
3527 
3528   DWORD old_status;
3529 
  // Strangely enough, on Win32 one can change protection only for committed
  // memory; not a big deal anyway, as the region is at most 64K.
3532   if (!is_committed) {
3533     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3534                           "cannot commit protection page");
3535   }
  // One cannot use os::guard_memory() here, as on Win32 guard pages
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
3538   //
3539   // Pages in the region become guard pages. Any attempt to access a guard page
3540   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3541   // the guard page status. Guard pages thus act as a one-time access alarm.
3542   bool ret;
3543   if (UseNUMAInterleaving) {
3544     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3545     // so we must protect the chunks individually.
3546     ret = protect_pages_individually(addr, bytes, p, &old_status);
3547   } else {
3548     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3549   }
3550 #ifdef ASSERT
3551   if (!ret) {
3552     int err = os::get_last_error();
3553     char buf[256];
3554     size_t buf_len = os::lasterror(buf, sizeof(buf));
3555     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3556           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3557           buf_len != 0 ? buf : "<no_error_string>", err);
3558   }
3559 #endif
3560   return ret;
3561 }
3562 
3563 bool os::guard_memory(char* addr, size_t bytes) {
3564   DWORD old_status;
3565   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3566 }
3567 
3568 bool os::unguard_memory(char* addr, size_t bytes) {
3569   DWORD old_status;
3570   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3571 }
3572 
3573 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3574 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3575 void os::numa_make_global(char *addr, size_t bytes)    { }
3576 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3577 bool os::numa_topology_changed()                       { return false; }
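// Report at least one group so that a UMA machine appears as a single NUMA node.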
3578 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3579 int os::numa_get_group_id()                            { return 0; }
3580 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3581   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3582     // Provide an answer for UMA systems
3583     ids[0] = 0;
3584     return 1;
3585   } else {
3586     // check for size bigger than actual groups_num
3587     size = MIN2(size, numa_get_groups_num());
3588     for (int i = 0; i < (int)size; i++) {
3589       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3590     }
3591     return size;
3592   }
3593 }
3594 
3595 int os::numa_get_group_id_for_address(const void* address) {
3596   return 0;
3597 }
3598 
3599 bool os::get_page_info(char *start, page_info* info) {
3600   return false;
3601 }
3602 
3603 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3604                      page_info* page_found) {
3605   return end;
3606 }
3607 
3608 char* os::non_memory_address_word() {
3609   // Must never look like an address returned by reserve_memory,
3610   // even in its subfields (as defined by the CPU immediate fields,
3611   // if the CPU splits constants across multiple instructions).
3612   return (char*)-1;
3613 }
3614 
3615 #define MAX_ERROR_COUNT 100
3616 #define SYS_THREAD_ERROR 0xffffffffUL
3617 
3618 void os::pd_start_thread(Thread* thread) {
3619   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3620   // Returns previous suspend state:
3621   // 0:  Thread was not suspended
3622   // 1:  Thread is running now
3623   // >1: Thread is still suspended.
3624   assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
3625 }
3626 
3627 
3628 // Short sleep, direct OS call.
3629 //
3630 // ms = 0, means allow others (if any) to run.
3631 //
3632 void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptible sleep, short time use only");
3634   Sleep(ms);
3635 }
3636 
3637 // Windows does not provide sleep functionality with nanosecond resolution, so we
3638 // try to approximate this with spinning combined with yielding if another thread
3639 // is ready to run on the current processor.
3640 void os::naked_short_nanosleep(jlong ns) {
  assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3642 
3643   int64_t start = os::javaTimeNanos();
3644   do {
3645     if (SwitchToThread() == 0) {
3646       // Nothing else is ready to run on this cpu, spin a little
3647       SpinPause();
3648     }
3649   } while (os::javaTimeNanos() - start < ns);
3650 }
3651 
3652 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3653 void os::infinite_sleep() {
3654   while (true) {    // sleep forever ...
3655     Sleep(100000);  // ... 100 seconds at a time
3656   }
3657 }
3658 
3659 typedef BOOL (WINAPI * STTSignature)(void);
3660 
3661 void os::naked_yield() {
3662   // Consider passing back the return value from SwitchToThread().
3663   SwitchToThread();
3664 }
3665 
3666 // Win32 only gives you access to seven real priorities at a time,
3667 // so we compress Java's ten down to seven.  It would be better
3668 // if we dynamically adjusted relative priorities.
3669 
3670 int os::java_to_os_priority[CriticalPriority + 1] = {
3671   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3672   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3673   THREAD_PRIORITY_LOWEST,                       // 2
3674   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3675   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3676   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3677   THREAD_PRIORITY_NORMAL,                       // 6
3678   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3679   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3680   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3681   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3682   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3683 };
3684 
3685 int prio_policy1[CriticalPriority + 1] = {
3686   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3687   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3688   THREAD_PRIORITY_LOWEST,                       // 2
3689   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3690   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3691   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3692   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3693   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3694   THREAD_PRIORITY_HIGHEST,                      // 8
3695   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3696   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3697   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3698 };
3699 
3700 static int prio_init() {
3701   // If ThreadPriorityPolicy is 1, switch tables
3702   if (ThreadPriorityPolicy == 1) {
3703     int i;
3704     for (i = 0; i < CriticalPriority + 1; i++) {
3705       os::java_to_os_priority[i] = prio_policy1[i];
3706     }
3707   }
3708   if (UseCriticalJavaThreadPriority) {
3709     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3710   }
3711   return 0;
3712 }
3713 
3714 OSReturn os::set_native_priority(Thread* thread, int priority) {
3715   if (!UseThreadPriorities) return OS_OK;
3716   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3717   return ret ? OS_OK : OS_ERR;
3718 }
3719 
3720 OSReturn os::get_native_priority(const Thread* const thread,
3721                                  int* priority_ptr) {
3722   if (!UseThreadPriorities) {
3723     *priority_ptr = java_to_os_priority[NormPriority];
3724     return OS_OK;
3725   }
3726   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3727   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3728     assert(false, "GetThreadPriority failed");
3729     return OS_ERR;
3730   }
3731   *priority_ptr = os_prio;
3732   return OS_OK;
3733 }
3734 
3735 // GetCurrentThreadId() returns DWORD
3736 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3737 
3738 static int _initial_pid = 0;
3739 
3740 int os::current_process_id() {
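  // _initial_pid is set in os::init(); fall back to _getpid() if this is called
  // before initialization has run.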
3741   return (_initial_pid ? _initial_pid : _getpid());
3742 }
3743 
3744 int    os::win32::_vm_page_size              = 0;
3745 int    os::win32::_vm_allocation_granularity = 0;
3746 int    os::win32::_processor_type            = 0;
3747 // Processor level is not available on non-NT systems, use vm_version instead
3748 int    os::win32::_processor_level           = 0;
3749 julong os::win32::_physical_memory           = 0;
3750 size_t os::win32::_default_stack_size        = 0;
3751 
3752 intx          os::win32::_os_thread_limit    = 0;
3753 volatile intx os::win32::_os_thread_count    = 0;
3754 
3755 bool   os::win32::_is_windows_server         = false;
3756 
3757 // 6573254
3758 // Currently, the bug is observed across all the supported Windows releases,
3759 // including the latest one (as of this writing - Windows Server 2012 R2)
3760 bool   os::win32::_has_exit_bug              = true;
3761 
3762 void os::win32::initialize_system_info() {
3763   SYSTEM_INFO si;
3764   GetSystemInfo(&si);
3765   _vm_page_size    = si.dwPageSize;
3766   _vm_allocation_granularity = si.dwAllocationGranularity;
3767   _processor_type  = si.dwProcessorType;
3768   _processor_level = si.wProcessorLevel;
3769   set_processor_count(si.dwNumberOfProcessors);
3770 
3771   MEMORYSTATUSEX ms;
3772   ms.dwLength = sizeof(ms);
3773 
3774   // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3775   // dwMemoryLoad (% of memory in use)
3776   GlobalMemoryStatusEx(&ms);
3777   _physical_memory = ms.ullTotalPhys;
3778 
3779   if (FLAG_IS_DEFAULT(MaxRAM)) {
3780     // Adjust MaxRAM according to the maximum virtual address space available.
3781     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3782   }
3783 
3784   OSVERSIONINFOEX oi;
3785   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3786   GetVersionEx((OSVERSIONINFO*)&oi);
3787   switch (oi.dwPlatformId) {
3788   case VER_PLATFORM_WIN32_NT:
3789     {
3790       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3791       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3792           oi.wProductType == VER_NT_SERVER) {
3793         _is_windows_server = true;
3794       }
3795     }
3796     break;
3797   default: fatal("Unknown platform");
3798   }
3799 
3800   _default_stack_size = os::current_stack_size();
3801   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3802   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3803          "stack size not a multiple of page size");
3804 
3805   initialize_performance_counter();
3806 }
3807 
3808 
3809 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3810                                       int ebuflen) {
3811   char path[MAX_PATH];
3812   DWORD size;
3813   DWORD pathLen = (DWORD)sizeof(path);
3814   HINSTANCE result = NULL;
3815 
3816   // only allow library name without path component
3817   assert(strchr(name, '\\') == NULL, "path not allowed");
3818   assert(strchr(name, ':') == NULL, "path not allowed");
3819   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3820     jio_snprintf(ebuf, ebuflen,
3821                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3822     return NULL;
3823   }
3824 
3825   // search system directory
3826   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3827     if (size >= pathLen) {
3828       return NULL; // truncated
3829     }
3830     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3831       return NULL; // truncated
3832     }
3833     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3834       return result;
3835     }
3836   }
3837 
3838   // try Windows directory
3839   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3840     if (size >= pathLen) {
3841       return NULL; // truncated
3842     }
3843     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3844       return NULL; // truncated
3845     }
3846     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3847       return result;
3848     }
3849   }
3850 
3851   jio_snprintf(ebuf, ebuflen,
3852                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3853   return NULL;
3854 }
3855 
3856 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3857 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3858 
3859 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3860   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3861   return TRUE;
3862 }
3863 
3864 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3865   // Basic approach:
3866   //  - Each exiting thread registers its intent to exit and then does so.
3867   //  - A thread trying to terminate the process must wait for all
3868   //    threads currently exiting to complete their exit.
3869 
3870   if (os::win32::has_exit_bug()) {
3871     // The array holds handles of the threads that have started exiting by calling
3872     // _endthreadex().
3873     // Should be large enough to avoid blocking the exiting thread due to lack of
3874     // a free slot.
3875     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3876     static int handle_count = 0;
3877 
3878     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3879     static CRITICAL_SECTION crit_sect;
3880     static volatile DWORD process_exiting = 0;
3881     int i, j;
3882     DWORD res;
3883     HANDLE hproc, hthr;
3884 
3885     // We only attempt to register threads until a process exiting
3886     // thread manages to set the process_exiting flag. Any threads
3887     // that come through here after the process_exiting flag is set
3888     // are unregistered and will be caught in the SuspendThread()
3889     // infinite loop below.
3890     bool registered = false;
3891 
    // The first thread that reaches this point initializes the critical section.
3893     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3894       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3895     } else if (Atomic::load_acquire(&process_exiting) == 0) {
3896       if (what != EPT_THREAD) {
3897         // Atomically set process_exiting before the critical section
3898         // to increase the visibility between racing threads.
3899         Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
3900       }
3901       EnterCriticalSection(&crit_sect);
3902 
3903       if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
3904         // Remove from the array those handles of the threads that have completed exiting.
3905         for (i = 0, j = 0; i < handle_count; ++i) {
3906           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3907           if (res == WAIT_TIMEOUT) {
3908             handles[j++] = handles[i];
3909           } else {
3910             if (res == WAIT_FAILED) {
3911               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3912                       GetLastError(), __FILE__, __LINE__);
3913             }
3914             // Don't keep the handle, if we failed waiting for it.
3915             CloseHandle(handles[i]);
3916           }
3917         }
3918 
3919         // If there's no free slot in the array of the kept handles, we'll have to
3920         // wait until at least one thread completes exiting.
3921         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3922           // Raise the priority of the oldest exiting thread to increase its chances
3923           // to complete sooner.
3924           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3925           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3926           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3927             i = (res - WAIT_OBJECT_0);
3928             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3929             for (; i < handle_count; ++i) {
3930               handles[i] = handles[i + 1];
3931             }
3932           } else {
3933             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3934                     (res == WAIT_FAILED ? "failed" : "timed out"),
3935                     GetLastError(), __FILE__, __LINE__);
3936             // Don't keep handles, if we failed waiting for them.
3937             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3938               CloseHandle(handles[i]);
3939             }
3940             handle_count = 0;
3941           }
3942         }
3943 
3944         // Store a duplicate of the current thread handle in the array of handles.
3945         hproc = GetCurrentProcess();
3946         hthr = GetCurrentThread();
3947         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3948                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3949           warning("DuplicateHandle failed (%u) in %s: %d\n",
3950                   GetLastError(), __FILE__, __LINE__);
3951 
3952           // We can't register this thread (no more handles) so this thread
3953           // may be racing with a thread that is calling exit(). If the thread
3954           // that is calling exit() has managed to set the process_exiting
3955           // flag, then this thread will be caught in the SuspendThread()
3956           // infinite loop below which closes that race. A small timing
3957           // window remains before the process_exiting flag is set, but it
3958           // is only exposed when we are out of handles.
3959         } else {
3960           ++handle_count;
3961           registered = true;
3962 
3963           // The current exiting thread has stored its handle in the array, and now
3964           // should leave the critical section before calling _endthreadex().
3965         }
3966 
3967       } else if (what != EPT_THREAD && handle_count > 0) {
3968         jlong start_time, finish_time, timeout_left;
3969         // Before ending the process, make sure all the threads that had called
3970         // _endthreadex() completed.
3971 
3972         // Set the priority level of the current thread to the same value as
3973         // the priority level of exiting threads.
3974         // This is to ensure it will be given a fair chance to execute if
3975         // the timeout expires.
3976         hthr = GetCurrentThread();
3977         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3978         start_time = os::javaTimeNanos();
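        // EXIT_TIMEOUT is in milliseconds; convert to nanoseconds to match javaTimeNanos().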
3979         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3980         for (i = 0; ; ) {
3981           int portion_count = handle_count - i;
3982           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3983             portion_count = MAXIMUM_WAIT_OBJECTS;
3984           }
3985           for (j = 0; j < portion_count; ++j) {
3986             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3987           }
3988           timeout_left = (finish_time - start_time) / 1000000L;
3989           if (timeout_left < 0) {
3990             timeout_left = 0;
3991           }
3992           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
3993           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
3994             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3995                     (res == WAIT_FAILED ? "failed" : "timed out"),
3996                     GetLastError(), __FILE__, __LINE__);
3997             // Reset portion_count so we close the remaining
3998             // handles due to this error.
3999             portion_count = handle_count - i;
4000           }
4001           for (j = 0; j < portion_count; ++j) {
4002             CloseHandle(handles[i + j]);
4003           }
4004           if ((i += portion_count) >= handle_count) {
4005             break;
4006           }
4007           start_time = os::javaTimeNanos();
4008         }
4009         handle_count = 0;
4010       }
4011 
4012       LeaveCriticalSection(&crit_sect);
4013     }
4014 
4015     if (!registered &&
4016         Atomic::load_acquire(&process_exiting) != 0 &&
4017         process_exiting != GetCurrentThreadId()) {
4018       // Some other thread is about to call exit(), so we don't let
4019       // the current unregistered thread proceed to exit() or _endthreadex()
4020       while (true) {
4021         SuspendThread(GetCurrentThread());
4022         // Avoid busy-wait loop, if SuspendThread() failed.
4023         Sleep(EXIT_TIMEOUT);
4024       }
4025     }
4026   }
4027 
4028   // We are here if either
4029   // - there's no 'race at exit' bug on this OS release;
4030   // - initialization of the critical section failed (unlikely);
4031   // - the current thread has registered itself and left the critical section;
4032   // - the process-exiting thread has raised the flag and left the critical section.
4033   if (what == EPT_THREAD) {
4034     _endthreadex((unsigned)exit_code);
4035   } else if (what == EPT_PROCESS) {
4036     ::exit(exit_code);
4037   } else {
4038     _exit(exit_code);
4039   }
4040 
4041   // Should not reach here
4042   return exit_code;
4043 }
4044 
4045 #undef EXIT_TIMEOUT
4046 
4047 void os::win32::setmode_streams() {
4048   _setmode(_fileno(stdin), _O_BINARY);
4049   _setmode(_fileno(stdout), _O_BINARY);
4050   _setmode(_fileno(stderr), _O_BINARY);
4051 }
4052 
4053 void os::wait_for_keypress_at_exit(void) {
4054   if (PauseAtExit) {
4055     fprintf(stderr, "Press any key to continue...\n");
4056     fgetc(stdin);
4057   }
4058 }
4059 
4060 
4061 bool os::message_box(const char* title, const char* message) {
4062   int result = MessageBox(NULL, message, title,
4063                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4064   return result == IDYES;
4065 }
4066 
4067 #ifndef PRODUCT
4068 #ifndef _WIN64
4069 // Helpers to check whether NX protection is enabled
4070 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4071   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4072       pex->ExceptionRecord->NumberParameters > 0 &&
4073       pex->ExceptionRecord->ExceptionInformation[0] ==
4074       EXCEPTION_INFO_EXEC_VIOLATION) {
4075     return EXCEPTION_EXECUTE_HANDLER;
4076   }
4077   return EXCEPTION_CONTINUE_SEARCH;
4078 }
4079 
4080 void nx_check_protection() {
4081   // If NX is enabled we'll get an exception calling into code on the stack
4082   char code[] = { (char)0xC3 }; // ret
4083   void *code_ptr = (void *)code;
4084   __try {
4085     __asm call code_ptr
4086   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4087     tty->print_raw_cr("NX protection detected.");
4088   }
4089 }
4090 #endif // _WIN64
4091 #endif // PRODUCT
4092 
4093 // This is called _before_ the global arguments have been parsed
4094 void os::init(void) {
4095   _initial_pid = _getpid();
4096 
4097   init_random(1234567);
4098 
4099   win32::initialize_system_info();
4100   win32::setmode_streams();
4101   init_page_sizes((size_t) win32::vm_page_size());
4102 
4103   // This may be overridden later when argument processing is done.
4104   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
4105 
4106   // Initialize main_process and main_thread
4107   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4108   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4109                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4110     fatal("DuplicateHandle failed\n");
4111   }
4112   main_thread_id = (int) GetCurrentThreadId();
4113 
4114   // initialize fast thread access - only used for 32-bit
4115   win32::initialize_thread_ptr_offset();
4116 }
4117 
4118 // To install functions for atexit processing
4119 extern "C" {
4120   static void perfMemory_exit_helper() {
4121     perfMemory_exit();
4122   }
4123 }
4124 
4125 static jint initSock();
4126 
4127 // this is called _after_ the global arguments have been parsed
4128 jint os::init_2(void) {
4129 
4130   // This could be set any time but all platforms
4131   // have to set it the same so we have to mirror Solaris.
4132   DEBUG_ONLY(os::set_mutex_init_done();)
4133 
4134   // Setup Windows Exceptions
4135 
4136 #if INCLUDE_AOT
4137   // If AOT is enabled we need to install a vectored exception handler
4138   // in order to forward implicit exceptions from code in AOT
4139   // generated DLLs.  This is necessary since these DLLs are not
4140   // registered for structured exceptions like codecache methods are.
4141   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4142     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4143   }
4144 #endif
4145 
4146   // for debugging float code generation bugs
4147   if (ForceFloatExceptions) {
4148 #ifndef  _WIN64
4149     static long fp_control_word = 0;
4150     __asm { fstcw fp_control_word }
4151     // see Intel PPro Manual, Vol. 2, p 7-16
4152     const long precision = 0x20;
4153     const long underflow = 0x10;
4154     const long overflow  = 0x08;
4155     const long zero_div  = 0x04;
4156     const long denorm    = 0x02;
4157     const long invalid   = 0x01;
4158     fp_control_word |= invalid;
4159     __asm { fldcw fp_control_word }
4160 #endif
4161   }
4162 
  // If stack_commit_size is 0, Windows will reserve the default size,
4164   // but only commit a small portion of it.
4165   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4166   size_t default_reserve_size = os::win32::default_stack_size();
4167   size_t actual_reserve_size = stack_commit_size;
4168   if (stack_commit_size < default_reserve_size) {
4169     // If stack_commit_size == 0, we want this too
4170     actual_reserve_size = default_reserve_size;
4171   }
4172 
4173   // Check minimum allowable stack size for thread creation and to initialize
4174   // the java system classes, including StackOverflowError - depends on page
4175   // size.  Add two 4K pages for compiler2 recursion in main thread.
4176   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4177   // class initialization depending on 32 or 64 bit VM.
4178   size_t min_stack_allowed =
4179             (size_t)(JavaThread::stack_guard_zone_size() +
4180                      JavaThread::stack_shadow_zone_size() +
4181                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4182 
4183   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4184 
4185   if (actual_reserve_size < min_stack_allowed) {
4186     tty->print_cr("\nThe Java thread stack size specified is too small. "
4187                   "Specify at least %dk",
4188                   min_stack_allowed / K);
4189     return JNI_ERR;
4190   }
4191 
4192   JavaThread::set_stack_size_at_create(stack_commit_size);
4193 
  // Calculate the theoretical max. number of threads to guard against artificial
4195   // out-of-memory situations, where all available address-space has been
4196   // reserved by thread stacks.
4197   assert(actual_reserve_size != 0, "Must have a stack");
4198 
4199   // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently this is when the threads have used all but 200MB of space.
4201   //
4202   // TODO: consider performing a similar calculation for commit size instead
4203   // as reserve size, since on a 64-bit platform we'll run into that more
4204   // often than running out of virtual memory space.  We can use the
4205   // lower value of the two calculations as the os_thread_limit.
4206   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4207   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4208 
4209   // at exit methods are called in the reverse order of their registration.
4210   // there is no limit to the number of functions registered. atexit does
4211   // not set errno.
4212 
4213   if (PerfAllowAtExitRegistration) {
4214     // only register atexit functions if PerfAllowAtExitRegistration is set.
4215     // atexit functions can be delayed until process exit time, which
4216     // can be problematic for embedded VM situations. Embedded VMs should
4217     // call DestroyJavaVM() to assure that VM resources are released.
4218 
4219     // note: perfMemory_exit_helper atexit function may be removed in
4220     // the future if the appropriate cleanup code can be added to the
4221     // VM_Exit VMOperation's doit method.
4222     if (atexit(perfMemory_exit_helper) != 0) {
4223       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4224     }
4225   }
4226 
4227 #ifndef _WIN64
4228   // Print something if NX is enabled (win32 on AMD64)
4229   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4230 #endif
4231 
4232   // initialize thread priority policy
4233   prio_init();
4234 
4235   if (UseNUMA && !ForceNUMA) {
4236     UseNUMA = false; // We don't fully support this yet
4237   }
4238 
4239   if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
4240     if (!numa_interleaving_init()) {
4241       FLAG_SET_ERGO(UseNUMAInterleaving, false);
4242     } else if (!UseNUMAInterleaving) {
4243       // When NUMA requested, not-NUMA-aware allocations default to interleaving.
4244       FLAG_SET_ERGO(UseNUMAInterleaving, true);
4245     }
4246   }
4247 
4248   if (initSock() != JNI_OK) {
4249     return JNI_ERR;
4250   }
4251 
4252   SymbolEngine::recalc_search_path();
4253 
4254   // Initialize data for jdk.internal.misc.Signal
4255   if (!ReduceSignalUsage) {
4256     jdk_misc_signal_init();
4257   }
4258 
4259   return JNI_OK;
4260 }
4261 
4262 // combine the high and low DWORD into a ULONGLONG
4263 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4264   ULONGLONG value = high_word;
4265   value <<= sizeof(high_word) * 8;
4266   value |= low_word;
4267   return value;
4268 }
4269 
4270 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4271 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4272   ::memset((void*)sbuf, 0, sizeof(struct stat));
4273   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4274   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4275                                   file_data.ftLastWriteTime.dwLowDateTime);
4276   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4277                                   file_data.ftCreationTime.dwLowDateTime);
4278   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4279                                   file_data.ftLastAccessTime.dwLowDateTime);
4280   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4281     sbuf->st_mode |= S_IFDIR;
4282   } else {
4283     sbuf->st_mode |= S_IFREG;
4284   }
4285 }
4286 
4287 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
4288   // Get required buffer size to convert to Unicode
4289   int unicode_path_len = MultiByteToWideChar(CP_ACP,
4290                                              MB_ERR_INVALID_CHARS,
4291                                              char_path, -1,
4292                                              NULL, 0);
4293   if (unicode_path_len == 0) {
4294     return EINVAL;
4295   }
4296 
4297   *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, unicode_path_len, mtInternal);
4298 
4299   int result = MultiByteToWideChar(CP_ACP,
4300                                    MB_ERR_INVALID_CHARS,
4301                                    char_path, -1,
4302                                    *unicode_path, unicode_path_len);
4303   assert(result == unicode_path_len, "length already checked above");
4304 
4305   return ERROR_SUCCESS;
4306 }
4307 
4308 static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
4309   // Get required buffer size to convert to full path. The return
4310   // value INCLUDES the terminating null character.
4311   DWORD full_path_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
4312   if (full_path_len == 0) {
4313     return EINVAL;
4314   }
4315 
4316   *full_path = NEW_C_HEAP_ARRAY(WCHAR, full_path_len, mtInternal);
4317 
4318   // When the buffer has sufficient size, the return value EXCLUDES the
4319   // terminating null character
4320   DWORD result = GetFullPathNameW(unicode_path, full_path_len, *full_path, NULL);
4321   assert(result <= full_path_len, "length already checked above");
4322 
4323   return ERROR_SUCCESS;
4324 }
4325 
4326 static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
4327   *prefix_off = 0;
4328   *needs_fullpath = true;
4329 
4330   if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
4331     *prefix = L"\\\\?\\";
4332   } else if (buf[0] == '\\' && buf[1] == '\\') {
4333     if (buf[2] == '?' && buf[3] == '\\') {
4334       *prefix = L"";
4335       *needs_fullpath = false;
4336     } else {
4337       *prefix = L"\\\\?\\UNC";
4338       *prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4339     }
4340   } else {
4341     *prefix = L"\\\\?\\";
4342   }
4343 }
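
     // set_path_prefix() examples, for illustration only; the final strings are
     // assembled by wide_abs_unc_path() below, which prepends the chosen prefix and
     // skips prefix_off characters of the converted path:
     //
     //   C:\dir\file       -> \\?\C:\dir\file          (drive-absolute path)
     //   \\server\share\f  -> \\?\UNC\server\share\f   (UNC; first '\' folded into the prefix)
     //   \\?\C:\dir\file   -> unchanged                (already in long-path form)
     //   dir\file          -> \\?\<absolute path>      (relative, made absolute first)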
4344 
4345 // Returns the given path as an absolute wide path in UNC format. The returned path is NULL
4346 // on error (with err being set accordingly) and should be freed via os::free() otherwise.
4347 // additional_space is the size of space, in wchar_t, the function will additionally add to
4348 // the allocation of return buffer (such that the size of the returned buffer is at least
4349 // wcslen(buf) + 1 + additional_space).
4350 static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
4351   if ((path == NULL) || (path[0] == '\0')) {
4352     err = ENOENT;
4353     return NULL;
4354   }
4355 
4356   // Need to allocate at least room for 3 characters, since os::native_path transforms "C:" to "C:.".
4357   size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
4358   char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
4359   strncpy(buf, path, buf_len);
4360   os::native_path(buf);
4361 
4362   LPWSTR prefix = NULL;
4363   int prefix_off = 0;
4364   bool needs_fullpath = true;
4365   set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);
4366 
4367   LPWSTR unicode_path = NULL;
4368   err = convert_to_unicode(buf, &unicode_path);
4369   FREE_C_HEAP_ARRAY(char, buf);
4370   if (err != ERROR_SUCCESS) {
4371     return NULL;
4372   }
4373 
4374   LPWSTR converted_path = NULL;
4375   if (needs_fullpath) {
4376     err = get_full_path(unicode_path, &converted_path);
4377   } else {
4378     converted_path = unicode_path;
4379   }
4380 
4381   LPWSTR result = NULL;
4382   if (converted_path != NULL) {
4383     size_t prefix_len = wcslen(prefix);
4384     size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
4385     result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
4386     _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);
4387 
4388     // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
4389     result_len = wcslen(result);
4390     if ((result[result_len - 1] == L'\\') &&
4391         !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
4392       result[result_len - 1] = L'\0';
4393     }
4394   }
4395 
4396   if (converted_path != unicode_path) {
4397     FREE_C_HEAP_ARRAY(WCHAR, converted_path);
4398   }
4399   FREE_C_HEAP_ARRAY(WCHAR, unicode_path);
4400 
4401   return static_cast<wchar_t*>(result); // LPWSTR and wchar_t* are the same type on Windows.
4402 }
4403 
4404 int os::stat(const char *path, struct stat *sbuf) {
4405   errno_t err;
4406   wchar_t* wide_path = wide_abs_unc_path(path, err);
4407 
4408   if (wide_path == NULL) {
4409     errno = err;
4410     return -1;
4411   }
4412 
4413   WIN32_FILE_ATTRIBUTE_DATA file_data;
4414   BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4415   os::free(wide_path);
4416 
4417   if (!bret) {
4418     errno = ::GetLastError();
4419     return -1;
4420   }
4421 
4422   file_attribute_data_to_stat(sbuf, file_data);
4423   return 0;
4424 }
4425 
4426 static HANDLE create_read_only_file_handle(const char* file) {
4427   errno_t err;
4428   wchar_t* wide_path = wide_abs_unc_path(file, err);
4429 
4430   if (wide_path == NULL) {
4431     errno = err;
4432     return INVALID_HANDLE_VALUE;
4433   }
4434 
4435   HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4436                                 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4437   os::free(wide_path);
4438 
4439   return handle;
4440 }
4441 
4442 bool os::same_files(const char* file1, const char* file2) {
4443 
4444   if (file1 == NULL && file2 == NULL) {
4445     return true;
4446   }
4447 
4448   if (file1 == NULL || file2 == NULL) {
4449     return false;
4450   }
4451 
4452   if (strcmp(file1, file2) == 0) {
4453     return true;
4454   }
4455 
4456   HANDLE handle1 = create_read_only_file_handle(file1);
4457   HANDLE handle2 = create_read_only_file_handle(file2);
4458   bool result = false;
4459 
4460   // if we could open both paths...
4461   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4462     BY_HANDLE_FILE_INFORMATION fileInfo1;
4463     BY_HANDLE_FILE_INFORMATION fileInfo2;
4464     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4465       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4466       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4467       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4468         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4469         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4470         result = true;
4471       }
4472     }
4473   }
4474 
4475   // free the handles
4476   if (handle1 != INVALID_HANDLE_VALUE) {
4477     ::CloseHandle(handle1);
4478   }
4479 
4480   if (handle2 != INVALID_HANDLE_VALUE) {
4481     ::CloseHandle(handle2);
4482   }
4483 
4484   return result;
4485 }
4486 
4487 #define FT2INT64(ft) \
4488   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
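
     // FILETIME counts 100-nanosecond intervals, so FT2INT64() yields 100 ns ticks and
     // the "* 100" in thread_cpu_time() below converts them to nanoseconds. A worked
     // example, for illustration only: one second of CPU time is
     //
     //   FT2INT64(ft) == 10000000           // 100 ns ticks
     //   FT2INT64(ft) * 100 == 1000000000   // nanoseconds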
4489 
4490 
4491 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4492 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4493 // of a thread.
4494 //
4495 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4496 // the fast estimate available on the platform.
4497 
4498 // current_thread_cpu_time() is not optimized for Windows yet
4499 jlong os::current_thread_cpu_time() {
4500   // return user + sys since the cost is the same
4501   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4502 }
4503 
4504 jlong os::thread_cpu_time(Thread* thread) {
4505   // consistent with what current_thread_cpu_time() returns.
4506   return os::thread_cpu_time(thread, true /* user+sys */);
4507 }
4508 
4509 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4510   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4511 }
4512 
4513 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4514   // This code is copied from the classic VM -> hpi::sysThreadCPUTime
4515   // If this function changes, os::is_thread_cpu_time_supported() should too
4516   FILETIME CreationTime;
4517   FILETIME ExitTime;
4518   FILETIME KernelTime;
4519   FILETIME UserTime;
4520 
4521   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4522                       &ExitTime, &KernelTime, &UserTime) == 0) {
4523     return -1;
4524   } else if (user_sys_cpu_time) {
4525     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4526   } else {
4527     return FT2INT64(UserTime) * 100;
4528   }
4529 }
4530 
4531 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4532   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4533   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4534   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4535   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4536 }
4537 
4538 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4539   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4540   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4541   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4542   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4543 }
4544 
4545 bool os::is_thread_cpu_time_supported() {
4546   // see os::thread_cpu_time
4547   FILETIME CreationTime;
4548   FILETIME ExitTime;
4549   FILETIME KernelTime;
4550   FILETIME UserTime;
4551 
4552   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4553                       &KernelTime, &UserTime) == 0) {
4554     return false;
4555   } else {
4556     return true;
4557   }
4558 }
4559 
4560 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4561 // It does have primitives (PDH API) to get CPU usage and run queue length.
4562 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4563 // If we wanted to implement loadavg on Windows, we have a few options:
4564 //
4565 // a) Query CPU usage and run queue length and "fake" an answer by
4566 //    returning the CPU usage if it's under 100%, and the run queue
4567 //    length otherwise.  It turns out that querying is pretty slow
4568 //    on Windows, on the order of 200 microseconds on a fast machine.
4569 //    Note that on Windows the CPU usage value is the % usage
4570 //    since the last time the API was called (and the first call
4571 //    returns 100%), so we'd have to deal with that as well.
4572 //
4573 // b) Sample the "fake" answer using a sampling thread and store
4574 //    the answer in a global variable.  The call to loadavg would
4575 //    just return the value of the global, avoiding the slow query.
4576 //
4577 // c) Sample a better answer using exponential decay to smooth the
4578 //    value.  This is basically the algorithm used by UNIX kernels.
4579 //
4580 // Note that sampling thread starvation could affect both (b) and (c).
4581 int os::loadavg(double loadavg[], int nelem) {
4582   return -1;
4583 }
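
     // A minimal sketch of option (c) above, assuming a hypothetical sampling thread
     // that observes 'n' runnable threads every 'interval_sec' seconds; the 5-second
     // smoothing constant is an arbitrary assumption, not a measured value:
     //
     //   static double sampled_load = 0.0;
     //
     //   static void sample_loadavg(double n, double interval_sec) {
     //     const double tau = 5.0;                    // smoothing time constant, seconds
     //     double decay = exp(-interval_sec / tau);   // exponential decay factor
     //     sampled_load = sampled_load * decay + n * (1.0 - decay);
     //   }
     //
     //   // loadavg() would then report sampled_load instead of returning -1.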
4584 
4585 
4586 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4587 bool os::dont_yield() {
4588   return DontYieldALot;
4589 }
4590 
4591 int os::open(const char *path, int oflag, int mode) {
4592   errno_t err;
4593   wchar_t* wide_path = wide_abs_unc_path(path, err);
4594 
4595   if (wide_path == NULL) {
4596     errno = err;
4597     return -1;
4598   }
4599   int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
4600   os::free(wide_path);
4601 
4602   if (fd == -1) {
4603     errno = ::GetLastError();
4604   }
4605 
4606   return fd;
4607 }
4608 
4609 FILE* os::open(int fd, const char* mode) {
4610   return ::_fdopen(fd, mode);
4611 }
4612 
4613 // Is a (classpath) directory empty?
4614 bool os::dir_is_empty(const char* path) {
4615   errno_t err;
4616   wchar_t* wide_path = wide_abs_unc_path(path, err, 2);
4617 
4618   if (wide_path == NULL) {
4619     errno = err;
4620     return false;
4621   }
4622 
4623   // Make sure we end with "\\*"
4624   if (wide_path[wcslen(wide_path) - 1] == L'\\') {
4625     wcscat(wide_path, L"*");
4626   } else {
4627     wcscat(wide_path, L"\\*");
4628   }
4629 
4630   WIN32_FIND_DATAW fd;
4631   HANDLE f = ::FindFirstFileW(wide_path, &fd);
4632   os::free(wide_path);
4633   bool is_empty = true;
4634 
4635   if (f != INVALID_HANDLE_VALUE) {
4636     while (is_empty && ::FindNextFileW(f, &fd)) {
4637       // An empty directory contains only the current directory file
4638       // and the previous directory file.
4639       if ((wcscmp(fd.cFileName, L".") != 0) &&
4640           (wcscmp(fd.cFileName, L"..") != 0)) {
4641         is_empty = false;
4642       }
4643     }
4644     FindClose(f);
4645   } else {
4646     errno = ::GetLastError();
4647   }
4648 
4649   return is_empty;
4650 }
4651 
4652 // create binary file, rewriting existing file if required
4653 int os::create_binary_file(const char* path, bool rewrite_existing) {
4654   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4655   if (!rewrite_existing) {
4656     oflags |= _O_EXCL;
4657   }
4658   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4659 }
4660 
4661 // return current position of file pointer
4662 jlong os::current_file_offset(int fd) {
4663   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4664 }
4665 
4666 // move file pointer to the specified offset
4667 jlong os::seek_to_file_offset(int fd, jlong offset) {
4668   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4669 }
4670 
4671 
4672 jlong os::lseek(int fd, jlong offset, int whence) {
4673   return (jlong) ::_lseeki64(fd, offset, whence);
4674 }
4675 
4676 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4677   OVERLAPPED ov;
4678   DWORD nread;
4679   BOOL result;
4680 
4681   ZeroMemory(&ov, sizeof(ov));
4682   ov.Offset = (DWORD)offset;
4683   ov.OffsetHigh = (DWORD)(offset >> 32);
4684 
4685   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4686 
4687   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4688 
4689   return result ? nread : 0;
4690 }
4691 
4692 
4693 // This method is a slightly reworked copy of JDK's sysNativePath
4694 // from src/windows/hpi/src/path_md.c
4695 
4696 // Convert a pathname to native format.  On win32, this involves forcing all
4697 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4698 // sometimes rejects '/') and removing redundant separators.  The input path is
4699 // assumed to have been converted into the character encoding used by the local
4700 // system.  Because this might be a double-byte encoding, care is taken to
4701 // treat double-byte lead characters correctly.
4702 //
4703 // This procedure modifies the given path in place, as the result is never
4704 // longer than the original.  There is no error return; this operation always
4705 // succeeds.
4706 char * os::native_path(char *path) {
4707   char *src = path, *dst = path, *end = path;
4708   char *colon = NULL;  // If a drive specifier is found, this will
4709                        // point to the colon following the drive letter
4710 
4711   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4712   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4713           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4714 
4715   // Check for leading separators
4716 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4717   while (isfilesep(*src)) {
4718     src++;
4719   }
4720 
4721   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4722     // Remove leading separators if followed by drive specifier.  This
4723     // hack is necessary to support file URLs containing drive
4724     // specifiers (e.g., "file://c:/path").  As a side effect,
4725     // "/c:/path" can be used as an alternative to "c:/path".
4726     *dst++ = *src++;
4727     colon = dst;
4728     *dst++ = ':';
4729     src++;
4730   } else {
4731     src = path;
4732     if (isfilesep(src[0]) && isfilesep(src[1])) {
4733       // UNC pathname: Retain first separator; leave src pointed at
4734       // second separator so that further separators will be collapsed
4735       // into the second separator.  The result will be a pathname
4736       // beginning with "\\\\" followed (most likely) by a host name.
4737       src = dst = path + 1;
4738       path[0] = '\\';     // Force first separator to '\\'
4739     }
4740   }
4741 
4742   end = dst;
4743 
4744   // Remove redundant separators from remainder of path, forcing all
4745   // separators to be '\\' rather than '/'. Also, single byte space
4746   // characters are removed from the end of the path because those
4747   // are not legal ending characters on this operating system.
4748   //
4749   while (*src != '\0') {
4750     if (isfilesep(*src)) {
4751       *dst++ = '\\'; src++;
4752       while (isfilesep(*src)) src++;
4753       if (*src == '\0') {
4754         // Check for trailing separator
4755         end = dst;
4756         if (colon == dst - 2) break;  // "z:\\"
4757         if (dst == path + 1) break;   // "\\"
4758         if (dst == path + 2 && isfilesep(path[0])) {
4759           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4760           // beginning of a UNC pathname.  Even though it is not, by
4761           // itself, a valid UNC pathname, we leave it as is in order
4762           // to be consistent with the path canonicalizer as well
4763           // as the win32 APIs, which treat this case as an invalid
4764           // UNC pathname rather than as an alias for the root
4765           // directory of the current drive.
4766           break;
4767         }
4768         end = --dst;  // Path does not denote a root directory, so
4769                       // remove trailing separator
4770         break;
4771       }
4772       end = dst;
4773     } else {
4774       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4775         *dst++ = *src++;
4776         if (*src) *dst++ = *src++;
4777         end = dst;
4778       } else {  // Copy a single-byte character
4779         char c = *src++;
4780         *dst++ = c;
4781         // Space is not a legal ending character
4782         if (c != ' ') end = dst;
4783       }
4784     }
4785   }
4786 
4787   *end = '\0';
4788 
4789   // For "z:", add "." to work around a bug in the C runtime library
4790   if (colon == dst - 1) {
4791     path[2] = '.';
4792     path[3] = '\0';
4793   }
4794 
4795   return path;
4796 }
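
     // A few illustrative (not exhaustive) native_path() transformations:
     //
     //   /c:/path//to/file  -> c:\path\to\file   (leading '/' dropped before a drive spec)
     //   a/b//c/            -> a\b\c             (separators collapsed, trailing one removed)
     //   //server//share    -> \\server\share    (UNC: leading separator pair preserved)
     //   C:                 -> C:.               (the C runtime workaround above)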
4797 
4798 // This code is a copy of JDK's sysSetLength
4799 // from src/windows/hpi/src/sys_api_md.c
4800 
4801 int os::ftruncate(int fd, jlong length) {
4802   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4803   long high = (long)(length >> 32);
4804   DWORD ret;
4805 
4806   if (h == (HANDLE)(-1)) {
4807     return -1;
4808   }
4809 
4810   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4811   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4812     return -1;
4813   }
4814 
4815   if (::SetEndOfFile(h) == FALSE) {
4816     return -1;
4817   }
4818 
4819   return 0;
4820 }
4821 
4822 int os::get_fileno(FILE* fp) {
4823   return _fileno(fp);
4824 }
4825 
4826 // This code is a copy of JDK's sysSync
4827 // from src/windows/hpi/src/sys_api_md.c
4828 // except for the legacy workaround for a bug in Win 98
4829 
4830 int os::fsync(int fd) {
4831   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4832 
4833   if ((!::FlushFileBuffers(handle)) &&
4834       (GetLastError() != ERROR_ACCESS_DENIED)) {
4835     // from winerror.h
4836     return -1;
4837   }
4838   return 0;
4839 }
4840 
4841 static int nonSeekAvailable(int, long *);
4842 static int stdinAvailable(int, long *);
4843 
4844 // This code is a copy of JDK's sysAvailable
4845 // from src/windows/hpi/src/sys_api_md.c
4846 
4847 int os::available(int fd, jlong *bytes) {
4848   jlong cur, end;
4849   struct _stati64 stbuf64;
4850 
4851   if (::_fstati64(fd, &stbuf64) >= 0) {
4852     int mode = stbuf64.st_mode;
4853     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4854       int ret;
4855       long lpbytes;
4856       if (fd == 0) {
4857         ret = stdinAvailable(fd, &lpbytes);
4858       } else {
4859         ret = nonSeekAvailable(fd, &lpbytes);
4860       }
4861       (*bytes) = (jlong)(lpbytes);
4862       return ret;
4863     }
4864     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4865       return FALSE;
4866     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4867       return FALSE;
4868     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4869       return FALSE;
4870     }
4871     *bytes = end - cur;
4872     return TRUE;
4873   } else {
4874     return FALSE;
4875   }
4876 }
4877 
4878 void os::flockfile(FILE* fp) {
4879   _lock_file(fp);
4880 }
4881 
4882 void os::funlockfile(FILE* fp) {
4883   _unlock_file(fp);
4884 }
4885 
4886 // This code is a copy of JDK's nonSeekAvailable
4887 // from src/windows/hpi/src/sys_api_md.c
4888 
4889 static int nonSeekAvailable(int fd, long *pbytes) {
4890   // This is used for available on non-seekable devices
4891   // (like both named and anonymous pipes, such as pipes
4892   //  connected to an exec'd process).
4893   // Standard Input is a special case.
4894   HANDLE han;
4895 
4896   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4897     return FALSE;
4898   }
4899 
4900   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4901     // PeekNamedPipe fails when at EOF.  In that case we
4902     // simply make *pbytes = 0 which is consistent with the
4903     // behavior we get on Solaris when an fd is at EOF.
4904     // The only alternative is to raise an Exception,
4905     // which isn't really warranted.
4906     //
4907     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4908       return FALSE;
4909     }
4910     *pbytes = 0;
4911   }
4912   return TRUE;
4913 }
4914 
4915 #define MAX_INPUT_EVENTS 2000
4916 
4917 // This code is a copy of JDK's stdinAvailable
4918 // from src/windows/hpi/src/sys_api_md.c
4919 
4920 static int stdinAvailable(int fd, long *pbytes) {
4921   HANDLE han;
4922   DWORD numEventsRead = 0;  // Number of events read from buffer
4923   DWORD numEvents = 0;      // Number of events in buffer
4924   DWORD i = 0;              // Loop index
4925   DWORD curLength = 0;      // Position marker
4926   DWORD actualLength = 0;   // Number of bytes readable
4927   BOOL error = FALSE;       // Error holder
4928   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4929 
4930   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4931     return FALSE;
4932   }
4933 
4934   // Construct an array of input records in the console buffer
4935   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4936   if (error == 0) {
4937     return nonSeekAvailable(fd, pbytes);
4938   }
4939 
4940   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4941   if (numEvents > MAX_INPUT_EVENTS) {
4942     numEvents = MAX_INPUT_EVENTS;
4943   }
4944 
4945   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4946   if (lpBuffer == NULL) {
4947     return FALSE;
4948   }
4949 
4950   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4951   if (error == 0) {
4952     os::free(lpBuffer);
4953     return FALSE;
4954   }
4955 
4956   // Examine input records for the number of bytes available
4957   for (i=0; i<numEvents; i++) {
4958     if (lpBuffer[i].EventType == KEY_EVENT) {
4959 
4960       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4961                                       &(lpBuffer[i].Event);
4962       if (keyRecord->bKeyDown == TRUE) {
4963         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4964         curLength++;
4965         if (*keyPressed == '\r') {
4966           actualLength = curLength;
4967         }
4968       }
4969     }
4970   }
4971 
4972   if (lpBuffer != NULL) {
4973     os::free(lpBuffer);
4974   }
4975 
4976   *pbytes = (long) actualLength;
4977   return TRUE;
4978 }
4979 
4980 // Map a block of memory.
4981 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4982                         char *addr, size_t bytes, bool read_only,
4983                         bool allow_exec) {
4984   HANDLE hFile;
4985   char* base;
4986 
4987   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4988                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4989   if (hFile == NULL) {
4990     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4991     return NULL;
4992   }
4993 
4994   if (allow_exec) {
4995     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4996     // unless it comes from a PE image (which the shared archive is not.)
4997     // Even VirtualProtect refuses to give execute access to mapped memory
4998     // that was not previously executable.
4999     //
5000     // Instead, stick the executable region in anonymous memory.  Yuck.
5001     // Penalty is that ~4 pages will not be shareable - in the future
5002     // we might consider DLLizing the shared archive with a proper PE
5003     // header so that mapping executable + sharing is possible.
5004 
5005     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
5006                                 PAGE_READWRITE);
5007     if (base == NULL) {
5008       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
5009       CloseHandle(hFile);
5010       return NULL;
5011     }
5012 
5013     // Record virtual memory allocation
5014     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
5015 
5016     DWORD bytes_read;
5017     OVERLAPPED overlapped;
5018     overlapped.Offset = (DWORD)file_offset;
5019     overlapped.OffsetHigh = 0;
5020     overlapped.hEvent = NULL;
5021     // ReadFile guarantees that if the return value is true, the requested
5022     // number of bytes were read before returning.
5023     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
5024     if (!res) {
5025       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
5026       release_memory(base, bytes);
5027       CloseHandle(hFile);
5028       return NULL;
5029     }
5030   } else {
5031     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
5032                                     NULL /* file_name */);
5033     if (hMap == NULL) {
5034       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
5035       CloseHandle(hFile);
5036       return NULL;
5037     }
5038 
5039     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
5040     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
5041                                   (DWORD)bytes, addr);
5042     if (base == NULL) {
5043       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
5044       CloseHandle(hMap);
5045       CloseHandle(hFile);
5046       return NULL;
5047     }
5048 
5049     if (CloseHandle(hMap) == 0) {
5050       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
5051       CloseHandle(hFile);
5052       return base;
5053     }
5054   }
5055 
5056   if (allow_exec) {
5057     DWORD old_protect;
5058     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
5059     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
5060 
5061     if (!res) {
5062       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
5063       // Don't consider this a hard error, on IA32 even if the
5064       // VirtualProtect fails, we should still be able to execute
5065       CloseHandle(hFile);
5066       return base;
5067     }
5068   }
5069 
5070   if (CloseHandle(hFile) == 0) {
5071     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
5072     return base;
5073   }
5074 
5075   return base;
5076 }
5077 
5078 
5079 // Remap a block of memory.
5080 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5081                           char *addr, size_t bytes, bool read_only,
5082                           bool allow_exec) {
5083   // This OS does not allow existing memory maps to be remapped so we
5084   // would have to unmap the memory before we remap it.
5085 
5086   // Because there is a small window between unmapping memory and mapping
5087   // it in again with different protections, CDS archives are mapped RW
5088   // on windows, so this function isn't called.
5089   ShouldNotReachHere();
5090   return NULL;
5091 }
5092 
5093 
5094 // Unmap a block of memory.
5095 // Returns true=success, otherwise false.
5096 
5097 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5098   MEMORY_BASIC_INFORMATION mem_info;
5099   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5100     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5101     return false;
5102   }
5103 
5104   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5105   // Instead, executable region was allocated using VirtualAlloc(). See
5106   // pd_map_memory() above.
5107   //
5108   // The following flags should match the 'exec_access' flags used for
5109   // VirtualProtect() in pd_map_memory().
5110   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5111       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5112     return pd_release_memory(addr, bytes);
5113   }
5114 
5115   BOOL result = UnmapViewOfFile(addr);
5116   if (result == 0) {
5117     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
5118     return false;
5119   }
5120   return true;
5121 }
5122 
5123 void os::pause() {
5124   char filename[MAX_PATH];
5125   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5126     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5127   } else {
5128     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5129   }
5130 
5131   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5132   if (fd != -1) {
5133     struct stat buf;
5134     ::close(fd);
5135     while (::stat(filename, &buf) == 0) {
5136       Sleep(100);
5137     }
5138   } else {
5139     jio_fprintf(stderr,
5140                 "Could not open pause file '%s', continuing immediately.\n", filename);
5141   }
5142 }
5143 
5144 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5145 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5146 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
5147 
5148 os::ThreadCrashProtection::ThreadCrashProtection() {
5149 }
5150 
5151 // See the caveats for this class in os_windows.hpp
5152 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5153 // into this method, which then returns false. If no OS EXCEPTION was raised,
5154 // returns true.
5155 // The callback is supposed to provide the method that should be protected.
5156 //
5157 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5158 
5159   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5160 
5161   _protected_thread = Thread::current_or_null();
5162   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5163 
5164   bool success = true;
5165   __try {
5166     _crash_protection = this;
5167     cb.call();
5168   } __except(EXCEPTION_EXECUTE_HANDLER) {
5169     // only for protection, nothing to do
5170     success = false;
5171   }
5172   _crash_protection = NULL;
5173   _protected_thread = NULL;
5174   Thread::muxRelease(&_crash_mux);
5175   return success;
5176 }
5177 
5178 
5179 class HighResolutionInterval : public CHeapObj<mtThread> {
5180   // The default timer resolution seems to be 10 milliseconds.
5181   // (Where is this written down?)
5182   // If someone wants to sleep for only a fraction of the default,
5183   // then we set the timer resolution down to 1 millisecond for
5184   // the duration of their interval.
5185   // We carefully set the resolution back, since otherwise we
5186   // seem to incur an overhead (3%?) that we don't need.
5187   // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
5188   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5189   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5190   // timeBeginPeriod() if the relative error exceeded some threshold (see the sketch after this class).
5191   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5192   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5193   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5194   // resolution timers running.
5195  private:
5196   jlong resolution;
5197  public:
5198   HighResolutionInterval(jlong ms) {
5199     resolution = ms % 10L;
5200     if (resolution != 0) {
5201       MMRESULT result = timeBeginPeriod(1L);
5202     }
5203   }
5204   ~HighResolutionInterval() {
5205     if (resolution != 0) {
5206       MMRESULT result = timeEndPeriod(1L);
5207     }
5208     resolution = 0L;
5209   }
5210 };
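
     // A possible shape for the relative-error gating considered in the class comment
     // above; an illustrative sketch only, not used anywhere, and the 2% threshold is
     // an arbitrary assumption:
     //
     //   jlong nearest = ((ms + 5) / 10) * 10;   // nearest multiple of the 10 ms default resolution
     //   jlong diff    = (ms > nearest) ? ms - nearest : nearest - ms;
     //   bool want_high_res = (nearest == 0) || ((double)diff / (double)nearest > 0.02);
     //
     //   // e.g. ms == 3   -> nearest == 0,   always use the 1 ms resolution
     //   //      ms == 503 -> nearest == 500, relative error 0.6%, skip timeBeginPeriod()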
5211 
5212 // An Event wraps a win32 "CreateEvent" kernel handle.
5213 //
5214 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5215 //
5216 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5217 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5218 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5219 //     In addition, an unpark() operation might fetch the handle field, but the
5220 //     event could recycle between the fetch and the SetEvent() operation.
5221 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5222 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5223 //     on a stale but recycled handle would be harmless, but in practice this might
5224 //     confuse other non-Sun code, so it's not a viable approach.
5225 //
5226 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5227 //     with the Event.  The event handle is never closed.  This could be construed
5228 //     as handle leakage, but only up to the maximum # of threads that have been extant
5229 //     at any one time.  This shouldn't be an issue, as windows platforms typically
5230 //     permit a process to have hundreds of thousands of open handles.
5231 //
5232 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5233 //     and release unused handles.
5234 //
5235 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5236 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5237 //
5238 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5239 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5240 //
5241 // We use (2).
5242 //
5243 // TODO-FIXME:
5244 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5245 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5246 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5247 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5248 //     into a single win32 CreateEvent() handle.
5249 //
5250 // Assumption:
5251 //    Only one parker can exist on an event, which is why we allocate
5252 //    them per-thread. Multiple unparkers can coexist.
5253 //
5254 // _Event transitions in park()
5255 //   -1 => -1 : illegal
5256 //    1 =>  0 : pass - return immediately
5257 //    0 => -1 : block; then set _Event to 0 before returning
5258 //
5259 // _Event transitions in unpark()
5260 //    0 => 1 : just return
5261 //    1 => 1 : just return
5262 //   -1 => either 0 or 1; must signal target thread
5263 //         That is, we can safely transition _Event from -1 to either
5264 //         0 or 1.
5265 //
5266 // _Event serves as a restricted-range semaphore.
5267 //   -1 : thread is blocked, i.e. there is a waiter
5268 //    0 : neutral: thread is running or ready,
5269 //        could have been signaled after a wait started
5270 //    1 : signaled - thread is running or ready
5271 //
5272 // Another possible encoding of _Event would be with
5273 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5274 //
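     // For illustration, one legal interleaving under this protocol, with the value
     // of _Event after each step (T1 is the owning thread, T2 an unparker):
     //
     //   initial state       _Event == 0
     //   T2: unpark()        _Event == 1   (no waiter; the permit is recorded)
     //   T1: park()          _Event == 0   (permit consumed, returns immediately)
     //   T1: park()          _Event == -1  (no permit, blocks on the kernel event)
     //   T2: unpark()        _Event == 1 and SetEvent() wakes T1, which resets _Event to 0
     //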
5275 
5276 int os::PlatformEvent::park(jlong Millis) {
5277   // Transitions for _Event:
5278   //   -1 => -1 : illegal
5279   //    1 =>  0 : pass - return immediately
5280   //    0 => -1 : block; then set _Event to 0 before returning
5281 
5282   guarantee(_ParkHandle != NULL , "Invariant");
5283   guarantee(Millis > 0          , "Invariant");
5284 
5285   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5286   // the initial park() operation.
5287   // Consider: use atomic decrement instead of CAS-loop
5288 
5289   int v;
5290   for (;;) {
5291     v = _Event;
5292     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5293   }
5294   guarantee((v == 0) || (v == 1), "invariant");
5295   if (v != 0) return OS_OK;
5296 
5297   // Do this the hard way by blocking ...
5298   // TODO: consider a brief spin here, gated on the success of recent
5299   // spin attempts by this thread.
5300   //
5301   // We decompose long timeouts into series of shorter timed waits.
5302   // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
5303   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5304   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5305   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5306   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5307   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5308   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5309   // for the already waited time.  This policy does not admit any new outcomes.
5310   // In the future, however, we might want to track the accumulated wait time and
5311   // adjust Millis accordingly if we encounter a spurious wakeup.
5312 
5313   const int MAXTIMEOUT = 0x10000000;
5314   DWORD rv = WAIT_TIMEOUT;
5315   while (_Event < 0 && Millis > 0) {
5316     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5317     if (Millis > MAXTIMEOUT) {
5318       prd = MAXTIMEOUT;
5319     }
5320     HighResolutionInterval *phri = NULL;
5321     if (!ForceTimeHighResolution) {
5322       phri = new HighResolutionInterval(prd);
5323     }
5324     rv = ::WaitForSingleObject(_ParkHandle, prd);
5325     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5326     if (rv == WAIT_TIMEOUT) {
5327       Millis -= prd;
5328     }
5329     delete phri; // if it is NULL, harmless
5330   }
5331   v = _Event;
5332   _Event = 0;
5333   // see comment at end of os::PlatformEvent::park() below:
5334   OrderAccess::fence();
5335   // If we encounter a nearly simultaneous timeout expiry and unpark()
5336   // we return OS_OK indicating we awoke via unpark().
5337   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5338   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5339 }
5340 
5341 void os::PlatformEvent::park() {
5342   // Transitions for _Event:
5343   //   -1 => -1 : illegal
5344   //    1 =>  0 : pass - return immediately
5345   //    0 => -1 : block; then set _Event to 0 before returning
5346 
5347   guarantee(_ParkHandle != NULL, "Invariant");
5348   // Invariant: Only the thread associated with the Event/PlatformEvent
5349   // may call park().
5350   // Consider: use atomic decrement instead of CAS-loop
5351   int v;
5352   for (;;) {
5353     v = _Event;
5354     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5355   }
5356   guarantee((v == 0) || (v == 1), "invariant");
5357   if (v != 0) return;
5358 
5359   // Do this the hard way by blocking ...
5360   // TODO: consider a brief spin here, gated on the success of recent
5361   // spin attempts by this thread.
5362   while (_Event < 0) {
5363     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5364     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5365   }
5366 
5367   // Usually we'll find _Event == 0 at this point, but as
5368   // an optional optimization we clear it, just in case
5369   // multiple unpark() operations drove _Event up to 1.
5370   _Event = 0;
5371   OrderAccess::fence();
5372   guarantee(_Event >= 0, "invariant");
5373 }
5374 
5375 void os::PlatformEvent::unpark() {
5376   guarantee(_ParkHandle != NULL, "Invariant");
5377 
5378   // Transitions for _Event:
5379   //    0 => 1 : just return
5380   //    1 => 1 : just return
5381   //   -1 => either 0 or 1; must signal target thread
5382   //         That is, we can safely transition _Event from -1 to either
5383   //         0 or 1.
5384   // See also: "Semaphores in Plan 9" by Mullender & Cox
5385   //
5386   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5387   // that it will take two back-to-back park() calls for the owning
5388   // thread to block. This has the benefit of forcing a spurious return
5389   // from the first park() call after an unpark() call which will help
5390   // shake out uses of park() and unpark() without condition variables.
5391 
5392   if (Atomic::xchg(&_Event, 1) >= 0) return;
5393 
5394   ::SetEvent(_ParkHandle);
5395 }
5396 
5397 
5398 // JSR166
5399 // -------------------------------------------------------
5400 
5401 // The Windows implementation of Park is very straightforward: Basic
5402 // operations on Win32 Events turn out to have the right semantics to
5403 // use them directly. We opportunistically reuse the event inherited
5404 // from Monitor.
5405 
5406 void Parker::park(bool isAbsolute, jlong time) {
5407   guarantee(_ParkEvent != NULL, "invariant");
5408   // First, demultiplex/decode time arguments
5409   if (time < 0) { // don't wait
5410     return;
5411   } else if (time == 0 && !isAbsolute) {
5412     time = INFINITE;
5413   } else if (isAbsolute) {
5414     time -= os::javaTimeMillis(); // convert to relative time
5415     if (time <= 0) {  // already elapsed
5416       return;
5417     }
5418   } else { // relative
5419     time /= 1000000;  // Must coarsen from nanos to millis
5420     if (time == 0) {  // Wait for the minimal time unit if zero
5421       time = 1;
5422     }
5423   }
5424 
5425   JavaThread* thread = JavaThread::current();
5426 
5427   // Don't wait if interrupted or already triggered
5428   if (thread->is_interrupted(false) ||
5429       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5430     ResetEvent(_ParkEvent);
5431     return;
5432   } else {
5433     ThreadBlockInVM tbivm(thread);
5434     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5435     thread->set_suspend_equivalent();
5436 
5437     WaitForSingleObject(_ParkEvent, time);
5438     ResetEvent(_ParkEvent);
5439 
5440     // If externally suspended while waiting, re-suspend
5441     if (thread->handle_special_suspend_equivalent_condition()) {
5442       thread->java_suspend_self();
5443     }
5444   }
5445 }
5446 
5447 void Parker::unpark() {
5448   guarantee(_ParkEvent != NULL, "invariant");
5449   SetEvent(_ParkEvent);
5450 }
5451 
5452 // Platform Monitor implementation
5453 
5454 // Must already be locked
5455 int os::PlatformMonitor::wait(jlong millis) {
5456   assert(millis >= 0, "negative timeout");
5457   int ret = OS_TIMEOUT;
5458   int status = SleepConditionVariableCS(&_cond, &_mutex,
5459                                         millis == 0 ? INFINITE : millis);
5460   if (status != 0) {
5461     ret = OS_OK;
5462   }
5463   #ifndef PRODUCT
5464   else {
5465     DWORD err = GetLastError();
5466     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5467   }
5468   #endif
5469   return ret;
5470 }
5471 
5472 // Run the specified command in a separate process. Return its exit value,
5473 // or -1 on failure (e.g. can't create a new process).
5474 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5475   STARTUPINFO si;
5476   PROCESS_INFORMATION pi;
5477   DWORD exit_code;
5478 
5479   char * cmd_string;
5480   const char * cmd_prefix = "cmd /C ";
5481   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5482   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5483   if (cmd_string == NULL) {
5484     return -1;
5485   }
5486   cmd_string[0] = '\0';
5487   strcat(cmd_string, cmd_prefix);
5488   strcat(cmd_string, cmd);
5489 
5490   // now replace all '\n' with '&'
5491   char * substring = cmd_string;
5492   while ((substring = strchr(substring, '\n')) != NULL) {
5493     substring[0] = '&';
5494     substring++;
5495   }
5496   memset(&si, 0, sizeof(si));
5497   si.cb = sizeof(si);
5498   memset(&pi, 0, sizeof(pi));
5499   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5500                             cmd_string,    // command line
5501                             NULL,   // process security attribute
5502                             NULL,   // thread security attribute
5503                             TRUE,   // inherits system handles
5504                             0,      // no creation flags
5505                             NULL,   // use parent's environment block
5506                             NULL,   // use parent's starting directory
5507                             &si,    // (in) startup information
5508                             &pi);   // (out) process information
5509 
5510   if (rslt) {
5511     // Wait until child process exits.
5512     WaitForSingleObject(pi.hProcess, INFINITE);
5513 
5514     GetExitCodeProcess(pi.hProcess, &exit_code);
5515 
5516     // Close process and thread handles.
5517     CloseHandle(pi.hProcess);
5518     CloseHandle(pi.hThread);
5519   } else {
5520     exit_code = -1;
5521   }
5522 
5523   FREE_C_HEAP_ARRAY(char, cmd_string);
5524   return (int)exit_code;
5525 }
5526 
5527 bool os::find(address addr, outputStream* st) {
5528   int offset = -1;
5529   bool result = false;
5530   char buf[256];
5531   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5532     st->print(PTR_FORMAT " ", addr);
5533     if (strlen(buf) < sizeof(buf) - 1) {
5534       char* p = strrchr(buf, '\\');
5535       if (p) {
5536         st->print("%s", p + 1);
5537       } else {
5538         st->print("%s", buf);
5539       }
5540     } else {
5541         // The library name is probably truncated. Let's omit the library name.
5542         // See also JDK-8147512.
5543     }
5544     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5545       st->print("::%s + 0x%x", buf, offset);
5546     }
5547     st->cr();
5548     result = true;
5549   }
5550   return result;
5551 }
5552 
5553 static jint initSock() {
5554   WSADATA wsadata;
5555 
5556   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5557     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5558                 ::GetLastError());
5559     return JNI_ERR;
5560   }
5561   return JNI_OK;
5562 }
5563 
5564 struct hostent* os::get_host_by_name(char* name) {
5565   return (struct hostent*)gethostbyname(name);
5566 }
5567 
5568 int os::socket_close(int fd) {
5569   return ::closesocket(fd);
5570 }
5571 
5572 int os::socket(int domain, int type, int protocol) {
5573   return ::socket(domain, type, protocol);
5574 }
5575 
5576 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5577   return ::connect(fd, him, len);
5578 }
5579 
5580 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5581   return ::recv(fd, buf, (int)nBytes, flags);
5582 }
5583 
5584 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5585   return ::send(fd, buf, (int)nBytes, flags);
5586 }
5587 
5588 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5589   return ::send(fd, buf, (int)nBytes, flags);
5590 }
5591 
5592 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5593 #if defined(IA32)
5594   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5595 #elif defined (AMD64)
5596   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5597 #endif
5598 
5599 // returns true if thread could be suspended,
5600 // false otherwise
5601 static bool do_suspend(HANDLE* h) {
5602   if (h != NULL) {
5603     if (SuspendThread(*h) != ~0) {
5604       return true;
5605     }
5606   }
5607   return false;
5608 }
5609 
5610 // resume the thread
5611 // calling resume on an active thread is a no-op
5612 static void do_resume(HANDLE* h) {
5613   if (h != NULL) {
5614     ResumeThread(*h);
5615   }
5616 }
5617 
5618 // retrieve a suspend/resume context capable handle
5619 // from the tid. Caller validates handle return value.
5620 void get_thread_handle_for_extended_context(HANDLE* h,
5621                                             OSThread::thread_id_t tid) {
5622   if (h != NULL) {
5623     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5624   }
5625 }
5626 
5627 // Thread sampling implementation
5628 //
5629 void os::SuspendedThreadTask::internal_do_task() {
5630   CONTEXT    ctxt;
5631   HANDLE     h = NULL;
5632 
5633   // get context capable handle for thread
5634   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5635 
5636   // sanity
5637   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5638     return;
5639   }
5640 
5641   // suspend the thread
5642   if (do_suspend(&h)) {
5643     ctxt.ContextFlags = sampling_context_flags;
5644     // get thread context
5645     GetThreadContext(h, &ctxt);
5646     SuspendedThreadTaskContext context(_thread, &ctxt);
5647     // pass context to Thread Sampling impl
5648     do_task(context);
5649     // resume thread
5650     do_resume(&h);
5651   }
5652 
5653   // close handle
5654   CloseHandle(h);
5655 }
5656 
5657 bool os::start_debugging(char *buf, int buflen) {
5658   int len = (int)strlen(buf);
5659   char *p = &buf[len];
5660 
5661   jio_snprintf(p, buflen-len,
5662              "\n\n"
5663              "Do you want to debug the problem?\n\n"
5664              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5665              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5666              "Otherwise, select 'No' to abort...",
5667              os::current_process_id(), os::current_thread_id());
5668 
5669   bool yes = os::message_box("Unexpected Error", buf);
5670 
5671   if (yes) {
5672     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5673     // exception. If VM is running inside a debugger, the debugger will
5674     // catch the exception. Otherwise, the breakpoint exception will reach
5675     // the default windows exception handler, which can spawn a debugger and
5676     // automatically attach to the dying VM.
5677     os::breakpoint();
5678     yes = false;
5679   }
5680   return yes;
5681 }
5682 
5683 void* os::get_default_process_handle() {
5684   return (void*)GetModuleHandle(NULL);
5685 }
5686 
5687 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5688 // which is used to find statically linked in agents.
5689 // Additionally for windows, takes into account __stdcall names.
5690 // Parameters:
5691 //            sym_name: Symbol in library we are looking for
5692 //            lib_name: Name of library to look in, NULL for shared libs.
5693 //            is_absolute_path == true if lib_name is absolute path to agent
5694 //                                     such as "C:/a/b/L.dll"
5695 //            == false if only the base name of the library is passed in
5696 //               such as "L"
5697 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5698                                     bool is_absolute_path) {
5699   char *agent_entry_name;
5700   size_t len;
5701   size_t name_len;
5702   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5703   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5704   const char *start;
5705 
5706   if (lib_name != NULL) {
5707     len = name_len = strlen(lib_name);
5708     if (is_absolute_path) {
5709       // Need to strip path, prefix and suffix
5710       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5711         lib_name = ++start;
5712       } else {
5713         // Need to check for drive prefix
5714         if ((start = strchr(lib_name, ':')) != NULL) {
5715           lib_name = ++start;
5716         }
5717       }
5718       if (len <= (prefix_len + suffix_len)) {
5719         return NULL;
5720       }
5721       lib_name += prefix_len;
5722       name_len = strlen(lib_name) - suffix_len;
5723     }
5724   }
5725   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5726   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5727   if (agent_entry_name == NULL) {
5728     return NULL;
5729   }
5730   if (lib_name != NULL) {
5731     const char *p = strrchr(sym_name, '@');
5732     if (p != NULL && p != sym_name) {
5733       // sym_name == _Agent_OnLoad@XX
5734       strncpy(agent_entry_name, sym_name, (p - sym_name));
5735       agent_entry_name[(p-sym_name)] = '\0';
5736       // agent_entry_name == _Agent_OnLoad
5737       strcat(agent_entry_name, "_");
5738       strncat(agent_entry_name, lib_name, name_len);
5739       strcat(agent_entry_name, p);
5740       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5741     } else {
5742       strcpy(agent_entry_name, sym_name);
5743       strcat(agent_entry_name, "_");
5744       strncat(agent_entry_name, lib_name, name_len);
5745     }
5746   } else {
5747     strcpy(agent_entry_name, sym_name);
5748   }
5749   return agent_entry_name;
5750 }
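
     // Illustrative examples only, assuming the Windows values JNI_LIB_PREFIX == ""
     // and JNI_LIB_SUFFIX == ".dll":
     //
     //   build_agent_function_name("Agent_OnLoad", "C:\\a\\b\\L.dll", true)
     //       -> "Agent_OnLoad_L"
     //   build_agent_function_name("_Agent_OnLoad@16", "L", false)
     //       -> "_Agent_OnLoad_L@16"   (the __stdcall decoration is preserved)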
5751 
5752 #ifndef PRODUCT
5753 
5754 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5755 // contiguous memory block at a particular address.
5756 // The test first tries to find a good approximate address to allocate at by using the same
5757 // method to allocate some memory at any address. The test then tries to allocate memory in
5758 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5759 // This is of course only a rough heuristic; there is no guarantee that the vicinity of
5760 // the previously allocated memory is available for allocation. The only actual failure
5761 // that is reported is when the test tries to allocate at a particular location but gets a
5762 // different valid one. A NULL return value at this point is not considered an error but may
5763 // be legitimate.
5764 void TestReserveMemorySpecial_test() {
5765   if (!UseLargePages) {
5766     return;
5767   }
5768   // save current value of globals
5769   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5770   bool old_use_numa_interleaving = UseNUMAInterleaving;
5771 
5772   // set globals to make sure we hit the correct code path
5773   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5774 
5775   // do an allocation at an address selected by the OS to get a good one.
5776   const size_t large_allocation_size = os::large_page_size() * 4;
5777   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5778   if (result == NULL) {
5779   } else {
5780     os::release_memory_special(result, large_allocation_size);
5781 
5782     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5783     // we managed to get it once.
5784     const size_t expected_allocation_size = os::large_page_size();
5785     char* expected_location = result + os::large_page_size();
5786     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5787     if (actual_location == NULL) {
5788     } else {
5789       // release memory
5790       os::release_memory_special(actual_location, expected_allocation_size);
5791       // only now check, after releasing any memory to avoid any leaks.
5792       assert(actual_location == expected_location,
5793              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5794              expected_location, expected_allocation_size, actual_location);
5795     }
5796   }
5797 
5798   // restore globals
5799   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5800   UseNUMAInterleaving = old_use_numa_interleaving;
5801 }
5802 #endif // PRODUCT
5803 
5804 /*
5805   All the defined signal names for Windows.
5806 
5807   NOTE that not all of these names are accepted by FindSignal!
5808 
5809   For various reasons some of these may be rejected at runtime.
5810 
5811   Here are the names currently accepted by a user of sun.misc.Signal with
5812   1.4.1 (ignoring potential interaction with use of chaining, etc):
5813 
5814      (LIST TBD)
5815 
5816 */
5817 int os::get_signal_number(const char* name) {
5818   static const struct {
5819     const char* name;
5820     int         number;
5821   } siglabels [] =
5822     // derived from version 6.0 VC98/include/signal.h
5823   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5824   "FPE",        SIGFPE,         // floating point exception
5825   "SEGV",       SIGSEGV,        // segment violation
5826   "INT",        SIGINT,         // interrupt
5827   "TERM",       SIGTERM,        // software term signal from kill
5828   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5829   "ILL",        SIGILL};        // illegal instruction
5830   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5831     if (strcmp(name, siglabels[i].name) == 0) {
5832       return siglabels[i].number;
5833     }
5834   }
5835   return -1;
5836 }
5837 
5838 // Fast current thread access
5839 
5840 int os::win32::_thread_ptr_offset = 0;
5841 
5842 static void call_wrapper_dummy() {}
5843 
5844 // We need to call the os_exception_wrapper once so that it sets
5845 // up the offset from FS of the thread pointer.
5846 void os::win32::initialize_thread_ptr_offset() {
5847   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5848                            NULL, methodHandle(), NULL, NULL);
5849 }
5850 
5851 bool os::supports_map_sync() {
5852   return false;
5853 }