1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "classfile/classLoader.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/disassembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "jvm_windows.h"
  38 #include "logging/log.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/filemap.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "os_share_windows.hpp"
  43 #include "os_windows.inline.hpp"
  44 #include "prims/jniFastGetField.hpp"
  45 #include "prims/jvm.h"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.inline.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.inline.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "semaphore_windows.hpp"
  67 #include "services/attachListener.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/growableArray.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 
  77 #ifdef _DEBUG
  78 #include <crtdbg.h>
  79 #endif
  80 
  81 
  82 #include <windows.h>
  83 #include <sys/types.h>
  84 #include <sys/stat.h>
  85 #include <sys/timeb.h>
  86 #include <objidl.h>
  87 #include <shlobj.h>
  88 
  89 #include <malloc.h>
  90 #include <signal.h>
  91 #include <direct.h>
  92 #include <errno.h>
  93 #include <fcntl.h>
  94 #include <io.h>
  95 #include <process.h>              // For _beginthreadex(), _endthreadex()
  96 #include <imagehlp.h>             // For os::dll_address_to_function_name
  97 // for enumerating dll libraries
  98 #include <vdmdbg.h>
  99 
 100 // for timer info max values which include all bits
 101 #define ALL_64_BITS CONST64(-1)
 102 
 103 // For DLL loading/load error detection
 104 // Values of PE COFF
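     // (0x3c is the offset of the e_lfanew field in the DOS header; that field holds
     //  the file offset of the 4-byte "PE\0\0" signature, which the COFF header follows.)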
 105 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 106 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 107 
 108 static HANDLE main_process;
 109 static HANDLE main_thread;
 110 static int    main_thread_id;
 111 
 112 static FILETIME process_creation_time;
 113 static FILETIME process_exit_time;
 114 static FILETIME process_user_time;
 115 static FILETIME process_kernel_time;
 116 
 117 #ifdef _M_IA64
 118   #define __CPU__ ia64
 119 #else
 120   #ifdef _M_AMD64
 121     #define __CPU__ amd64
 122   #else
 123     #define __CPU__ i486
 124   #endif
 125 #endif
 126 
 127 // save DLL module handle, used by GetModuleFileName
 128 
 129 HINSTANCE vm_lib_handle;
 130 
 131 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 132   switch (reason) {
 133   case DLL_PROCESS_ATTACH:
 134     vm_lib_handle = hinst;
 135     if (ForceTimeHighResolution) {
 136       timeBeginPeriod(1L);
 137     }
 138     break;
 139   case DLL_PROCESS_DETACH:
 140     if (ForceTimeHighResolution) {
 141       timeEndPeriod(1L);
 142     }
 143     break;
 144   default:
 145     break;
 146   }
 147   return true;
 148 }
 149 
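     // Convert a FILETIME (a 64-bit count of 100 ns ticks split into two 32-bit halves)
     // into seconds, returned as a double.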
 150 static inline double fileTimeAsDouble(FILETIME* time) {
 151   const double high  = (double) ((unsigned int) ~0);
 152   const double split = 10000000.0;
 153   double result = (time->dwLowDateTime / split) +
 154                    time->dwHighDateTime * (high/split);
 155   return result;
 156 }
 157 
 158 // Implementation of os
 159 
 160 bool os::unsetenv(const char* name) {
 161   assert(name != NULL, "Null pointer");
 162   return (SetEnvironmentVariable(name, NULL) == TRUE);
 163 }
 164 
 165 // No setuid programs under Windows.
 166 bool os::have_special_privileges() {
 167   return false;
 168 }
 169 
 170 
 171 // This method is a periodic task to check for misbehaving JNI applications
 172 // under CheckJNI; we can add any periodic checks here.
 173 // On Windows it currently does nothing.
 174 void os::run_periodic_checks() {
 175   return;
 176 }
 177 
 178 // previous UnhandledExceptionFilter, if there is one
 179 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 180 
 181 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 182 
 183 void os::init_system_properties_values() {
 184   // sysclasspath, java_home, dll_dir
 185   {
 186     char *home_path;
 187     char *dll_path;
 188     char *pslash;
 189     char *bin = "\\bin";
 190     char home_dir[MAX_PATH + 1];
 191     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 192 
 193     if (alt_home_dir != NULL)  {
 194       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 195       home_dir[MAX_PATH] = '\0';
 196     } else {
 197       os::jvm_path(home_dir, sizeof(home_dir));
 198       // Found the full path to jvm.dll.
 199       // Now cut the path to <java_home>/jre if we can.
 200       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 201       pslash = strrchr(home_dir, '\\');
 202       if (pslash != NULL) {
 203         *pslash = '\0';                   // get rid of \{client|server}
 204         pslash = strrchr(home_dir, '\\');
 205         if (pslash != NULL) {
 206           *pslash = '\0';                 // get rid of \bin
 207         }
 208       }
 209     }
 210 
 211     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 212     if (home_path == NULL) {
 213       return;
 214     }
 215     strcpy(home_path, home_dir);
 216     Arguments::set_java_home(home_path);
 217     FREE_C_HEAP_ARRAY(char, home_path);
 218 
 219     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 220                                 mtInternal);
 221     if (dll_path == NULL) {
 222       return;
 223     }
 224     strcpy(dll_path, home_dir);
 225     strcat(dll_path, bin);
 226     Arguments::set_dll_dir(dll_path);
 227     FREE_C_HEAP_ARRAY(char, dll_path);
 228 
 229     if (!set_boot_path('\\', ';')) {
 230       return;
 231     }
 232   }
 233 
 234 // library_path
 235 #define EXT_DIR "\\lib\\ext"
 236 #define BIN_DIR "\\bin"
 237 #define PACKAGE_DIR "\\Sun\\Java"
 238   {
 239     // Win32 library search order (See the documentation for LoadLibrary):
 240     //
 241     // 1. The directory from which application is loaded.
 242     // 2. The system wide Java Extensions directory (Java only)
 243     // 3. System directory (GetSystemDirectory)
 244     // 4. Windows directory (GetWindowsDirectory)
 245     // 5. The PATH environment variable
 246     // 6. The current directory
 247 
 248     char *library_path;
 249     char tmp[MAX_PATH];
 250     char *path_str = ::getenv("PATH");
 251 
 252     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 253                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 254 
 255     library_path[0] = '\0';
 256 
 257     GetModuleFileName(NULL, tmp, sizeof(tmp));
 258     *(strrchr(tmp, '\\')) = '\0';
 259     strcat(library_path, tmp);
 260 
 261     GetWindowsDirectory(tmp, sizeof(tmp));
 262     strcat(library_path, ";");
 263     strcat(library_path, tmp);
 264     strcat(library_path, PACKAGE_DIR BIN_DIR);
 265 
 266     GetSystemDirectory(tmp, sizeof(tmp));
 267     strcat(library_path, ";");
 268     strcat(library_path, tmp);
 269 
 270     GetWindowsDirectory(tmp, sizeof(tmp));
 271     strcat(library_path, ";");
 272     strcat(library_path, tmp);
 273 
 274     if (path_str) {
 275       strcat(library_path, ";");
 276       strcat(library_path, path_str);
 277     }
 278 
 279     strcat(library_path, ";.");
 280 
 281     Arguments::set_library_path(library_path);
 282     FREE_C_HEAP_ARRAY(char, library_path);
 283   }
 284 
 285   // Default extensions directory
 286   {
 287     char path[MAX_PATH];
 288     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 289     GetWindowsDirectory(path, MAX_PATH);
 290     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 291             path, PACKAGE_DIR, EXT_DIR);
 292     Arguments::set_ext_dirs(buf);
 293   }
 294   #undef EXT_DIR
 295   #undef BIN_DIR
 296   #undef PACKAGE_DIR
 297 
 298 #ifndef _WIN64
 299   // set our UnhandledExceptionFilter and save any previous one
 300   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 301 #endif
 302 
 303   // Done
 304   return;
 305 }
 306 
 307 void os::breakpoint() {
 308   DebugBreak();
 309 }
 310 
 311 // Invoked from the BREAKPOINT Macro
 312 extern "C" void breakpoint() {
 313   os::breakpoint();
 314 }
 315 
 316 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 317 // So far, this method is only used by Native Memory Tracking, which is
 318 // only supported on Windows XP or later.
 319 //
 320 int os::get_native_stack(address* stack, int frames, int toSkip) {
 321   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 322   for (int index = captured; index < frames; index ++) {
 323     stack[index] = NULL;
 324   }
 325   return captured;
 326 }
 327 
 328 
 329 // os::current_stack_base()
 330 //
 331 //   Returns the base of the stack, which is the stack's
 332 //   starting address.  This function must be called
 333 //   while running on the stack of the thread being queried.
 334 
 335 address os::current_stack_base() {
 336   MEMORY_BASIC_INFORMATION minfo;
 337   address stack_bottom;
 338   size_t stack_size;
 339 
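       // Querying the address of a local variable (&minfo) returns the memory region
       // that contains the current thread's stack.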
 340   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 341   stack_bottom =  (address)minfo.AllocationBase;
 342   stack_size = minfo.RegionSize;
 343 
 344   // Add up the sizes of all the regions with the same
 345   // AllocationBase.
 346   while (1) {
 347     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 348     if (stack_bottom == (address)minfo.AllocationBase) {
 349       stack_size += minfo.RegionSize;
 350     } else {
 351       break;
 352     }
 353   }
 354 
 355 #ifdef _M_IA64
 356   // IA64 has memory and register stacks
 357   //
 358   // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
 359   // at thread creation (1MB backing store growing upwards, 1MB memory stack
 360   // growing downwards, 2MB summed up)
 361   //
 362   // ...
 363   // ------- top of stack (high address) -----
 364   // |
 365   // |      1MB
 366   // |      Backing Store (Register Stack)
 367   // |
 368   // |         / \
 369   // |          |
 370   // |          |
 371   // |          |
 372   // ------------------------ stack base -----
 373   // |      1MB
 374   // |      Memory Stack
 375   // |
 376   // |          |
 377   // |          |
 378   // |          |
 379   // |         \ /
 380   // |
 381   // ----- bottom of stack (low address) -----
 382   // ...
 383 
 384   stack_size = stack_size / 2;
 385 #endif
 386   return stack_bottom + stack_size;
 387 }
 388 
 389 size_t os::current_stack_size() {
 390   size_t sz;
 391   MEMORY_BASIC_INFORMATION minfo;
 392   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 393   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 394   return sz;
 395 }
 396 
 397 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 398   const struct tm* time_struct_ptr = localtime(clock);
 399   if (time_struct_ptr != NULL) {
 400     *res = *time_struct_ptr;
 401     return res;
 402   }
 403   return NULL;
 404 }
 405 
 406 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 407 
 408 // Thread start routine for all newly created threads
 409 static unsigned __stdcall thread_native_entry(Thread* thread) {
 410   // Try to randomize the cache line index of hot stack frames.
 411   // This helps when threads with the same stack traces evict each other's
 412   // cache lines. The threads can be either from the same JVM instance, or
 413   // from different JVM instances. The benefit is especially true for
 414   // processors with hyperthreading technology.
 415   static int counter = 0;
 416   int pid = os::current_process_id();
 417   _alloca(((pid ^ counter++) & 7) * 128);
 418 
 419   thread->initialize_thread_current();
 420 
 421   OSThread* osthr = thread->osthread();
 422   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 423 
 424   if (UseNUMA) {
 425     int lgrp_id = os::numa_get_group_id();
 426     if (lgrp_id != -1) {
 427       thread->set_lgrp_id(lgrp_id);
 428     }
 429   }
 430 
 431   // Diagnostic code to investigate JDK-6573254
 432   int res = 30115;  // non-java thread
 433   if (thread->is_Java_thread()) {
 434     res = 20115;    // java thread
 435   }
 436 
 437   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 438 
 439   // Install a win32 structured exception handler around every thread created
 440   // by the VM, so the VM can generate an error dump when an exception occurs
 441   // in a non-Java thread (e.g. the VM thread).
 442   __try {
 443     thread->run();
 444   } __except(topLevelExceptionFilter(
 445                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 446     // Nothing to do.
 447   }
 448 
 449   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 450 
 451   // One less thread is executing
 452   // When the VMThread gets here, the main thread may have already exited
 453   // which frees the CodeHeap containing the Atomic::add code
 454   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 455     Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
 456   }
 457 
 458   // If a thread has not deleted itself ("delete this") as part of its
 459   // termination sequence, we have to ensure thread-local-storage is
 460   // cleared before we actually terminate. No threads should ever be
 461   // deleted asynchronously with respect to their termination.
 462   if (Thread::current_or_null_safe() != NULL) {
 463     assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
 464     thread->clear_thread_current();
 465   }
 466 
 467   // Thread must not return from exit_process_or_thread(), but if it does,
 468   // let it proceed to exit normally
 469   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 470 }
 471 
 472 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 473                                   int thread_id) {
 474   // Allocate the OSThread object
 475   OSThread* osthread = new OSThread(NULL, NULL);
 476   if (osthread == NULL) return NULL;
 477 
 478   // Initialize support for Java interrupts
 479   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 480   if (interrupt_event == NULL) {
 481     delete osthread;
 482     return NULL;
 483   }
 484   osthread->set_interrupt_event(interrupt_event);
 485 
 486   // Store info on the Win32 thread into the OSThread
 487   osthread->set_thread_handle(thread_handle);
 488   osthread->set_thread_id(thread_id);
 489 
 490   if (UseNUMA) {
 491     int lgrp_id = os::numa_get_group_id();
 492     if (lgrp_id != -1) {
 493       thread->set_lgrp_id(lgrp_id);
 494     }
 495   }
 496 
 497   // Initial thread state is INITIALIZED, not SUSPENDED
 498   osthread->set_state(INITIALIZED);
 499 
 500   return osthread;
 501 }
 502 
 503 
 504 bool os::create_attached_thread(JavaThread* thread) {
 505 #ifdef ASSERT
 506   thread->verify_not_published();
 507 #endif
 508   HANDLE thread_h;
 509   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 510                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 511     fatal("DuplicateHandle failed\n");
 512   }
 513   OSThread* osthread = create_os_thread(thread, thread_h,
 514                                         (int)current_thread_id());
 515   if (osthread == NULL) {
 516     return false;
 517   }
 518 
 519   // Initial thread state is RUNNABLE
 520   osthread->set_state(RUNNABLE);
 521 
 522   thread->set_osthread(osthread);
 523 
 524   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 525     os::current_thread_id());
 526 
 527   return true;
 528 }
 529 
 530 bool os::create_main_thread(JavaThread* thread) {
 531 #ifdef ASSERT
 532   thread->verify_not_published();
 533 #endif
 534   if (_starting_thread == NULL) {
 535     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 536     if (_starting_thread == NULL) {
 537       return false;
 538     }
 539   }
 540 
 541   // The primordial thread is runnable from the start
 542   _starting_thread->set_state(RUNNABLE);
 543 
 544   thread->set_osthread(_starting_thread);
 545   return true;
 546 }
 547 
 548 // Helper function to trace _beginthreadex attributes,
 549 //  similar to os::Posix::describe_pthread_attr()
 550 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 551                                                size_t stacksize, unsigned initflag) {
 552   stringStream ss(buf, buflen);
 553   if (stacksize == 0) {
 554     ss.print("stacksize: default, ");
 555   } else {
 556     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 557   }
 558   ss.print("flags: ");
 559   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 560   #define ALL(X) \
 561     X(CREATE_SUSPENDED) \
 562     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 563   ALL(PRINT_FLAG)
 564   #undef ALL
 565   #undef PRINT_FLAG
 566   return buf;
 567 }
 568 
 569 // Allocate and initialize a new OSThread
 570 bool os::create_thread(Thread* thread, ThreadType thr_type,
 571                        size_t stack_size) {
 572   unsigned thread_id;
 573 
 574   // Allocate the OSThread object
 575   OSThread* osthread = new OSThread(NULL, NULL);
 576   if (osthread == NULL) {
 577     return false;
 578   }
 579 
 580   // Initialize support for Java interrupts
 581   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 582   if (interrupt_event == NULL) {
 583     delete osthread;
 584     return false;
 585   }
 586   osthread->set_interrupt_event(interrupt_event);
 587   osthread->set_interrupted(false);
 588 
 589   thread->set_osthread(osthread);
 590 
 591   if (stack_size == 0) {
 592     switch (thr_type) {
 593     case os::java_thread:
 594       // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
 595       if (JavaThread::stack_size_at_create() > 0) {
 596         stack_size = JavaThread::stack_size_at_create();
 597       }
 598       break;
 599     case os::compiler_thread:
 600       if (CompilerThreadStackSize > 0) {
 601         stack_size = (size_t)(CompilerThreadStackSize * K);
 602         break;
 603       } // else fall through:
 604         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 605     case os::vm_thread:
 606     case os::pgc_thread:
 607     case os::cgc_thread:
 608     case os::watcher_thread:
 609       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 610       break;
 611     }
 612   }
 613 
 614   // Create the Win32 thread
 615   //
 616   // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
 617   // does not specify stack size. Instead, it specifies the size of
 618   // initially committed space. The stack size is determined by
 619   // PE header in the executable. If the committed "stack_size" is larger
 620   // than default value in the PE header, the stack is rounded up to the
 621   // nearest multiple of 1MB. For example if the launcher has default
 622   // stack size of 320k, specifying any size less than 320k does not
 623   // affect the actual stack size at all, it only affects the initial
 624   // commitment. On the other hand, specifying 'stack_size' larger than
 625   // default value may cause significant increase in memory usage, because
 626   // not only the stack space will be rounded up to MB, but also the
 627   // entire space is committed upfront.
 628   //
 629   // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
 630   // for CreateThread() that can treat 'stack_size' as stack size. However, we
 631   // are not supposed to call CreateThread() directly according to the MSDN
 632   // documentation, because the JVM uses the C runtime library. The good news is
 633   // that the flag appears to work with _beginthreadex() as well.
 634 
 635   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 636   HANDLE thread_handle =
 637     (HANDLE)_beginthreadex(NULL,
 638                            (unsigned)stack_size,
 639                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 640                            thread,
 641                            initflag,
 642                            &thread_id);
 643 
 644   char buf[64];
 645   if (thread_handle != NULL) {
 646     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 647       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 648   } else {
 649     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 650       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 651   }
 652 
 653   if (thread_handle == NULL) {
 654     // Need to clean up stuff we've allocated so far
 655     CloseHandle(osthread->interrupt_event());
 656     thread->set_osthread(NULL);
 657     delete osthread;
 658     return false;
 659   }
 660 
 661   Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
 662 
 663   // Store info on the Win32 thread into the OSThread
 664   osthread->set_thread_handle(thread_handle);
 665   osthread->set_thread_id(thread_id);
 666 
 667   // Initial thread state is INITIALIZED, not SUSPENDED
 668   osthread->set_state(INITIALIZED);
 669 
 670   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 671   return true;
 672 }
 673 
 674 
 675 // Free Win32 resources related to the OSThread
 676 void os::free_thread(OSThread* osthread) {
 677   assert(osthread != NULL, "osthread not set");
 678 
 679   // We are told to free resources of the argument thread,
 680   // but we can only really operate on the current thread.
 681   assert(Thread::current()->osthread() == osthread,
 682          "os::free_thread but not current thread");
 683 
 684   CloseHandle(osthread->thread_handle());
 685   CloseHandle(osthread->interrupt_event());
 686   delete osthread;
 687 }
 688 
 689 static jlong first_filetime;
 690 static jlong initial_performance_count;
 691 static jlong performance_frequency;
 692 
 693 
 694 jlong as_long(LARGE_INTEGER x) {
 695   jlong result = 0; // initialization to avoid warning
 696   set_high(&result, x.HighPart);
 697   set_low(&result, x.LowPart);
 698   return result;
 699 }
 700 
 701 
 702 jlong os::elapsed_counter() {
 703   LARGE_INTEGER count;
 704   QueryPerformanceCounter(&count);
 705   return as_long(count) - initial_performance_count;
 706 }
 707 
 708 
 709 jlong os::elapsed_frequency() {
 710   return performance_frequency;
 711 }
 712 
 713 
 714 julong os::available_memory() {
 715   return win32::available_memory();
 716 }
 717 
 718 julong os::win32::available_memory() {
 719   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
 720   // value if total memory is larger than 4GB
 721   MEMORYSTATUSEX ms;
 722   ms.dwLength = sizeof(ms);
 723   GlobalMemoryStatusEx(&ms);
 724 
 725   return (julong)ms.ullAvailPhys;
 726 }
 727 
 728 julong os::physical_memory() {
 729   return win32::physical_memory();
 730 }
 731 
 732 bool os::has_allocatable_memory_limit(julong* limit) {
 733   MEMORYSTATUSEX ms;
 734   ms.dwLength = sizeof(ms);
 735   GlobalMemoryStatusEx(&ms);
 736 #ifdef _LP64
 737   *limit = (julong)ms.ullAvailVirtual;
 738   return true;
 739 #else
 740   // Limit to 1400m because of the 2gb address space wall
 741   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 742   return true;
 743 #endif
 744 }
 745 
 746 int os::active_processor_count() {
 747   DWORD_PTR lpProcessAffinityMask = 0;
 748   DWORD_PTR lpSystemAffinityMask = 0;
 749   int proc_count = processor_count();
 750   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 751       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 752     // Number of active processors is the number of bits in the process affinity mask
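         // Each iteration clears the lowest set bit (Kernighan's method), so the loop counts the set bits.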
 753     int bitcount = 0;
 754     while (lpProcessAffinityMask != 0) {
 755       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 756       bitcount++;
 757     }
 758     return bitcount;
 759   } else {
 760     return proc_count;
 761   }
 762 }
 763 
 764 void os::set_native_thread_name(const char *name) {
 765 
 766   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 767   //
 768   // Note that unfortunately this only works if the process
 769   // is already attached to a debugger; debugger must observe
 770   // the exception below to show the correct name.
 771 
 772   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 773   struct {
 774     DWORD dwType;     // must be 0x1000
 775     LPCSTR szName;    // pointer to name (in user addr space)
 776     DWORD dwThreadID; // thread ID (-1=caller thread)
 777     DWORD dwFlags;    // reserved for future use, must be zero
 778   } info;
 779 
 780   info.dwType = 0x1000;
 781   info.szName = name;
 782   info.dwThreadID = -1;
 783   info.dwFlags = 0;
 784 
 785   __try {
 786     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 787   } __except(EXCEPTION_CONTINUE_EXECUTION) {}
 788 }
 789 
 790 bool os::distribute_processes(uint length, uint* distribution) {
 791   // Not yet implemented.
 792   return false;
 793 }
 794 
 795 bool os::bind_to_processor(uint processor_id) {
 796   // Not yet implemented.
 797   return false;
 798 }
 799 
 800 void os::win32::initialize_performance_counter() {
 801   LARGE_INTEGER count;
 802   QueryPerformanceFrequency(&count);
 803   performance_frequency = as_long(count);
 804   QueryPerformanceCounter(&count);
 805   initial_performance_count = as_long(count);
 806 }
 807 
 808 
 809 double os::elapsedTime() {
 810   return (double) elapsed_counter() / (double) elapsed_frequency();
 811 }
 812 
 813 
 814 // Windows format:
 815 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 816 // Java format:
 817 //   Java standards require the number of milliseconds since 1/1/1970
 818 
 819 // Constant offset - calculated using offset()
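     // (There are 11644473600 seconds between 1601-01-01 and 1970-01-01: 369 years,
     //  89 of them leap years, i.e. 134774 days. In 100 ns units that is
     //  11644473600 * 10^7 = 116444736000000000.)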
 820 static jlong  _offset   = 116444736000000000;
 821 // Fake time counter for reproducible results when debugging
 822 static jlong  fake_time = 0;
 823 
 824 #ifdef ASSERT
 825 // Just to be safe, recalculate the offset in debug mode
 826 static jlong _calculated_offset = 0;
 827 static int   _has_calculated_offset = 0;
 828 
 829 jlong offset() {
 830   if (_has_calculated_offset) return _calculated_offset;
 831   SYSTEMTIME java_origin;
 832   java_origin.wYear          = 1970;
 833   java_origin.wMonth         = 1;
 834   java_origin.wDayOfWeek     = 0; // ignored
 835   java_origin.wDay           = 1;
 836   java_origin.wHour          = 0;
 837   java_origin.wMinute        = 0;
 838   java_origin.wSecond        = 0;
 839   java_origin.wMilliseconds  = 0;
 840   FILETIME jot;
 841   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 842     fatal("Error = %d\nWindows error", GetLastError());
 843   }
 844   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 845   _has_calculated_offset = 1;
 846   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 847   return _calculated_offset;
 848 }
 849 #else
 850 jlong offset() {
 851   return _offset;
 852 }
 853 #endif
 854 
 855 jlong windows_to_java_time(FILETIME wt) {
 856   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
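       // 'a' is in 100 ns units; dividing by 10,000 converts to milliseconds.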
 857   return (a - offset()) / 10000;
 858 }
 859 
 860 // Returns time ticks in 100 ns units (tenths of microseconds)
 861 jlong windows_to_time_ticks(FILETIME wt) {
 862   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 863   return (a - offset());
 864 }
 865 
 866 FILETIME java_to_windows_time(jlong l) {
 867   jlong a = (l * 10000) + offset();
 868   FILETIME result;
 869   result.dwHighDateTime = high(a);
 870   result.dwLowDateTime  = low(a);
 871   return result;
 872 }
 873 
 874 bool os::supports_vtime() { return true; }
 875 bool os::enable_vtime() { return false; }
 876 bool os::vtime_enabled() { return false; }
 877 
 878 double os::elapsedVTime() {
 879   FILETIME created;
 880   FILETIME exited;
 881   FILETIME kernel;
 882   FILETIME user;
 883   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 884     // the resolution of windows_to_java_time() should be sufficient (ms)
 885     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 886   } else {
 887     return elapsedTime();
 888   }
 889 }
 890 
 891 jlong os::javaTimeMillis() {
 892   if (UseFakeTimers) {
 893     return fake_time++;
 894   } else {
 895     FILETIME wt;
 896     GetSystemTimeAsFileTime(&wt);
 897     return windows_to_java_time(wt);
 898   }
 899 }
 900 
 901 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 902   FILETIME wt;
 903   GetSystemTimeAsFileTime(&wt);
 904   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 905   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 906   seconds = secs;
 907   nanos = jlong(ticks - (secs*10000000)) * 100;
 908 }
 909 
 910 jlong os::javaTimeNanos() {
 911     LARGE_INTEGER current_count;
 912     QueryPerformanceCounter(&current_count);
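         // Note: the conversion below goes through double, so the result carries at most
         // 53 bits of precision.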
 913     double current = as_long(current_count);
 914     double freq = performance_frequency;
 915     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 916     return time;
 917 }
 918 
 919 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 920   jlong freq = performance_frequency;
 921   if (freq < NANOSECS_PER_SEC) {
 922     // the performance counter is 64 bits and we will
 923     // be multiplying it -- so no wrap in 64 bits
 924     info_ptr->max_value = ALL_64_BITS;
 925   } else if (freq > NANOSECS_PER_SEC) {
 926     // use the max value the counter can reach to
 927     // determine the max value which could be returned
 928     julong max_counter = (julong)ALL_64_BITS;
 929     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 930   } else {
 931     // the performance counter is 64 bits and we will
 932     // be using it directly -- so no wrap in 64 bits
 933     info_ptr->max_value = ALL_64_BITS;
 934   }
 935 
 936   // using a counter, so no skipping
 937   info_ptr->may_skip_backward = false;
 938   info_ptr->may_skip_forward = false;
 939 
 940   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 941 }
 942 
 943 char* os::local_time_string(char *buf, size_t buflen) {
 944   SYSTEMTIME st;
 945   GetLocalTime(&st);
 946   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 947                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 948   return buf;
 949 }
 950 
 951 bool os::getTimesSecs(double* process_real_time,
 952                       double* process_user_time,
 953                       double* process_system_time) {
 954   HANDLE h_process = GetCurrentProcess();
 955   FILETIME create_time, exit_time, kernel_time, user_time;
 956   BOOL result = GetProcessTimes(h_process,
 957                                 &create_time,
 958                                 &exit_time,
 959                                 &kernel_time,
 960                                 &user_time);
 961   if (result != 0) {
 962     FILETIME wt;
 963     GetSystemTimeAsFileTime(&wt);
 964     jlong rtc_millis = windows_to_java_time(wt);
 965     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 966     *process_user_time =
 967       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 968     *process_system_time =
 969       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
 970     return true;
 971   } else {
 972     return false;
 973   }
 974 }
 975 
 976 void os::shutdown() {
 977   // allow PerfMemory to attempt cleanup of any persistent resources
 978   perfMemory_exit();
 979 
 980   // flush buffered output, finish log files
 981   ostream_abort();
 982 
 983   // Check for abort hook
 984   abort_hook_t abort_hook = Arguments::abort_hook();
 985   if (abort_hook != NULL) {
 986     abort_hook();
 987   }
 988 }
 989 
 990 
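     // MiniDumpWriteDump is resolved dynamically from dbghelp.dll in os::abort() below.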
 991 static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
 992                                          PMINIDUMP_EXCEPTION_INFORMATION,
 993                                          PMINIDUMP_USER_STREAM_INFORMATION,
 994                                          PMINIDUMP_CALLBACK_INFORMATION);
 995 
 996 static HANDLE dumpFile = NULL;
 997 
 998 // Check if dump file can be created.
 999 void os::check_dump_limit(char* buffer, size_t buffsz) {
1000   bool status = true;
1001   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1002     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1003     status = false;
1004   }
1005 
1006 #ifndef ASSERT
1007   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1008     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1009     status = false;
1010   }
1011 #endif
1012 
1013   if (status) {
1014     const char* cwd = get_current_directory(NULL, 0);
1015     int pid = current_process_id();
1016     if (cwd != NULL) {
1017       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1018     } else {
1019       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1020     }
1021 
1022     if (dumpFile == NULL &&
1023        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1024                  == INVALID_HANDLE_VALUE) {
1025       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1026       status = false;
1027     }
1028   }
1029   VMError::record_coredump_status(buffer, status);
1030 }
1031 
1032 void os::abort(bool dump_core, void* siginfo, const void* context) {
1033   HINSTANCE dbghelp;
1034   EXCEPTION_POINTERS ep;
1035   MINIDUMP_EXCEPTION_INFORMATION mei;
1036   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1037 
1038   HANDLE hProcess = GetCurrentProcess();
1039   DWORD processId = GetCurrentProcessId();
1040   MINIDUMP_TYPE dumpType;
1041 
1042   shutdown();
1043   if (!dump_core || dumpFile == NULL) {
1044     if (dumpFile != NULL) {
1045       CloseHandle(dumpFile);
1046     }
1047     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1048   }
1049 
1050   dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
1051 
1052   if (dbghelp == NULL) {
1053     jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
1054     CloseHandle(dumpFile);
1055     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1056   }
1057 
1058   _MiniDumpWriteDump =
1059       CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1060                                     PMINIDUMP_EXCEPTION_INFORMATION,
1061                                     PMINIDUMP_USER_STREAM_INFORMATION,
1062                                     PMINIDUMP_CALLBACK_INFORMATION),
1063                                     GetProcAddress(dbghelp,
1064                                     "MiniDumpWriteDump"));
1065 
1066   if (_MiniDumpWriteDump == NULL) {
1067     jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
1068     CloseHandle(dumpFile);
1069     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1070   }
1071 
1072   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1073     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1074 
1075   if (siginfo != NULL && context != NULL) {
1076     ep.ContextRecord = (PCONTEXT) context;
1077     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1078 
1079     mei.ThreadId = GetCurrentThreadId();
1080     mei.ExceptionPointers = &ep;
1081     pmei = &mei;
1082   } else {
1083     pmei = NULL;
1084   }
1085 
1086   // Older versions of dbghelp.dll (the one shipped with Win2003, for example) may not support all
1087   // the dump types we really want. If the first call fails, fall back to just using MiniDumpWithFullMemory.
1088   if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
1089       _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
1090     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1091   }
1092   CloseHandle(dumpFile);
1093   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1094 }
1095 
1096 // Die immediately, no exit hook, no abort hook, no cleanup.
1097 void os::die() {
1098   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1099 }
1100 
1101 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1102 //  * dirent_md.c       1.15 00/02/02
1103 //
1104 // The declarations for DIR and struct dirent are in jvm_win32.h.
1105 
1106 // Caller must have already run dirname through JVM_NativePath, which removes
1107 // duplicate slashes and converts all instances of '/' into '\\'.
1108 
1109 DIR * os::opendir(const char *dirname) {
1110   assert(dirname != NULL, "just checking");   // hotspot change
1111   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1112   DWORD fattr;                                // hotspot change
1113   char alt_dirname[4] = { 0, 0, 0, 0 };
1114 
1115   if (dirp == 0) {
1116     errno = ENOMEM;
1117     return 0;
1118   }
1119 
1120   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1121   // as a directory in FindFirstFile().  We detect this case here and
1122   // prepend the current drive name.
1123   //
1124   if (dirname[1] == '\0' && dirname[0] == '\\') {
1125     alt_dirname[0] = _getdrive() + 'A' - 1;
1126     alt_dirname[1] = ':';
1127     alt_dirname[2] = '\\';
1128     alt_dirname[3] = '\0';
1129     dirname = alt_dirname;
1130   }
1131 
1132   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1133   if (dirp->path == 0) {
1134     free(dirp);
1135     errno = ENOMEM;
1136     return 0;
1137   }
1138   strcpy(dirp->path, dirname);
1139 
1140   fattr = GetFileAttributes(dirp->path);
1141   if (fattr == 0xffffffff) {
1142     free(dirp->path);
1143     free(dirp);
1144     errno = ENOENT;
1145     return 0;
1146   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1147     free(dirp->path);
1148     free(dirp);
1149     errno = ENOTDIR;
1150     return 0;
1151   }
1152 
1153   // Append "*.*", or possibly "\\*.*", to path
1154   if (dirp->path[1] == ':' &&
1155       (dirp->path[2] == '\0' ||
1156       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1157     // No '\\' needed for cases like "Z:" or "Z:\"
1158     strcat(dirp->path, "*.*");
1159   } else {
1160     strcat(dirp->path, "\\*.*");
1161   }
1162 
1163   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1164   if (dirp->handle == INVALID_HANDLE_VALUE) {
1165     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1166       free(dirp->path);
1167       free(dirp);
1168       errno = EACCES;
1169       return 0;
1170     }
1171   }
1172   return dirp;
1173 }
1174 
1175 // parameter dbuf unused on Windows
1176 struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
1177   assert(dirp != NULL, "just checking");      // hotspot change
1178   if (dirp->handle == INVALID_HANDLE_VALUE) {
1179     return 0;
1180   }
1181 
1182   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1183 
1184   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1185     if (GetLastError() == ERROR_INVALID_HANDLE) {
1186       errno = EBADF;
1187       return 0;
1188     }
1189     FindClose(dirp->handle);
1190     dirp->handle = INVALID_HANDLE_VALUE;
1191   }
1192 
1193   return &dirp->dirent;
1194 }
1195 
1196 int os::closedir(DIR *dirp) {
1197   assert(dirp != NULL, "just checking");      // hotspot change
1198   if (dirp->handle != INVALID_HANDLE_VALUE) {
1199     if (!FindClose(dirp->handle)) {
1200       errno = EBADF;
1201       return -1;
1202     }
1203     dirp->handle = INVALID_HANDLE_VALUE;
1204   }
1205   free(dirp->path);
1206   free(dirp);
1207   return 0;
1208 }
1209 
1210 // This must be hard coded because it's the system's temporary
1211 // directory not the java application's temp directory, ala java.io.tmpdir.
1212 const char* os::get_temp_directory() {
1213   static char path_buf[MAX_PATH];
1214   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1215     return path_buf;
1216   } else {
1217     path_buf[0] = '\0';
1218     return path_buf;
1219   }
1220 }
1221 
1222 static bool file_exists(const char* filename) {
1223   if (filename == NULL || strlen(filename) == 0) {
1224     return false;
1225   }
1226   return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1227 }
1228 
1229 bool os::dll_build_name(char *buffer, size_t buflen,
1230                         const char* pname, const char* fname) {
1231   bool retval = false;
1232   const size_t pnamelen = pname ? strlen(pname) : 0;
1233   const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1234 
1235   // Return error on buffer overflow.
1236   if (pnamelen + strlen(fname) + 10 > buflen) {
1237     return retval;
1238   }
1239 
1240   if (pnamelen == 0) {
1241     jio_snprintf(buffer, buflen, "%s.dll", fname);
1242     retval = true;
1243   } else if (c == ':' || c == '\\') {
1244     jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1245     retval = true;
1246   } else if (strchr(pname, *os::path_separator()) != NULL) {
1247     int n;
1248     char** pelements = split_path(pname, &n);
1249     if (pelements == NULL) {
1250       return false;
1251     }
1252     for (int i = 0; i < n; i++) {
1253       char* path = pelements[i];
1254       // Really shouldn't be NULL, but check can't hurt
1255       size_t plen = (path == NULL) ? 0 : strlen(path);
1256       if (plen == 0) {
1257         continue; // skip the empty path values
1258       }
1259       const char lastchar = path[plen - 1];
1260       if (lastchar == ':' || lastchar == '\\') {
1261         jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1262       } else {
1263         jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1264       }
1265       if (file_exists(buffer)) {
1266         retval = true;
1267         break;
1268       }
1269     }
1270     // release the storage
1271     for (int i = 0; i < n; i++) {
1272       if (pelements[i] != NULL) {
1273         FREE_C_HEAP_ARRAY(char, pelements[i]);
1274       }
1275     }
1276     if (pelements != NULL) {
1277       FREE_C_HEAP_ARRAY(char*, pelements);
1278     }
1279   } else {
1280     jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1281     retval = true;
1282   }
1283   return retval;
1284 }
1285 
1286 // Needs to be in os specific directory because windows requires another
1287 // header file <direct.h>
1288 const char* os::get_current_directory(char *buf, size_t buflen) {
1289   int n = static_cast<int>(buflen);
1290   if (buflen > INT_MAX)  n = INT_MAX;
1291   return _getcwd(buf, n);
1292 }
1293 
1294 //-----------------------------------------------------------
1295 // Helper functions for fatal error handler
1296 #ifdef _WIN64
1297 // Helper routine which returns true if the address is
1298 // within the NTDLL address space.
1299 //
1300 static bool _addr_in_ntdll(address addr) {
1301   HMODULE hmod;
1302   MODULEINFO minfo;
1303 
1304   hmod = GetModuleHandle("NTDLL.DLL");
1305   if (hmod == NULL) return false;
1306   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1307                                           &minfo, sizeof(MODULEINFO))) {
1308     return false;
1309   }
1310 
1311   if ((addr >= minfo.lpBaseOfDll) &&
1312       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1313     return true;
1314   } else {
1315     return false;
1316   }
1317 }
1318 #endif
1319 
1320 struct _modinfo {
1321   address addr;
1322   char*   full_path;   // point to a char buffer
1323   int     buflen;      // size of the buffer
1324   address base_addr;
1325 };
1326 
1327 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1328                                   address top_address, void * param) {
1329   struct _modinfo *pmod = (struct _modinfo *)param;
1330   if (!pmod) return -1;
1331 
1332   if (base_addr   <= pmod->addr &&
1333       top_address > pmod->addr) {
1334     // if a buffer is provided, copy path name to the buffer
1335     if (pmod->full_path) {
1336       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1337     }
1338     pmod->base_addr = base_addr;
1339     return 1;
1340   }
1341   return 0;
1342 }
1343 
1344 bool os::dll_address_to_library_name(address addr, char* buf,
1345                                      int buflen, int* offset) {
1346   // buf is not optional, but offset is optional
1347   assert(buf != NULL, "sanity check");
1348 
1349 // NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
1350 //       return the full path to the DLL file; sometimes it returns the path
1351 //       to the corresponding PDB file (debug info), and sometimes it only
1352 //       returns a partial path, which makes life painful.
1353 
1354   struct _modinfo mi;
1355   mi.addr      = addr;
1356   mi.full_path = buf;
1357   mi.buflen    = buflen;
1358   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1359     // buf already contains path name
1360     if (offset) *offset = addr - mi.base_addr;
1361     return true;
1362   }
1363 
1364   buf[0] = '\0';
1365   if (offset) *offset = -1;
1366   return false;
1367 }
1368 
1369 bool os::dll_address_to_function_name(address addr, char *buf,
1370                                       int buflen, int *offset,
1371                                       bool demangle) {
1372   // buf is not optional, but offset is optional
1373   assert(buf != NULL, "sanity check");
1374 
1375   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1376     return true;
1377   }
1378   if (offset != NULL)  *offset  = -1;
1379   buf[0] = '\0';
1380   return false;
1381 }
1382 
1383 // save the start and end address of jvm.dll into param[0] and param[1]
1384 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1385                            address top_address, void * param) {
1386   if (!param) return -1;
1387 
1388   if (base_addr   <= (address)_locate_jvm_dll &&
1389       top_address > (address)_locate_jvm_dll) {
1390     ((address*)param)[0] = base_addr;
1391     ((address*)param)[1] = top_address;
1392     return 1;
1393   }
1394   return 0;
1395 }
1396 
1397 address vm_lib_location[2];    // start and end address of jvm.dll
1398 
1399 // check if addr is inside jvm.dll
1400 bool os::address_is_in_vm(address addr) {
1401   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1402     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1403       assert(false, "Can't find jvm module.");
1404       return false;
1405     }
1406   }
1407 
1408   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1409 }
1410 
1411 // print module info; param is outputStream*
1412 static int _print_module(const char* fname, address base_address,
1413                          address top_address, void* param) {
1414   if (!param) return -1;
1415 
1416   outputStream* st = (outputStream*)param;
1417 
1418   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1419   return 0;
1420 }
1421 
1422 // Loads a .dll/.so and,
1423 // in case of error, checks whether the .dll/.so was built for the
1424 // same architecture as Hotspot is running on.
1425 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1426   void * result = LoadLibrary(name);
1427   if (result != NULL) {
1428     return result;
1429   }
1430 
1431   DWORD errcode = GetLastError();
1432   if (errcode == ERROR_MOD_NOT_FOUND) {
1433     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1434     ebuf[ebuflen - 1] = '\0';
1435     return NULL;
1436   }
1437 
1438   // Parsing the dll below:
1439   // If we can read the dll-info and find that the dll was built
1440   // for an architecture other than the one Hotspot is running on,
1441   // then print "DLL was built for a different architecture" to the buffer,
1442   // else call os::lasterror to obtain the system error message.
1443 
1444   // Read system error message into ebuf
1445   // It may or may not be overwritten below (in the for loop and just above)
1446   lasterror(ebuf, (size_t) ebuflen);
1447   ebuf[ebuflen - 1] = '\0';
1448   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1449   if (fd < 0) {
1450     return NULL;
1451   }
1452 
1453   uint32_t signature_offset;
1454   uint16_t lib_arch = 0;
1455   bool failed_to_get_lib_arch =
1456     ( // Go to position 3c in the dll
1457      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1458      ||
1459      // Read location of signature
1460      (sizeof(signature_offset) !=
1461      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1462      ||
1463      // Go to COFF File Header in dll
1464      // that is located after "signature" (4 bytes long)
1465      (os::seek_to_file_offset(fd,
1466      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1467      ||
1468      // Read field that contains code of architecture
1469      // that dll was built for
1470      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1471     );
1472 
1473   ::close(fd);
1474   if (failed_to_get_lib_arch) {
1475     // file i/o error - report os::lasterror(...) msg
1476     return NULL;
1477   }
1478 
1479   typedef struct {
1480     uint16_t arch_code;
1481     char* arch_name;
1482   } arch_t;
1483 
1484   static const arch_t arch_array[] = {
1485     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1486     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1487     {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
1488   };
1489 #if   (defined _M_IA64)
1490   static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
1491 #elif (defined _M_AMD64)
1492   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1493 #elif (defined _M_IX86)
1494   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1495 #else
1496   #error Method os::dll_load requires that one of following \
1497          is defined :_M_IA64,_M_AMD64 or _M_IX86
1498 #endif
1499 
1500 
1501   // Obtain a string for printf operation
1502   // lib_arch_str shall contain a string describing what platform this .dll was built for
1503   // running_arch_str shall contain a string describing what platform Hotspot was built for
1504   char *running_arch_str = NULL, *lib_arch_str = NULL;
1505   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1506     if (lib_arch == arch_array[i].arch_code) {
1507       lib_arch_str = arch_array[i].arch_name;
1508     }
1509     if (running_arch == arch_array[i].arch_code) {
1510       running_arch_str = arch_array[i].arch_name;
1511     }
1512   }
1513 
1514   assert(running_arch_str,
1515          "Didn't find running architecture code in arch_array");
1516 
1517   // If the architecture is right
1518   // but some other error took place - report os::lasterror(...) msg
1519   if (lib_arch == running_arch) {
1520     return NULL;
1521   }
1522 
1523   if (lib_arch_str != NULL) {
1524     ::_snprintf(ebuf, ebuflen - 1,
1525                 "Can't load %s-bit .dll on a %s-bit platform",
1526                 lib_arch_str, running_arch_str);
1527   } else {
1528     // don't know what architecture this dll was built for
1529     ::_snprintf(ebuf, ebuflen - 1,
1530                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1531                 lib_arch, running_arch_str);
1532   }
1533 
1534   return NULL;
1535 }
1536 
1537 void os::print_dll_info(outputStream *st) {
1538   st->print_cr("Dynamic libraries:");
1539   get_loaded_modules_info(_print_module, (void *)st);
1540 }
1541 
1542 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1543   HANDLE   hProcess;
1544 
1545 # define MAX_NUM_MODULES 128
1546   HMODULE     modules[MAX_NUM_MODULES];
1547   static char filename[MAX_PATH];
1548   int         result = 0;
1549 
1550   int pid = os::current_process_id();
1551   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1552                          FALSE, pid);
1553   if (hProcess == NULL) return 0;
1554 
1555   DWORD size_needed;
1556   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1557     CloseHandle(hProcess);
1558     return 0;
1559   }
1560 
1561   // number of modules that are currently loaded
1562   int num_modules = size_needed / sizeof(HMODULE);
1563 
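       // size_needed may exceed sizeof(modules) if more than MAX_NUM_MODULES are loaded;
       // the loop below only walks the entries that fit in the array.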
1564   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1565     // Get Full pathname:
1566     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1567       filename[0] = '\0';
1568     }
1569 
1570     MODULEINFO modinfo;
1571     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1572       modinfo.lpBaseOfDll = NULL;
1573       modinfo.SizeOfImage = 0;
1574     }
1575 
1576     // Invoke callback function
1577     result = callback(filename, (address)modinfo.lpBaseOfDll,
1578                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1579     if (result) break;
1580   }
1581 
1582   CloseHandle(hProcess);
1583   return result;
1584 }
1585 
1586 bool os::get_host_name(char* buf, size_t buflen) {
1587   DWORD size = (DWORD)buflen;
1588   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1589 }
1590 
1591 void os::get_summary_os_info(char* buf, size_t buflen) {
1592   stringStream sst(buf, buflen);
1593   os::win32::print_windows_version(&sst);
1594   // chop off newline character
1595   char* nl = strchr(buf, '\n');
1596   if (nl != NULL) *nl = '\0';
1597 }
1598 
1599 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1600   int ret = vsnprintf(buf, len, fmt, args);
1601   // Get the correct buffer size if buf is too small
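       // (with the Microsoft CRT, vsnprintf may return a negative value on
       // truncation; _vscprintf returns the number of characters the formatted
       // string would need, not counting the terminating NUL)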
1602   if (ret < 0) {
1603     return _vscprintf(fmt, args);
1604   }
1605   return ret;
1606 }
1607 
1608 static inline time_t get_mtime(const char* filename) {
1609   struct stat st;
1610   int ret = os::stat(filename, &st);
1611   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
1612   return st.st_mtime;
1613 }
1614 
1615 int os::compare_file_modified_times(const char* file1, const char* file2) {
1616   time_t t1 = get_mtime(file1);
1617   time_t t2 = get_mtime(file2);
1618   return t1 - t2;
1619 }
1620 
1621 void os::print_os_info_brief(outputStream* st) {
1622   os::print_os_info(st);
1623 }
1624 
1625 void os::print_os_info(outputStream* st) {
1626 #ifdef ASSERT
1627   char buffer[1024];
1628   st->print("HostName: ");
1629   if (get_host_name(buffer, sizeof(buffer))) {
1630     st->print("%s ", buffer);
1631   } else {
1632     st->print("N/A ");
1633   }
1634 #endif
1635   st->print("OS:");
1636   os::win32::print_windows_version(st);
1637 }
1638 
1639 void os::win32::print_windows_version(outputStream* st) {
1640   OSVERSIONINFOEX osvi;
1641   VS_FIXEDFILEINFO *file_info;
1642   TCHAR kernel32_path[MAX_PATH];
1643   UINT len, ret;
1644 
1645   // Use the GetVersionEx information to see if we're on a server or
1646   // workstation edition of Windows. Starting with Windows 8.1 we can't
1647   // trust the OS version information returned by this API.
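     // (Without an application manifest that targets the newer releases,
     // GetVersionEx reports at most version 6.2, i.e. Windows 8, there.)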
1648   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1649   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1650   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1651     st->print_cr("Call to GetVersionEx failed");
1652     return;
1653   }
1654   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1655 
1656   // Get the full path to \Windows\System32\kernel32.dll and use that for
1657   // determining what version of Windows we're running on.
1658   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1659   ret = GetSystemDirectory(kernel32_path, len);
1660   if (ret == 0 || ret > len) {
1661     st->print_cr("Call to GetSystemDirectory failed");
1662     return;
1663   }
1664   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1665 
1666   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1667   if (version_size == 0) {
1668     st->print_cr("Call to GetFileVersionInfoSize failed");
1669     return;
1670   }
1671 
1672   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1673   if (version_info == NULL) {
1674     st->print_cr("Failed to allocate version_info");
1675     return;
1676   }
1677 
1678   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1679     os::free(version_info);
1680     st->print_cr("Call to GetFileVersionInfo failed");
1681     return;
1682   }
1683 
1684   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1685     os::free(version_info);
1686     st->print_cr("Call to VerQueryValue failed");
1687     return;
1688   }
1689 
1690   int major_version = HIWORD(file_info->dwProductVersionMS);
1691   int minor_version = LOWORD(file_info->dwProductVersionMS);
1692   int build_number = HIWORD(file_info->dwProductVersionLS);
1693   int build_minor = LOWORD(file_info->dwProductVersionLS);
1694   int os_vers = major_version * 1000 + minor_version;
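       // e.g. kernel32.dll version 6.3 (Windows 8.1 / Server 2012 R2) maps to
       // os_vers 6003, and 10.0 (Windows 10 / Server 2016) maps to 10000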
1695   os::free(version_info);
1696 
1697   st->print(" Windows ");
1698   switch (os_vers) {
1699 
1700   case 6000:
1701     if (is_workstation) {
1702       st->print("Vista");
1703     } else {
1704       st->print("Server 2008");
1705     }
1706     break;
1707 
1708   case 6001:
1709     if (is_workstation) {
1710       st->print("7");
1711     } else {
1712       st->print("Server 2008 R2");
1713     }
1714     break;
1715 
1716   case 6002:
1717     if (is_workstation) {
1718       st->print("8");
1719     } else {
1720       st->print("Server 2012");
1721     }
1722     break;
1723 
1724   case 6003:
1725     if (is_workstation) {
1726       st->print("8.1");
1727     } else {
1728       st->print("Server 2012 R2");
1729     }
1730     break;
1731 
1732   case 10000:
1733     if (is_workstation) {
1734       st->print("10");
1735     } else {
1736       st->print("Server 2016");
1737     }
1738     break;
1739 
1740   default:
1741     // Unrecognized Windows version; print its major and minor version numbers
1742     st->print("%d.%d", major_version, minor_version);
1743     break;
1744   }
1745 
1746   // Retrieve SYSTEM_INFO from a GetNativeSystemInfo call so that we can
1747   // find out whether we are running on a 64-bit processor
1748   SYSTEM_INFO si;
1749   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1750   GetNativeSystemInfo(&si);
1751   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1752     st->print(" , 64 bit");
1753   }
1754 
1755   st->print(" Build %d", build_number);
1756   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1757   st->cr();
1758 }
1759 
1760 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1761   // Nothing to do for now.
1762 }
1763 
1764 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1765   HKEY key;
1766   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1767                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1768   if (status == ERROR_SUCCESS) {
1769     DWORD size = (DWORD)buflen;
1770     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1771     if (status != ERROR_SUCCESS) {
1772       strncpy(buf, "## __CPU__", buflen);
1773     }
1774     RegCloseKey(key);
1775   } else {
1776     // Fall back to a generic CPU description
1777     strncpy(buf, "## __CPU__", buflen);
1778   }
1779 }
1780 
1781 void os::print_memory_info(outputStream* st) {
1782   st->print("Memory:");
1783   st->print(" %dk page", os::vm_page_size()>>10);
1784 
1785   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
1786   // incorrect value if total memory is larger than 4GB
1787   MEMORYSTATUSEX ms;
1788   ms.dwLength = sizeof(ms);
1789   GlobalMemoryStatusEx(&ms);
1790 
1791   st->print(", physical %uk", os::physical_memory() >> 10);
1792   st->print("(%uk free)", os::available_memory() >> 10);
1793 
1794   st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1795   st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1796   st->cr();
1797 }
1798 
1799 void os::print_siginfo(outputStream *st, const void* siginfo) {
1800   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1801   st->print("siginfo:");
1802 
1803   char tmp[64];
1804   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1805     strcpy(tmp, "EXCEPTION_??");
1806   }
1807   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1808 
1809   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1810        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1811        er->NumberParameters >= 2) {
1812     switch (er->ExceptionInformation[0]) {
1813     case 0: st->print(", reading address"); break;
1814     case 1: st->print(", writing address"); break;
1815     case 8: st->print(", data execution prevention violation at address"); break;
1816     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1817                        er->ExceptionInformation[0]);
1818     }
1819     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1820   } else {
1821     int num = er->NumberParameters;
1822     if (num > 0) {
1823       st->print(", ExceptionInformation=");
1824       for (int i = 0; i < num; i++) {
1825         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1826       }
1827     }
1828   }
1829   st->cr();
1830 }
1831 
1832 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1833   // do nothing
1834 }
1835 
1836 static char saved_jvm_path[MAX_PATH] = {0};
1837 
1838 // Find the full path to the current module, jvm.dll
1839 void os::jvm_path(char *buf, jint buflen) {
1840   // Error checking.
1841   if (buflen < MAX_PATH) {
1842     assert(false, "must use a large-enough buffer");
1843     buf[0] = '\0';
1844     return;
1845   }
1846   // Lazy resolve the path to current module.
1847   if (saved_jvm_path[0] != 0) {
1848     strcpy(buf, saved_jvm_path);
1849     return;
1850   }
1851 
1852   buf[0] = '\0';
1853   if (Arguments::sun_java_launcher_is_altjvm()) {
1854     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1855     // for a JAVA_HOME environment variable and fix up the path so it
1856     // looks like jvm.dll is installed there (append a fake suffix
1857     // hotspot/jvm.dll).
1858     char* java_home_var = ::getenv("JAVA_HOME");
1859     if (java_home_var != NULL && java_home_var[0] != 0 &&
1860         strlen(java_home_var) < (size_t)buflen) {
1861       strncpy(buf, java_home_var, buflen);
1862 
1863       // determine if this is a legacy image or a modules image;
1864       // a modules image doesn't have a "jre" subdirectory
1865       size_t len = strlen(buf);
1866       char* jrebin_p = buf + len;
1867       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1868       if (0 != _access(buf, 0)) {
1869         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1870       }
1871       len = strlen(buf);
1872       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1873     }
1874   }
1875 
1876   if (buf[0] == '\0') {
1877     GetModuleFileName(vm_lib_handle, buf, buflen);
1878   }
1879   strncpy(saved_jvm_path, buf, MAX_PATH);
1880   saved_jvm_path[MAX_PATH - 1] = '\0';
1881 }
1882 
1883 
1884 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1885 #ifndef _WIN64
1886   st->print("_");
1887 #endif
1888 }
1889 
1890 
1891 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1892 #ifndef _WIN64
1893   st->print("@%d", args_size  * sizeof(int));
1894 #endif
1895 }
1896 
1897 // This method is a copy of JDK's sysGetLastErrorString
1898 // from src/windows/hpi/src/system_md.c
1899 
1900 size_t os::lasterror(char* buf, size_t len) {
1901   DWORD errval;
1902 
1903   if ((errval = GetLastError()) != 0) {
1904     // DOS error
1905     size_t n = (size_t)FormatMessage(
1906                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1907                                      NULL,
1908                                      errval,
1909                                      0,
1910                                      buf,
1911                                      (DWORD)len,
1912                                      NULL);
1913     if (n > 3) {
1914       // Drop final '.', CR, LF
1915       if (buf[n - 1] == '\n') n--;
1916       if (buf[n - 1] == '\r') n--;
1917       if (buf[n - 1] == '.') n--;
1918       buf[n] = '\0';
1919     }
1920     return n;
1921   }
1922 
1923   if (errno != 0) {
1924     // C runtime error that has no corresponding DOS error code
1925     const char* s = os::strerror(errno);
1926     size_t n = strlen(s);
1927     if (n >= len) n = len - 1;
1928     strncpy(buf, s, n);
1929     buf[n] = '\0';
1930     return n;
1931   }
1932 
1933   return 0;
1934 }
1935 
1936 int os::get_last_error() {
1937   DWORD error = GetLastError();
1938   if (error == 0) {
1939     error = errno;
1940   }
1941   return (int)error;
1942 }
1943 
1944 WindowsSemaphore::WindowsSemaphore(uint value) {
1945   _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);
1946 
1947   guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
1948 }
1949 
1950 WindowsSemaphore::~WindowsSemaphore() {
1951   ::CloseHandle(_semaphore);
1952 }
1953 
1954 void WindowsSemaphore::signal(uint count) {
1955   if (count > 0) {
1956     BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
1957 
1958     assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
1959   }
1960 }
1961 
1962 void WindowsSemaphore::wait() {
1963   DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
1964   assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
1965   assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
1966 }
1967 
1968 // sun.misc.Signal
1969 // NOTE that this is a workaround for an apparent kernel bug where if
1970 // a signal handler for SIGBREAK is installed then that signal handler
1971 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1972 // See bug 4416763.
1973 static void (*sigbreakHandler)(int) = NULL;
1974 
1975 static void UserHandler(int sig, void *siginfo, void *context) {
1976   os::signal_notify(sig);
1977   // We need to reinstate the signal handler each time...
1978   os::signal(sig, (void*)UserHandler);
1979 }
1980 
1981 void* os::user_handler() {
1982   return (void*) UserHandler;
1983 }
1984 
1985 void* os::signal(int signal_number, void* handler) {
1986   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1987     void (*oldHandler)(int) = sigbreakHandler;
1988     sigbreakHandler = (void (*)(int)) handler;
1989     return (void*) oldHandler;
1990   } else {
1991     return (void*)::signal(signal_number, (void (*)(int))handler);
1992   }
1993 }
1994 
1995 void os::signal_raise(int signal_number) {
1996   raise(signal_number);
1997 }
1998 
1999 // The Win32 C runtime library maps all console control events other than ^C
2000 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2001 // logoff, and shutdown events.  We therefore install our own console handler
2002 // that raises SIGTERM for the latter cases.
2003 //
2004 static BOOL WINAPI consoleHandler(DWORD event) {
2005   switch (event) {
2006   case CTRL_C_EVENT:
2007     if (is_error_reported()) {
2008       // Ctrl-C is pressed during error reporting, likely because the error
2009       // handler fails to abort. Let VM die immediately.
2010       os::die();
2011     }
2012 
2013     os::signal_raise(SIGINT);
2014     return TRUE;
2015     break;
2016   case CTRL_BREAK_EVENT:
2017     if (sigbreakHandler != NULL) {
2018       (*sigbreakHandler)(SIGBREAK);
2019     }
2020     return TRUE;
2021     break;
2022   case CTRL_LOGOFF_EVENT: {
2023     // Don't terminate JVM if it is running in a non-interactive session,
2024     // such as a service process.
2025     USEROBJECTFLAGS flags;
2026     HANDLE handle = GetProcessWindowStation();
2027     if (handle != NULL &&
2028         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2029         sizeof(USEROBJECTFLAGS), NULL)) {
2030       // If it is a non-interactive session, let the next handler
2031       // deal with it.
2032       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2033         return FALSE;
2034       }
2035     }
2036   }
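     // Interactive logoff: fall through and treat it like the close/shutdown
     // events below.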
2037   case CTRL_CLOSE_EVENT:
2038   case CTRL_SHUTDOWN_EVENT:
2039     os::signal_raise(SIGTERM);
2040     return TRUE;
2041     break;
2042   default:
2043     break;
2044   }
2045   return FALSE;
2046 }
2047 
2048 // The following code was moved from os.cpp to make it platform specific,
2049 // which it is by its very nature.
2050 
2051 // Return maximum OS signal used + 1 for internal use only
2052 // Used as exit signal for signal_thread
2053 int os::sigexitnum_pd() {
2054   return NSIG;
2055 }
2056 
2057 // a counter for each possible signal value, including signal_thread exit signal
2058 static volatile jint pending_signals[NSIG+1] = { 0 };
2059 static HANDLE sig_sem = NULL;
2060 
2061 void os::signal_init_pd() {
2062   // Initialize signal structures
2063   memset((void*)pending_signals, 0, sizeof(pending_signals));
2064 
2065   sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
2066 
2067   // Programs embedding the VM do not want it to attempt to receive
2068   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2069   // shutdown hooks mechanism introduced in 1.3.  For example, when
2070   // the VM is run as part of a Windows NT service (i.e., a servlet
2071   // engine in a web server), the correct behavior is for any console
2072   // control handler to return FALSE, not TRUE, because the OS's
2073   // "final" handler for such events allows the process to continue if
2074   // it is a service (while terminating it if it is not a service).
2075   // To make this behavior uniform and the mechanism simpler, we
2076   // completely disable the VM's usage of these console events if -Xrs
2077   // (=ReduceSignalUsage) is specified.  This means, for example, that
2078   // the CTRL-BREAK thread dump mechanism is also disabled in this
2079   // case.  See bugs 4323062, 4345157, and related bugs.
2080 
2081   if (!ReduceSignalUsage) {
2082     // Add a CTRL-C handler
2083     SetConsoleCtrlHandler(consoleHandler, TRUE);
2084   }
2085 }
2086 
2087 void os::signal_notify(int signal_number) {
2088   BOOL ret;
2089   if (sig_sem != NULL) {
2090     Atomic::inc(&pending_signals[signal_number]);
2091     ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2092     assert(ret != 0, "ReleaseSemaphore() failed");
2093   }
2094 }
2095 
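     // check_pending_signals: atomically claim one pending occurrence of any
     // signal and return its number. If wait_for_signal is true, block on
     // sig_sem (as a safepoint-safe blocked thread) until a signal arrives;
     // otherwise return -1 when nothing is pending.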
2096 static int check_pending_signals(bool wait_for_signal) {
2097   DWORD ret;
2098   while (true) {
2099     for (int i = 0; i < NSIG + 1; i++) {
2100       jint n = pending_signals[i];
2101       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2102         return i;
2103       }
2104     }
2105     if (!wait_for_signal) {
2106       return -1;
2107     }
2108 
2109     JavaThread *thread = JavaThread::current();
2110 
2111     ThreadBlockInVM tbivm(thread);
2112 
2113     bool threadIsSuspended;
2114     do {
2115       thread->set_suspend_equivalent();
2116       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2117       ret = ::WaitForSingleObject(sig_sem, INFINITE);
2118       assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
2119 
2120       // were we externally suspended while we were waiting?
2121       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2122       if (threadIsSuspended) {
2123         // The semaphore has been incremented, but while we were waiting
2124         // another thread suspended us. We don't want to continue running
2125         // while suspended because that would surprise the thread that
2126         // suspended us.
2127         ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2128         assert(ret != 0, "ReleaseSemaphore() failed");
2129 
2130         thread->java_suspend_self();
2131       }
2132     } while (threadIsSuspended);
2133   }
2134 }
2135 
2136 int os::signal_lookup() {
2137   return check_pending_signals(false);
2138 }
2139 
2140 int os::signal_wait() {
2141   return check_pending_signals(true);
2142 }
2143 
2144 // Implicit OS exception handling
2145 
2146 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2147                       address handler) {
2148   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2149   // Save pc in thread
2150 #ifdef _M_IA64
2151   // Do not blow up if no thread info available.
2152   if (thread) {
2153     // Saving PRECISE pc (with slot information) in thread.
2154     uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
2155     // Convert precise PC into "Unix" format
2156     precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
2157     thread->set_saved_exception_pc((address)precise_pc);
2158   }
2159   // Set pc to handler
2160   exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
2161   // Clear out psr.ri (= Restart Instruction) in order to continue
2162   // at the beginning of the target bundle.
2163   exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
2164   assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
2165 #else
2166   #ifdef _M_AMD64
2167   // Do not blow up if no thread info available.
2168   if (thread) {
2169     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2170   }
2171   // Set pc to handler
2172   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2173   #else
2174   // Do not blow up if no thread info available.
2175   if (thread) {
2176     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2177   }
2178   // Set pc to handler
2179   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2180   #endif
2181 #endif
2182 
2183   // Continue the execution
2184   return EXCEPTION_CONTINUE_EXECUTION;
2185 }
2186 
2187 
2188 // Used for PostMortemDump
2189 extern "C" void safepoints();
2190 extern "C" void find(int x);
2191 extern "C" void events();
2192 
2193 // According to Windows API documentation, an illegal instruction sequence should generate
2194 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2195 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2196 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2197 
2198 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2199 
2200 // From "Execution Protection in the Windows Operating System" draft 0.35
2201 // Once a system header becomes available, the "real" define should be
2202 // included or copied here.
2203 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2204 
2205 // Handle NAT Bit consumption on IA64.
2206 #ifdef _M_IA64
2207   #define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
2208 #endif
2209 
2210 // Windows Vista/2008 heap corruption check
2211 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2212 
2213 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2214 // C++ compiler contain this error code. Because this is a compiler-generated
2215 // error, the code is not listed in the Win32 API header files.
2216 // The code is actually a cryptic mnemonic device, with the initial "E"
2217 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2218 // ASCII values of "msc".
2219 
2220 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
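     // (decomposed: 0xE0 | 0x6D 0x73 0x63, where 0x6D = 'm', 0x73 = 's', 0x63 = 'c')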
2221 
2222 #define def_excpt(val) { #val, (val) }
2223 
2224 static const struct { char* name; uint number; } exceptlabels[] = {
2225     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2226     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2227     def_excpt(EXCEPTION_BREAKPOINT),
2228     def_excpt(EXCEPTION_SINGLE_STEP),
2229     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2230     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2231     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2232     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2233     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2234     def_excpt(EXCEPTION_FLT_OVERFLOW),
2235     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2236     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2237     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2238     def_excpt(EXCEPTION_INT_OVERFLOW),
2239     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2240     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2241     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2242     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2243     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2244     def_excpt(EXCEPTION_STACK_OVERFLOW),
2245     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2246     def_excpt(EXCEPTION_GUARD_PAGE),
2247     def_excpt(EXCEPTION_INVALID_HANDLE),
2248     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2249     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2250 #ifdef _M_IA64
2251     , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION)
2252 #endif
2253 };
2254 
2255 #undef def_excpt
2256 
2257 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2258   uint code = static_cast<uint>(exception_code);
2259   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2260     if (exceptlabels[i].number == code) {
2261       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2262       return buf;
2263     }
2264   }
2265 
2266   return NULL;
2267 }
2268 
2269 //-----------------------------------------------------------------------------
2270 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2271   // handle exception caused by idiv; should only happen for -MinInt/-1
2272   // (division by zero is handled explicitly)
2273 #ifdef _M_IA64
2274   assert(0, "Fix Handle_IDiv_Exception");
2275 #else
2276   #ifdef  _M_AMD64
2277   PCONTEXT ctx = exceptionInfo->ContextRecord;
2278   address pc = (address)ctx->Rip;
2279   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2280   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2281   if (pc[0] == 0xF7) {
2282     // set correct result values and continue after idiv instruction
2283     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2284   } else {
2285     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2286   }
2287   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2288   // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2289   // idiv opcode (0xF7).
2290   ctx->Rdx = (DWORD)0;             // remainder
2291   // Continue the execution
2292   #else
2293   PCONTEXT ctx = exceptionInfo->ContextRecord;
2294   address pc = (address)ctx->Eip;
2295   assert(pc[0] == 0xF7, "not an idiv opcode");
2296   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2297   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2298   // set correct result values and continue after idiv instruction
2299   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2300   ctx->Eax = (DWORD)min_jint;      // result
2301   ctx->Edx = (DWORD)0;             // remainder
2302   // Continue the execution
2303   #endif
2304 #endif
2305   return EXCEPTION_CONTINUE_EXECUTION;
2306 }
2307 
2308 //-----------------------------------------------------------------------------
2309 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2310   PCONTEXT ctx = exceptionInfo->ContextRecord;
2311 #ifndef  _WIN64
2312   // handle exception caused by native method modifying control word
2313   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2314 
2315   switch (exception_code) {
2316   case EXCEPTION_FLT_DENORMAL_OPERAND:
2317   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2318   case EXCEPTION_FLT_INEXACT_RESULT:
2319   case EXCEPTION_FLT_INVALID_OPERATION:
2320   case EXCEPTION_FLT_OVERFLOW:
2321   case EXCEPTION_FLT_STACK_CHECK:
2322   case EXCEPTION_FLT_UNDERFLOW:
2323     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2324     if (fp_control_word != ctx->FloatSave.ControlWord) {
2325       // Restore FPCW and mask out FLT exceptions
2326       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2327       // Mask out pending FLT exceptions
2328       ctx->FloatSave.StatusWord &=  0xffffff00;
2329       return EXCEPTION_CONTINUE_EXECUTION;
2330     }
2331   }
2332 
2333   if (prev_uef_handler != NULL) {
2334     // We didn't handle this exception so pass it to the previous
2335     // UnhandledExceptionFilter.
2336     return (prev_uef_handler)(exceptionInfo);
2337   }
2338 #else // !_WIN64
2339   // On Windows, the mxcsr control bits are non-volatile across calls
2340   // See also CR 6192333
2341   //
2342   jint MxCsr = INITIAL_MXCSR;
2343   // we can't use StubRoutines::addr_mxcsr_std()
2344   // because in Win64 mxcsr is not saved there
2345   if (MxCsr != ctx->MxCsr) {
2346     ctx->MxCsr = MxCsr;
2347     return EXCEPTION_CONTINUE_EXECUTION;
2348   }
2349 #endif // !_WIN64
2350 
2351   return EXCEPTION_CONTINUE_SEARCH;
2352 }
2353 
2354 static inline void report_error(Thread* t, DWORD exception_code,
2355                                 address addr, void* siginfo, void* context) {
2356   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2357 
2358   // If UseOsErrorReporting, this will return here and save the error file
2359   // somewhere where we can find it in the minidump.
2360 }
2361 
2362 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2363         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2364   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2365   address addr = (address) exceptionRecord->ExceptionInformation[1];
2366   if (Interpreter::contains(pc)) {
2367     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2368     if (!fr->is_first_java_frame()) {
2369       assert(fr->safe_for_sender(thread), "Safety check");
2370       *fr = fr->java_sender();
2371     }
2372   } else {
2373     // more complex code with compiled code
2374     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2375     CodeBlob* cb = CodeCache::find_blob(pc);
2376     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2377       // Not sure where the pc points to, fallback to default
2378       // stack overflow handling
2379       return false;
2380     } else {
2381       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2382       // in compiled code, the stack banging is performed just after the return pc
2383       // has been pushed on the stack
2384       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2385       if (!fr->is_java_frame()) {
2386         assert(fr->safe_for_sender(thread), "Safety check");
2387         *fr = fr->java_sender();
2388       }
2389     }
2390   }
2391   assert(fr->is_java_frame(), "Safety check");
2392   return true;
2393 }
2394 
2395 //-----------------------------------------------------------------------------
2396 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2397   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2398   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2399 #ifdef _M_IA64
2400   // On Itanium, we need the "precise pc", which has the slot number coded
2401   // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
2402   address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
2403   // Convert the pc to "Unix format", which has the slot number coded
2404   // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
2405   // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
2406   // information is saved in the Unix format.
2407   address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
2408 #else
2409   #ifdef _M_AMD64
2410   address pc = (address) exceptionInfo->ContextRecord->Rip;
2411   #else
2412   address pc = (address) exceptionInfo->ContextRecord->Eip;
2413   #endif
2414 #endif
2415   Thread* t = Thread::current_or_null_safe();
2416 
2417   // Handle SafeFetch32 and SafeFetchN exceptions.
2418   if (StubRoutines::is_safefetch_fault(pc)) {
2419     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2420   }
2421 
2422 #ifndef _WIN64
2423   // Execution protection violation - win32 running on AMD64 only
2424   // Handled first to avoid misdiagnosis as a "normal" access violation;
2425   // This is safe to do because we have a new/unique ExceptionInformation
2426   // code for this condition.
2427   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2428     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2429     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2430     address addr = (address) exceptionRecord->ExceptionInformation[1];
2431 
2432     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2433       int page_size = os::vm_page_size();
2434 
2435       // Make sure the pc and the faulting address are sane.
2436       //
2437       // If an instruction spans a page boundary, and the page containing
2438       // the beginning of the instruction is executable but the following
2439       // page is not, the pc and the faulting address might be slightly
2440       // different - we still want to unguard the 2nd page in this case.
2441       //
2442       // 15 bytes seems to be a (very) safe value for max instruction size.
2443       bool pc_is_near_addr =
2444         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2445       bool instr_spans_page_boundary =
2446         (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2447                          (intptr_t) page_size) > 0);
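           // (pc XOR addr, aligned down to the page size, is non-zero exactly
           // when pc and addr lie on different pages)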
2448 
2449       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2450         static volatile address last_addr =
2451           (address) os::non_memory_address_word();
2452 
2453         // In conservative mode, don't unguard unless the address is in the VM
2454         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2455             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2456 
2457           // Set memory to RWX and retry
2458           address page_start =
2459             (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2460           bool res = os::protect_memory((char*) page_start, page_size,
2461                                         os::MEM_PROT_RWX);
2462 
2463           log_debug(os)("Execution protection violation "
2464                         "at " INTPTR_FORMAT
2465                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2466                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2467 
2468           // Set last_addr so if we fault again at the same address, we don't
2469           // end up in an endless loop.
2470           //
2471           // There are two potential complications here.  Two threads trapping
2472           // at the same address at the same time could cause one of the
2473           // threads to think it already unguarded, and abort the VM.  Likely
2474           // very rare.
2475           //
2476           // The other race involves two threads alternately trapping at
2477           // different addresses and failing to unguard the page, resulting in
2478           // an endless loop.  This condition is probably even more unlikely
2479           // than the first.
2480           //
2481           // Although both cases could be avoided by using locks or thread
2482           // local last_addr, these solutions are unnecessary complication:
2483           // this handler is a best-effort safety net, not a complete solution.
2484           // It is disabled by default and should only be used as a workaround
2485           // in case we missed any no-execute-unsafe VM code.
2486 
2487           last_addr = addr;
2488 
2489           return EXCEPTION_CONTINUE_EXECUTION;
2490         }
2491       }
2492 
2493       // Last unguard failed or not unguarding
2494       tty->print_raw_cr("Execution protection violation");
2495       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2496                    exceptionInfo->ContextRecord);
2497       return EXCEPTION_CONTINUE_SEARCH;
2498     }
2499   }
2500 #endif // _WIN64
2501 
2502   // Check to see if we caught the safepoint code in the
2503   // process of write protecting the memory serialization page.
2504   // It write enables the page immediately after protecting it
2505   // so just return.
2506   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2507     JavaThread* thread = (JavaThread*) t;
2508     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2509     address addr = (address) exceptionRecord->ExceptionInformation[1];
2510     if (os::is_memory_serialize_page(thread, addr)) {
2511       // Block the current thread until the memory serialize page permission is restored.
2512       os::block_on_serialize_page_trap();
2513       return EXCEPTION_CONTINUE_EXECUTION;
2514     }
2515   }
2516 
2517   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2518       VM_Version::is_cpuinfo_segv_addr(pc)) {
2519     // Verify that the OS saves/restores AVX registers.
2520     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2521   }
2522 
2523   if (t != NULL && t->is_Java_thread()) {
2524     JavaThread* thread = (JavaThread*) t;
2525     bool in_java = thread->thread_state() == _thread_in_Java;
2526 
2527     // Handle potential stack overflows up front.
2528     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2529 #ifdef _M_IA64
2530       // Use guard page for register stack.
2531       PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2532       address addr = (address) exceptionRecord->ExceptionInformation[1];
2533       // Check for a register stack overflow on Itanium
2534       if (thread->addr_inside_register_stack_red_zone(addr)) {
2535         // Fatal red zone violation happens if the Java program
2536         // catches a StackOverflow error and does so much processing
2537         // that it runs beyond the unprotected yellow guard zone. As
2538         // a result, we are out of here.
2539         fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
2540       } else if(thread->addr_inside_register_stack(addr)) {
2541         // Disable the yellow zone which sets the state that
2542         // we've got a stack overflow problem.
2543         if (thread->stack_yellow_reserved_zone_enabled()) {
2544           thread->disable_stack_yellow_reserved_zone();
2545         }
2546         // Give us some room to process the exception.
2547         thread->disable_register_stack_guard();
2548         // Tracing with +Verbose.
2549         if (Verbose) {
2550           tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
2551           tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
2552           tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
2553           tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
2554                         thread->register_stack_base(),
2555                         thread->register_stack_base() + thread->stack_size());
2556         }
2557 
2558         // Reguard the permanent register stack red zone just to be sure.
2559         // We saw Windows silently disabling this without telling us.
2560         thread->enable_register_stack_red_zone();
2561 
2562         return Handle_Exception(exceptionInfo,
2563                                 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2564       }
2565 #endif
2566       if (thread->stack_guards_enabled()) {
2567         if (in_java) {
2568           frame fr;
2569           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2570           address addr = (address) exceptionRecord->ExceptionInformation[1];
2571           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2572             assert(fr.is_java_frame(), "Must be a Java frame");
2573             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2574           }
2575         }
2576         // Yellow zone violation.  The o/s has unprotected the first yellow
2577         // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
2578         // update the enabled status, even if the zone contains only one page.
2579         thread->disable_stack_yellow_reserved_zone();
2580         // If not in java code, return and hope for the best.
2581         return in_java
2582             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2583             :  EXCEPTION_CONTINUE_EXECUTION;
2584       } else {
2585         // Fatal red zone violation.
2586         thread->disable_stack_red_zone();
2587         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2588         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2589                       exceptionInfo->ContextRecord);
2590         return EXCEPTION_CONTINUE_SEARCH;
2591       }
2592     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2593       // Either stack overflow or null pointer exception.
2594       if (in_java) {
2595         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2596         address addr = (address) exceptionRecord->ExceptionInformation[1];
2597         address stack_end = thread->stack_end();
2598         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2599           // Stack overflow.
2600           assert(!os::uses_stack_guard_pages(),
2601                  "should be caught by red zone code above.");
2602           return Handle_Exception(exceptionInfo,
2603                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2604         }
2605         // Check for safepoint polling and implicit null.
2606         // We only expect null pointers in the stubs (vtable);
2607         // the rest are checked explicitly now.
2608         CodeBlob* cb = CodeCache::find_blob(pc);
2609         if (cb != NULL) {
2610           if (os::is_poll_address(addr)) {
2611             address stub = SharedRuntime::get_poll_stub(pc);
2612             return Handle_Exception(exceptionInfo, stub);
2613           }
2614         }
2615         {
2616 #ifdef _WIN64
2617           // If it's a legal stack address, map the entire region in
2618           //
2619           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2620           address addr = (address) exceptionRecord->ExceptionInformation[1];
2621           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2622             addr = (address)((uintptr_t)addr &
2623                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2624             os::commit_memory((char *)addr, thread->stack_base() - addr,
2625                               !ExecMem);
2626             return EXCEPTION_CONTINUE_EXECUTION;
2627           } else
2628 #endif
2629           {
2630             // Null pointer exception.
2631 #ifdef _M_IA64
2632             // Process implicit null checks in compiled code. Note: Implicit null checks
2633             // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
2634             if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
2635               CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
2636               // Handle implicit null check in UEP method entry
2637               if (cb && (cb->is_frame_complete_at(pc) ||
2638                          (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
2639                 if (Verbose) {
2640                   intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
2641                   tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
2642                   tty->print_cr("      to addr " INTPTR_FORMAT, addr);
2643                   tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
2644                                 *(bundle_start + 1), *bundle_start);
2645                 }
2646                 return Handle_Exception(exceptionInfo,
2647                                         SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
2648               }
2649             }
2650 
2651             // Implicit null checks were processed above.  Hence, we should not reach
2652             // here in the usual case => die!
2653             if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
2654             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2655                          exceptionInfo->ContextRecord);
2656             return EXCEPTION_CONTINUE_SEARCH;
2657 
2658 #else // !IA64
2659 
2660             if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
2661               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2662               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2663             }
2664             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2665                          exceptionInfo->ContextRecord);
2666             return EXCEPTION_CONTINUE_SEARCH;
2667 #endif
2668           }
2669         }
2670       }
2671 
2672 #ifdef _WIN64
2673       // Special care for fast JNI field accessors.
2674       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2675       // in and the heap gets shrunk before the field access.
2676       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2677         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2678         if (addr != (address)-1) {
2679           return Handle_Exception(exceptionInfo, addr);
2680         }
2681       }
2682 #endif
2683 
2684       // Stack overflow or null pointer exception in native code.
2685       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2686                    exceptionInfo->ContextRecord);
2687       return EXCEPTION_CONTINUE_SEARCH;
2688     } // /EXCEPTION_ACCESS_VIOLATION
2689     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2690 #if defined _M_IA64
2691     else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2692               exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2693       M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
2694 
2695       // Compiled method patched to be non-entrant? The following conditions must apply:
2696       // 1. must be first instruction in bundle
2697       // 2. must be a break instruction with appropriate code
2698       if ((((uint64_t) pc & 0x0F) == 0) &&
2699           (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
2700         return Handle_Exception(exceptionInfo,
2701                                 (address)SharedRuntime::get_handle_wrong_method_stub());
2702       }
2703     } // /EXCEPTION_ILLEGAL_INSTRUCTION
2704 #endif
2705 
2706 
2707     if (in_java) {
2708       switch (exception_code) {
2709       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2710         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2711 
2712       case EXCEPTION_INT_OVERFLOW:
2713         return Handle_IDiv_Exception(exceptionInfo);
2714 
2715       } // switch
2716     }
2717     if (((thread->thread_state() == _thread_in_Java) ||
2718          (thread->thread_state() == _thread_in_native)) &&
2719          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2720       LONG result=Handle_FLT_Exception(exceptionInfo);
2721       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2722     }
2723   }
2724 
2725   if (exception_code != EXCEPTION_BREAKPOINT) {
2726     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2727                  exceptionInfo->ContextRecord);
2728   }
2729   return EXCEPTION_CONTINUE_SEARCH;
2730 }
2731 
2732 #ifndef _WIN64
2733 // Special care for fast JNI accessors.
2734 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2735 // the heap gets shrunk before the field access.
2736 // Need to install our own structured exception handler since native code may
2737 // install its own.
2738 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2739   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2740   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2741     address pc = (address) exceptionInfo->ContextRecord->Eip;
2742     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2743     if (addr != (address)-1) {
2744       return Handle_Exception(exceptionInfo, addr);
2745     }
2746   }
2747   return EXCEPTION_CONTINUE_SEARCH;
2748 }
2749 
2750 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2751   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2752                                                      jobject obj,           \
2753                                                      jfieldID fieldID) {    \
2754     __try {                                                                 \
2755       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2756                                                                  obj,       \
2757                                                                  fieldID);  \
2758     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2759                                               _exception_info())) {         \
2760     }                                                                       \
2761     return 0;                                                               \
2762   }
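     // For example, DEFINE_FAST_GETFIELD(jint, int, Int) below defines
     // jni_fast_GetIntField_wrapper, which forwards to
     // JNI_FastGetField::jni_fast_GetIntField_fp under the SEH filter above.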
2763 
2764 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2765 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2766 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2767 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2768 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2769 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2770 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2771 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2772 
2773 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2774   switch (type) {
2775   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2776   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2777   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2778   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2779   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2780   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2781   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2782   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2783   default:        ShouldNotReachHere();
2784   }
2785   return (address)-1;
2786 }
2787 #endif
2788 
2789 // Virtual Memory
2790 
2791 int os::vm_page_size() { return os::win32::vm_page_size(); }
2792 int os::vm_allocation_granularity() {
2793   return os::win32::vm_allocation_granularity();
2794 }
2795 
2796 // Windows large page support is available on Windows 2003. In order to use
2797 // large page memory, the administrator must first assign additional privilege
2798 // to the user:
2799 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2800 //   + select Local Policies -> User Rights Assignment
2801 //   + double click "Lock pages in memory", add users and/or groups
2802 //   + reboot
2803 // Note the above steps are needed for administrator as well, as administrators
2804 // by default do not have the privilege to lock pages in memory.
2805 //
2806 // Note about Windows 2003: although the API supports committing large page
2807 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2808 // scenario, I found through experiment that it only uses large pages if the
2809 // entire memory region is reserved and committed in a single VirtualAlloc() call.
2810 // This makes Windows large page support more or less like Solaris ISM, in
2811 // that the entire heap must be committed upfront. This will probably change
2812 // in the future; if so, the code below needs to be revisited.
2813 
2814 #ifndef MEM_LARGE_PAGES
2815   #define MEM_LARGE_PAGES 0x20000000
2816 #endif
2817 
2818 static HANDLE    _hProcess;
2819 static HANDLE    _hToken;
2820 
2821 // Container for NUMA node list info
2822 class NUMANodeListHolder {
2823  private:
2824   int *_numa_used_node_list;  // allocated below
2825   int _numa_used_node_count;
2826 
2827   void free_node_list() {
2828     if (_numa_used_node_list != NULL) {
2829       FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2830     }
2831   }
2832 
2833  public:
2834   NUMANodeListHolder() {
2835     _numa_used_node_count = 0;
2836     _numa_used_node_list = NULL;
2837     // do rest of initialization in build routine (after function pointers are set up)
2838   }
2839 
2840   ~NUMANodeListHolder() {
2841     free_node_list();
2842   }
2843 
2844   bool build() {
2845     DWORD_PTR proc_aff_mask;
2846     DWORD_PTR sys_aff_mask;
2847     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2848     ULONG highest_node_number;
2849     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2850     free_node_list();
2851     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
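         // Record only the NUMA nodes whose processors overlap this process's
         // affinity mask; memory on other nodes would always be remote to our threads.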
2852     for (unsigned int i = 0; i <= highest_node_number; i++) {
2853       ULONGLONG proc_mask_numa_node;
2854       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2855       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2856         _numa_used_node_list[_numa_used_node_count++] = i;
2857       }
2858     }
2859     return (_numa_used_node_count > 1);
2860   }
2861 
2862   int get_count() { return _numa_used_node_count; }
2863   int get_node_list_entry(int n) {
2864     // for indexes out of range, returns -1
2865     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2866   }
2867 
2868 } numa_node_list_holder;
2869 
2870 
2871 
2872 static size_t _large_page_size = 0;
2873 
2874 static bool request_lock_memory_privilege() {
2875   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2876                           os::current_process_id());
2877 
2878   LUID luid;
2879   if (_hProcess != NULL &&
2880       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2881       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2882 
2883     TOKEN_PRIVILEGES tp;
2884     tp.PrivilegeCount = 1;
2885     tp.Privileges[0].Luid = luid;
2886     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2887 
2888     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2889     // privilege. Check GetLastError() too. See the MSDN documentation.
2890     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2891         (GetLastError() == ERROR_SUCCESS)) {
2892       return true;
2893     }
2894   }
2895 
2896   return false;
2897 }
2898 
2899 static void cleanup_after_large_page_init() {
2900   if (_hProcess) CloseHandle(_hProcess);
2901   _hProcess = NULL;
2902   if (_hToken) CloseHandle(_hToken);
2903   _hToken = NULL;
2904 }
2905 
2906 static bool numa_interleaving_init() {
2907   bool success = false;
2908   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2909 
2910   // print a warning if the UseNUMAInterleaving flag is specified on the command line
2911   bool warn_on_failure = use_numa_interleaving_specified;
2912 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2913 
2914   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2915   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2916   NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2917 
2918   if (numa_node_list_holder.build()) {
2919     if (log_is_enabled(Debug, os, cpu)) {
2920       Log(os, cpu) log;
2921       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2922       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2923         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2924       }
2925     }
2926     success = true;
2927   } else {
2928     WARN("Process does not cover multiple NUMA nodes.");
2929   }
2930   if (!success) {
2931     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2932   }
2933   return success;
2934 #undef WARN
2935 }
2936 
2937 // This routine is used whenever we need to reserve a contiguous VA range
2938 // but must make separate VirtualAlloc calls for each piece of the range.
2939 // Reasons for doing this:
2940 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it may be set otherwise)
2941 //  * UseNUMAInterleaving requires each piece to be allocated on a (potentially) different NUMA node
2942 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2943                                          DWORD prot,
2944                                          bool should_inject_error = false) {
2945   char * p_buf;
2946   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2947   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2948   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2949 
2950   // First reserve enough address space in advance, since we want to be
2951   // able to break a single contiguous virtual address range into multiple
2952   // large-page commits, but WS2003 does not allow reserving large-page space.
2953   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2954   // address space. We then release that reservation and re-allocate the range
2955   // piece by piece using large pages.
2956   const size_t size_of_reserve = bytes + chunk_size;
2957   if (bytes > size_of_reserve) {
2958     // Overflowed.
2959     return NULL;
2960   }
2961   p_buf = (char *) VirtualAlloc(addr,
2962                                 size_of_reserve,  // size of Reserve
2963                                 MEM_RESERVE,
2964                                 PAGE_READWRITE);
2965   // If reservation failed, return NULL
2966   if (p_buf == NULL) return NULL;
2967   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2968   os::release_memory(p_buf, bytes + chunk_size);
2969 
2970   // We still need to round up to a page boundary (in case we are using large pages),
2971   // but not to a chunk boundary (in case NUMAInterleaveGranularity doesn't align with the page size);
2972   // instead we handle this in the bytes_to_rq computation below.
2973   p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2974 
2975   // now go through and allocate one chunk at a time until all bytes are
2976   // allocated
2977   size_t  bytes_remaining = bytes;
2978   // An overflow of align_size_up() would have been caught above
2979   // in the calculation of size_of_reserve.
2980   char * next_alloc_addr = p_buf;
2981   HANDLE hProc = GetCurrentProcess();
2982 
2983 #ifdef ASSERT
2984   // Variable for the failure injection
2985   long ran_num = os::random();
2986   size_t fail_after = ran_num % bytes;
2987 #endif
2988 
2989   int count=0;
2990   while (bytes_remaining) {
2991     // select bytes_to_rq to get to the next chunk_size boundary
2992 
2993     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
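         // e.g. with a 2M chunk_size and next_alloc_addr 512K past a chunk boundary,
         // bytes_to_rq is capped at 1536K, so the following allocation starts on a
         // chunk boundary and can be placed on the next NUMA node.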
2994     // Allocate (and, if 'flags' includes MEM_COMMIT, commit) the next chunk.
2995     char * p_new;
2996 
2997 #ifdef ASSERT
2998     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2999 #else
3000     const bool inject_error_now = false;
3001 #endif
3002 
3003     if (inject_error_now) {
3004       p_new = NULL;
3005     } else {
3006       if (!UseNUMAInterleaving) {
3007         p_new = (char *) VirtualAlloc(next_alloc_addr,
3008                                       bytes_to_rq,
3009                                       flags,
3010                                       prot);
3011       } else {
3012         // get the next node to use from the used_node_list
3013         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3014         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3015         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
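             // The node argument is a preference: Windows backs the pages with
             // physical memory from that node when it is available.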
3016       }
3017     }
3018 
3019     if (p_new == NULL) {
3020       // Free any allocated pages
3021       if (next_alloc_addr > p_buf) {
3022         // Some memory was committed so release it.
3023         size_t bytes_to_release = bytes - bytes_remaining;
3024         // NMT has yet to record any individual blocks, so we
3025         // need to create a dummy 'reserve' record to match
3026         // the release.
3027         MemTracker::record_virtual_memory_reserve((address)p_buf,
3028                                                   bytes_to_release, CALLER_PC);
3029         os::release_memory(p_buf, bytes_to_release);
3030       }
3031 #ifdef ASSERT
3032       if (should_inject_error) {
3033         log_develop_debug(pagesize)("Reserving pages individually failed.");
3034       }
3035 #endif
3036       return NULL;
3037     }
3038 
3039     bytes_remaining -= bytes_to_rq;
3040     next_alloc_addr += bytes_to_rq;
3041     count++;
3042   }
3043   // Although the memory is allocated individually, it is returned as one.
3044   // NMT records it as one block.
3045   if ((flags & MEM_COMMIT) != 0) {
3046     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3047   } else {
3048     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3049   }
3050 
3051   // made it this far, success
3052   return p_buf;
3053 }
3054 
3055 
3056 
3057 void os::large_page_init() {
3058   if (!UseLargePages) return;
3059 
3060   // print a warning if any large page related flag is specified on command line
3061   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3062                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3063   bool success = false;
3064 
3065 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3066   if (request_lock_memory_privilege()) {
3067     size_t s = GetLargePageMinimum();
3068     if (s) {
3069 #if defined(IA32) || defined(AMD64)
3070       if (s > 4*M || LargePageSizeInBytes > 4*M) {
3071         WARN("JVM cannot use large pages bigger than 4mb.");
3072       } else {
3073 #endif
3074         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
3075           _large_page_size = LargePageSizeInBytes;
3076         } else {
3077           _large_page_size = s;
3078         }
3079         success = true;
3080 #if defined(IA32) || defined(AMD64)
3081       }
3082 #endif
3083     } else {
3084       WARN("Large page is not supported by the processor.");
3085     }
3086   } else {
3087     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3088   }
3089 #undef WARN
3090 
3091   const size_t default_page_size = (size_t) vm_page_size();
3092   if (success && _large_page_size > default_page_size) {
3093     _page_sizes[0] = _large_page_size;
3094     _page_sizes[1] = default_page_size;
3095     _page_sizes[2] = 0;
3096   }
3097 
3098   cleanup_after_large_page_init();
3099   UseLargePages = success;
3100 }
3101 
3102 // On win32, one cannot release just a part of reserved memory, it's an
3103 // all or nothing deal.  When we split a reservation, we must break the
3104 // reservation into two reservations.
3105 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3106                                   bool realloc) {
3107   if (size > 0) {
3108     release_memory(base, size);
3109     if (realloc) {
3110       reserve_memory(split, base);
3111     }
3112     if (size != split) {
3113       reserve_memory(size - split, base + split);
3114     }
3115   }
3116 }
3117 
3118 // Multiple threads can race in this code, and unlike on POSIX-like OSes it is not
3119 // possible to unmap small sections of virtual space to get the requested alignment.
3120 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3121 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
3122   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3123          "Alignment must be a multiple of allocation granularity (page size)");
3124   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3125 
3126   size_t extra_size = size + alignment;
3127   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3128 
3129   char* aligned_base = NULL;
3130 
3131   do {
3132     char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
3133     if (extra_base == NULL) {
3134       return NULL;
3135     }
3136     // Do manual alignment
3137     aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3138 
3139     os::release_memory(extra_base, extra_size);
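         // Another thread may reserve this range between the release above and the
         // re-reserve below; reserve_memory() then returns NULL and the loop retries.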
3140 
3141     aligned_base = os::reserve_memory(size, aligned_base);
3142 
3143   } while (aligned_base == NULL);
3144 
3145   return aligned_base;
3146 }
3147 
3148 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3149   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3150          "reserve alignment");
3151   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3152   char* res;
3153   // note that if UseLargePages is on, all the areas that require interleaving
3154   // will go thru reserve_memory_special rather than thru here.
3155   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3156   if (!use_individual) {
3157     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3158   } else {
3159     elapsedTimer reserveTimer;
3160     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3161     // in numa interleaving, we have to allocate pages individually
3162     // (well really chunks of NUMAInterleaveGranularity size)
3163     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3164     if (res == NULL) {
3165       warning("NUMA page allocation failed");
3166     }
3167     if (Verbose && PrintMiscellaneous) {
3168       reserveTimer.stop();
3169       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3170                     reserveTimer.milliseconds(), reserveTimer.ticks());
3171     }
3172   }
3173   assert(res == NULL || addr == NULL || addr == res,
3174          "Unexpected address from reserve.");
3175 
3176   return res;
3177 }
3178 
3179 // Reserve memory at an arbitrary address, only if that area is
3180 // available (and not reserved for something else).
3181 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3182   // Windows os::reserve_memory() fails if the requested address range is
3183   // not available.
3184   return reserve_memory(bytes, requested_addr);
3185 }
3186 
3187 size_t os::large_page_size() {
3188   return _large_page_size;
3189 }
3190 
3191 bool os::can_commit_large_page_memory() {
3192   // Windows only uses large page memory when the entire region is reserved
3193   // and committed in a single VirtualAlloc() call. This may change in the
3194   // future, but with Windows 2003 it's not possible to commit on demand.
3195   return false;
3196 }
3197 
3198 bool os::can_execute_large_page_memory() {
3199   return true;
3200 }
3201 
3202 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3203                                  bool exec) {
3204   assert(UseLargePages, "only for large pages");
3205 
3206   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3207     return NULL; // Fallback to small pages.
3208   }
3209 
3210   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3211   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3212 
3213   // with large pages, there are two cases where we need to use Individual Allocation
3214   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3215   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3216   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3217     log_debug(pagesize)("Reserving large pages individually.");
3218 
3219     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3220     if (p_buf == NULL) {
3221       // give an appropriate warning message
3222       if (UseNUMAInterleaving) {
3223         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3224       }
3225       if (UseLargePagesIndividualAllocation) {
3226         warning("Individually allocated large pages failed, "
3227                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3228       }
3229       return NULL;
3230     }
3231 
3232     return p_buf;
3233 
3234   } else {
3235     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3236 
3237     // normal policy just allocate it all at once
3238     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3239     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3240     if (res != NULL) {
3241       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3242     }
3243 
3244     return res;
3245   }
3246 }
3247 
3248 bool os::release_memory_special(char* base, size_t bytes) {
3249   assert(base != NULL, "Sanity check");
3250   return release_memory(base, bytes);
3251 }
3252 
3253 void os::print_statistics() {
3254 }
3255 
3256 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3257   int err = os::get_last_error();
3258   char buf[256];
3259   size_t buf_len = os::lasterror(buf, sizeof(buf));
3260   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3261           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3262           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3263 }
3264 
3265 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3266   if (bytes == 0) {
3267     // Don't bother the OS with noops.
3268     return true;
3269   }
3270   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3271   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3272   // Don't attempt to print anything if the OS call fails. We're
3273   // probably low on resources, so the print itself may cause crashes.
3274 
3275   // Unless NUMAInterleaving is enabled, the range of a commit is always
3276   // within a reserve covered by a single VirtualAlloc; in that case we can
3277   // just do a single commit for the requested size.
3278   if (!UseNUMAInterleaving) {
3279     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3280       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3281       return false;
3282     }
3283     if (exec) {
3284       DWORD oldprot;
3285       // Windows doc says to use VirtualProtect to get execute permissions
3286       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3287         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3288         return false;
3289       }
3290     }
3291     return true;
3292   } else {
3293 
3294     // when NUMAInterleaving is enabled, the commit might cover a range that
3295     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3296     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3297     // returns represents the number of bytes that can be committed in one step.
3298     size_t bytes_remaining = bytes;
3299     char * next_alloc_addr = addr;
3300     while (bytes_remaining > 0) {
3301       MEMORY_BASIC_INFORMATION alloc_info;
3302       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3303       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3304       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3305                        PAGE_READWRITE) == NULL) {
3306         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3307                                             exec);)
3308         return false;
3309       }
3310       if (exec) {
3311         DWORD oldprot;
3312         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3313                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3314           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3315                                               exec);)
3316           return false;
3317         }
3318       }
3319       bytes_remaining -= bytes_to_rq;
3320       next_alloc_addr += bytes_to_rq;
3321     }
3322   }
3323   // if we made it this far, return true
3324   return true;
3325 }
3326 
3327 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3328                           bool exec) {
3329   // alignment_hint is ignored on this OS
3330   return pd_commit_memory(addr, size, exec);
3331 }
3332 
3333 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3334                                   const char* mesg) {
3335   assert(mesg != NULL, "mesg must be specified");
3336   if (!pd_commit_memory(addr, size, exec)) {
3337     warn_fail_commit_memory(addr, size, exec);
3338     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3339   }
3340 }
3341 
3342 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3343                                   size_t alignment_hint, bool exec,
3344                                   const char* mesg) {
3345   // alignment_hint is ignored on this OS
3346   pd_commit_memory_or_exit(addr, size, exec, mesg);
3347 }
3348 
3349 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3350   if (bytes == 0) {
3351     // Don't bother the OS with noops.
3352     return true;
3353   }
3354   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3355   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3356   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3357 }
3358 
3359 bool os::pd_release_memory(char* addr, size_t bytes) {
3360   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3361 }
3362 
3363 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3364   return os::commit_memory(addr, size, !ExecMem);
3365 }
3366 
3367 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3368   return os::uncommit_memory(addr, size);
3369 }
3370 
3371 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3372   uint count = 0;
3373   bool ret = false;
3374   size_t bytes_remaining = bytes;
3375   char * next_protect_addr = addr;
3376 
3377   // Use VirtualQuery() to get the chunk size.
3378   while (bytes_remaining) {
3379     MEMORY_BASIC_INFORMATION alloc_info;
3380     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3381       return false;
3382     }
3383 
3384     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3385     // We used different APIs in allocate_pages_individually() based on UseNUMAInterleaving,
3386     // but we don't distinguish here as both cases are protected by the same API.
3387     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3388     if (!ret) {
3389       warning("Failed protecting pages individually for chunk #%u", count);
3390       return false;
3391     }
3392 
3393     bytes_remaining -= bytes_to_protect;
3394     next_protect_addr += bytes_to_protect;
3395     count++;
3396   }
3397   return ret;
3398 }
3399 
3400 // Set protections specified
3401 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3402                         bool is_committed) {
3403   unsigned int p = 0;
3404   switch (prot) {
3405   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3406   case MEM_PROT_READ: p = PAGE_READONLY; break;
3407   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3408   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3409   default:
3410     ShouldNotReachHere();
3411   }
3412 
3413   DWORD old_status;
3414 
3415   // Strangely enough, on Win32 one can change protection only for committed
3416   // memory. Not a big deal anyway, as bytes is less than or equal to 64K here.
3417   if (!is_committed) {
3418     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3419                           "cannot commit protection page");
3420   }
3421   // One cannot use os::guard_memory() here, as on Win32 guard pages
3422   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3423   //
3424   // Pages in the region become guard pages. Any attempt to access a guard page
3425   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3426   // the guard page status. Guard pages thus act as a one-time access alarm.
3427   bool ret;
3428   if (UseNUMAInterleaving) {
3429     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3430     // so we must protect the chunks individually.
3431     ret = protect_pages_individually(addr, bytes, p, &old_status);
3432   } else {
3433     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3434   }
3435 #ifdef ASSERT
3436   if (!ret) {
3437     int err = os::get_last_error();
3438     char buf[256];
3439     size_t buf_len = os::lasterror(buf, sizeof(buf));
3440     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3441           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3442           buf_len != 0 ? buf : "<no_error_string>", err);
3443   }
3444 #endif
3445   return ret;
3446 }
3447 
3448 bool os::guard_memory(char* addr, size_t bytes) {
3449   DWORD old_status;
3450   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3451 }
3452 
3453 bool os::unguard_memory(char* addr, size_t bytes) {
3454   DWORD old_status;
3455   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3456 }
3457 
3458 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3459 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3460 void os::numa_make_global(char *addr, size_t bytes)    { }
3461 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3462 bool os::numa_topology_changed()                       { return false; }
3463 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3464 int os::numa_get_group_id()                            { return 0; }
3465 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3466   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3467     // Provide an answer for UMA systems
3468     ids[0] = 0;
3469     return 1;
3470   } else {
3471     // check for size bigger than actual groups_num
3472     size = MIN2(size, numa_get_groups_num());
3473     for (int i = 0; i < (int)size; i++) {
3474       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3475     }
3476     return size;
3477   }
3478 }
3479 
3480 bool os::get_page_info(char *start, page_info* info) {
3481   return false;
3482 }
3483 
3484 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3485                      page_info* page_found) {
3486   return end;
3487 }
3488 
3489 char* os::non_memory_address_word() {
3490   // Must never look like an address returned by reserve_memory,
3491   // even in its subfields (as defined by the CPU immediate fields,
3492   // if the CPU splits constants across multiple instructions).
3493   return (char*)-1;
3494 }
3495 
3496 #define MAX_ERROR_COUNT 100
3497 #define SYS_THREAD_ERROR 0xffffffffUL
3498 
3499 void os::pd_start_thread(Thread* thread) {
3500   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3501   // Returns previous suspend state:
3502   // 0:  Thread was not suspended
3503   // 1:  Thread is running now
3504   // >1: Thread is still suspended.
3505   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3506 }
3507 
3508 class HighResolutionInterval : public CHeapObj<mtThread> {
3509   // The default timer resolution seems to be 10 milliseconds.
3510   // (Where is this written down?)
3511   // If someone wants to sleep for only a fraction of the default,
3512   // then we set the timer resolution down to 1 millisecond for
3513   // the duration of their interval.
3514   // We carefully set the resolution back, since otherwise we
3515   // seem to incur an overhead (3%?) that we don't need.
3516   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
3517   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3518   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3519   // timeBeginPeriod() if the relative error exceeded some threshold.
3520   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3521   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
3522   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3523   // resolution timers running.
3524  private:
3525   jlong resolution;
3526  public:
3527   HighResolutionInterval(jlong ms) {
3528     resolution = ms % 10L;
3529     if (resolution != 0) {
3530       MMRESULT result = timeBeginPeriod(1L);
3531     }
3532   }
3533   ~HighResolutionInterval() {
3534     if (resolution != 0) {
3535       MMRESULT result = timeEndPeriod(1L);
3536     }
3537     resolution = 0L;
3538   }
3539 };
3540 
3541 int os::sleep(Thread* thread, jlong ms, bool interruptable) {
3542   jlong limit = (jlong) MAXDWORD;
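       // Sleep() and WaitForMultipleObjects() take a DWORD timeout, so sleeps
       // longer than MAXDWORD ms are broken into multiple pieces below.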
3543 
3544   while (ms > limit) {
3545     int res;
3546     if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
3547       return res;
3548     }
3549     ms -= limit;
3550   }
3551 
3552   assert(thread == Thread::current(), "thread consistency check");
3553   OSThread* osthread = thread->osthread();
3554   OSThreadWaitState osts(osthread, false /* not Object.wait() */);
3555   int result;
3556   if (interruptable) {
3557     assert(thread->is_Java_thread(), "must be java thread");
3558     JavaThread *jt = (JavaThread *) thread;
3559     ThreadBlockInVM tbivm(jt);
3560 
3561     jt->set_suspend_equivalent();
3562     // cleared by handle_special_suspend_equivalent_condition() or
3563     // java_suspend_self() via check_and_wait_while_suspended()
3564 
3565     HANDLE events[1];
3566     events[0] = osthread->interrupt_event();
3567     HighResolutionInterval *phri=NULL;
3568     if (!ForceTimeHighResolution) {
3569       phri = new HighResolutionInterval(ms);
3570     }
3571     if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
3572       result = OS_TIMEOUT;
3573     } else {
3574       ResetEvent(osthread->interrupt_event());
3575       osthread->set_interrupted(false);
3576       result = OS_INTRPT;
3577     }
3578     delete phri; //if it is NULL, harmless
3579 
3580     // were we externally suspended while we were waiting?
3581     jt->check_and_wait_while_suspended();
3582   } else {
3583     assert(!thread->is_Java_thread(), "must not be java thread");
3584     Sleep((long) ms);
3585     result = OS_TIMEOUT;
3586   }
3587   return result;
3588 }
3589 
3590 // Short sleep, direct OS call.
3591 //
3592 // ms = 0, means allow others (if any) to run.
3593 //
3594 void os::naked_short_sleep(jlong ms) {
3595   assert(ms < 1000, "Un-interruptable sleep, short time use only");
3596   Sleep(ms);
3597 }
3598 
3599 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3600 void os::infinite_sleep() {
3601   while (true) {    // sleep forever ...
3602     Sleep(100000);  // ... 100 seconds at a time
3603   }
3604 }
3605 
3606 typedef BOOL (WINAPI * STTSignature)(void);
3607 
3608 void os::naked_yield() {
3609   // Consider passing back the return value from SwitchToThread().
3610   SwitchToThread();
3611 }
3612 
3613 // Win32 only gives you access to seven real priorities at a time,
3614 // so we compress Java's ten down to seven.  It would be better
3615 // if we dynamically adjusted relative priorities.
3616 
3617 int os::java_to_os_priority[CriticalPriority + 1] = {
3618   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3619   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3620   THREAD_PRIORITY_LOWEST,                       // 2
3621   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3622   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3623   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3624   THREAD_PRIORITY_NORMAL,                       // 6
3625   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3626   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3627   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3628   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3629   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3630 };
3631 
3632 int prio_policy1[CriticalPriority + 1] = {
3633   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3634   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3635   THREAD_PRIORITY_LOWEST,                       // 2
3636   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3637   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3638   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3639   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3640   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3641   THREAD_PRIORITY_HIGHEST,                      // 8
3642   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3643   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3644   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3645 };
3646 
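     // Install the alternate priority table above when -XX:ThreadPriorityPolicy=1
     // is specified; called once from os::init_2().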
3647 static int prio_init() {
3648   // If ThreadPriorityPolicy is 1, switch tables
3649   if (ThreadPriorityPolicy == 1) {
3650     int i;
3651     for (i = 0; i < CriticalPriority + 1; i++) {
3652       os::java_to_os_priority[i] = prio_policy1[i];
3653     }
3654   }
3655   if (UseCriticalJavaThreadPriority) {
3656     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3657   }
3658   return 0;
3659 }
3660 
3661 OSReturn os::set_native_priority(Thread* thread, int priority) {
3662   if (!UseThreadPriorities) return OS_OK;
3663   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3664   return ret ? OS_OK : OS_ERR;
3665 }
3666 
3667 OSReturn os::get_native_priority(const Thread* const thread,
3668                                  int* priority_ptr) {
3669   if (!UseThreadPriorities) {
3670     *priority_ptr = java_to_os_priority[NormPriority];
3671     return OS_OK;
3672   }
3673   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3674   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3675     assert(false, "GetThreadPriority failed");
3676     return OS_ERR;
3677   }
3678   *priority_ptr = os_prio;
3679   return OS_OK;
3680 }
3681 
3682 
3683 // Hint to the underlying OS that a task switch would not be good.
3684 // Void return because it's a hint and can fail.
3685 void os::hint_no_preempt() {}
3686 
3687 void os::interrupt(Thread* thread) {
3688   assert(!thread->is_Java_thread() || Thread::current() == thread ||
3689          Threads_lock->owned_by_self(),
3690          "possibility of dangling Thread pointer");
3691 
3692   OSThread* osthread = thread->osthread();
3693   osthread->set_interrupted(true);
3694   // More than one thread can get here with the same value of osthread,
3695   // resulting in multiple notifications.  We do, however, want the store
3696   // to interrupted() to be visible to other threads before we post
3697   // the interrupt event.
3698   OrderAccess::release();
3699   SetEvent(osthread->interrupt_event());
3700   // For JSR166:  unpark after setting status
3701   if (thread->is_Java_thread()) {
3702     ((JavaThread*)thread)->parker()->unpark();
3703   }
3704 
3705   ParkEvent * ev = thread->_ParkEvent;
3706   if (ev != NULL) ev->unpark();
3707 }
3708 
3709 
3710 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3711   assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3712          "possibility of dangling Thread pointer");
3713 
3714   OSThread* osthread = thread->osthread();
3715   // There is no synchronization between the setting of the interrupt
3716   // and it being cleared here. It is critical - see 6535709 - that
3717   // we only clear the interrupt state, and reset the interrupt event,
3718   // if we are going to report that we were indeed interrupted - else
3719   // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3720   // depending on the timing. We also check the thread's interrupt event to
3721   // confirm it received a real interrupt, which prevents spurious wakeups.
3722   bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3723   if (interrupted && clear_interrupted) {
3724     osthread->set_interrupted(false);
3725     ResetEvent(osthread->interrupt_event());
3726   } // Otherwise leave the interrupted state alone
3727 
3728   return interrupted;
3729 }
3730 
3731 // Gets a pc (hint) for a running thread. Currently used only for profiling.
3732 ExtendedPC os::get_thread_pc(Thread* thread) {
3733   CONTEXT context;
3734   context.ContextFlags = CONTEXT_CONTROL;
3735   HANDLE handle = thread->osthread()->thread_handle();
3736 #ifdef _M_IA64
3737   assert(0, "Fix get_thread_pc");
3738   return ExtendedPC(NULL);
3739 #else
3740   if (GetThreadContext(handle, &context)) {
3741 #ifdef _M_AMD64
3742     return ExtendedPC((address) context.Rip);
3743 #else
3744     return ExtendedPC((address) context.Eip);
3745 #endif
3746   } else {
3747     return ExtendedPC(NULL);
3748   }
3749 #endif
3750 }
3751 
3752 // GetCurrentThreadId() returns DWORD
3753 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3754 
3755 static int _initial_pid = 0;
3756 
3757 int os::current_process_id() {
3758   return (_initial_pid ? _initial_pid : _getpid());
3759 }
3760 
3761 int    os::win32::_vm_page_size              = 0;
3762 int    os::win32::_vm_allocation_granularity = 0;
3763 int    os::win32::_processor_type            = 0;
3764 // Processor level is not available on non-NT systems, use vm_version instead
3765 int    os::win32::_processor_level           = 0;
3766 julong os::win32::_physical_memory           = 0;
3767 size_t os::win32::_default_stack_size        = 0;
3768 
3769 intx          os::win32::_os_thread_limit    = 0;
3770 volatile intx os::win32::_os_thread_count    = 0;
3771 
3772 bool   os::win32::_is_windows_server         = false;
3773 
3774 // 6573254
3775 // Currently, the bug is observed across all the supported Windows releases,
3776 // including the latest one (as of this writing - Windows Server 2012 R2)
3777 bool   os::win32::_has_exit_bug              = true;
3778 
3779 void os::win32::initialize_system_info() {
3780   SYSTEM_INFO si;
3781   GetSystemInfo(&si);
3782   _vm_page_size    = si.dwPageSize;
3783   _vm_allocation_granularity = si.dwAllocationGranularity;
3784   _processor_type  = si.dwProcessorType;
3785   _processor_level = si.wProcessorLevel;
3786   set_processor_count(si.dwNumberOfProcessors);
3787 
3788   MEMORYSTATUSEX ms;
3789   ms.dwLength = sizeof(ms);
3790 
3791   // GlobalMemoryStatusEx() also returns ullAvailPhys (free physical memory bytes),
3792   // ullTotalVirtual, ullAvailVirtual, and dwMemoryLoad (% of memory in use).
3793   GlobalMemoryStatusEx(&ms);
3794   _physical_memory = ms.ullTotalPhys;
3795 
3796   OSVERSIONINFOEX oi;
3797   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3798   GetVersionEx((OSVERSIONINFO*)&oi);
3799   switch (oi.dwPlatformId) {
3800   case VER_PLATFORM_WIN32_NT:
3801     {
3802       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3803       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3804           oi.wProductType == VER_NT_SERVER) {
3805         _is_windows_server = true;
3806       }
3807     }
3808     break;
3809   default: fatal("Unknown platform");
3810   }
3811 
3812   _default_stack_size = os::current_stack_size();
3813   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3814   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3815          "stack size not a multiple of page size");
3816 
3817   initialize_performance_counter();
3818 }
3819 
3820 
3821 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3822                                       int ebuflen) {
3823   char path[MAX_PATH];
3824   DWORD size;
3825   DWORD pathLen = (DWORD)sizeof(path);
3826   HINSTANCE result = NULL;
3827 
3828   // only allow library name without path component
3829   assert(strchr(name, '\\') == NULL, "path not allowed");
3830   assert(strchr(name, ':') == NULL, "path not allowed");
3831   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3832     jio_snprintf(ebuf, ebuflen,
3833                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3834     return NULL;
3835   }
3836 
3837   // search system directory
3838   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3839     if (size >= pathLen) {
3840       return NULL; // truncated
3841     }
3842     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3843       return NULL; // truncated
3844     }
3845     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3846       return result;
3847     }
3848   }
3849 
3850   // try Windows directory
3851   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3852     if (size >= pathLen) {
3853       return NULL; // truncated
3854     }
3855     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3856       return NULL; // truncated
3857     }
3858     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3859       return result;
3860     }
3861   }
3862 
3863   jio_snprintf(ebuf, ebuflen,
3864                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3865   return NULL;
3866 }
3867 
3868 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
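     // MAXIMUM_WAIT_OBJECTS is 64, so at most 1024 exiting-thread handles are kept.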
3869 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3870 
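     // One-time initializer passed to InitOnceExecuteOnce() in exit_process_or_thread()
     // below; it initializes the critical section guarding the exiting-thread handle array.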
3871 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3872   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3873   return TRUE;
3874 }
3875 
3876 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3877   // Basic approach:
3878   //  - Each exiting thread registers its intent to exit and then does so.
3879   //  - A thread trying to terminate the process must wait for all
3880   //    threads currently exiting to complete their exit.
3881 
3882   if (os::win32::has_exit_bug()) {
3883     // The array holds handles of the threads that have started exiting by calling
3884     // _endthreadex().
3885     // Should be large enough to avoid blocking the exiting thread due to lack of
3886     // a free slot.
3887     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3888     static int handle_count = 0;
3889 
3890     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3891     static CRITICAL_SECTION crit_sect;
3892     static volatile jint process_exiting = 0;
3893     int i, j;
3894     DWORD res;
3895     HANDLE hproc, hthr;
3896 
3897     // We only attempt to register threads until a process exiting
3898     // thread manages to set the process_exiting flag. Any threads
3899     // that come through here after the process_exiting flag is set
3900     // are unregistered and will be caught in the SuspendThread()
3901     // infinite loop below.
3902     bool registered = false;
3903 
3904     // The first thread that reaches this point initializes the critical section.
3905     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3906       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3907     } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3908       if (what != EPT_THREAD) {
3909         // Atomically set process_exiting before the critical section
3910         // to increase the visibility between racing threads.
3911         Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
3912       }
3913       EnterCriticalSection(&crit_sect);
3914 
3915       if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3916         // Remove from the array those handles of the threads that have completed exiting.
3917         for (i = 0, j = 0; i < handle_count; ++i) {
3918           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3919           if (res == WAIT_TIMEOUT) {
3920             handles[j++] = handles[i];
3921           } else {
3922             if (res == WAIT_FAILED) {
3923               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3924                       GetLastError(), __FILE__, __LINE__);
3925             }
3926             // Don't keep the handle, if we failed waiting for it.
3927             CloseHandle(handles[i]);
3928           }
3929         }
3930 
3931         // If there's no free slot in the array of the kept handles, we'll have to
3932         // wait until at least one thread completes exiting.
3933         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3934           // Raise the priority of the oldest exiting thread to increase its chances
3935           // to complete sooner.
3936           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3937           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3938           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3939             i = (res - WAIT_OBJECT_0);
3940             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3941             for (; i < handle_count; ++i) {
3942               handles[i] = handles[i + 1];
3943             }
3944           } else {
3945             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3946                     (res == WAIT_FAILED ? "failed" : "timed out"),
3947                     GetLastError(), __FILE__, __LINE__);
3948             // Don't keep handles, if we failed waiting for them.
3949             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3950               CloseHandle(handles[i]);
3951             }
3952             handle_count = 0;
3953           }
3954         }
3955 
3956         // Store a duplicate of the current thread handle in the array of handles.
3957         hproc = GetCurrentProcess();
3958         hthr = GetCurrentThread();
3959         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3960                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3961           warning("DuplicateHandle failed (%u) in %s: %d\n",
3962                   GetLastError(), __FILE__, __LINE__);
3963 
3964           // We can't register this thread (no more handles) so this thread
3965           // may be racing with a thread that is calling exit(). If the thread
3966           // that is calling exit() has managed to set the process_exiting
3967           // flag, then this thread will be caught in the SuspendThread()
3968           // infinite loop below which closes that race. A small timing
3969           // window remains before the process_exiting flag is set, but it
3970           // is only exposed when we are out of handles.
3971         } else {
3972           ++handle_count;
3973           registered = true;
3974 
3975           // The current exiting thread has stored its handle in the array, and now
3976           // should leave the critical section before calling _endthreadex().
3977         }
3978 
3979       } else if (what != EPT_THREAD && handle_count > 0) {
3980         jlong start_time, finish_time, timeout_left;
3981         // Before ending the process, make sure all the threads that had called
3982         // _endthreadex() completed.
3983 
3984         // Set the priority level of the current thread to the same value as
3985         // the priority level of exiting threads.
3986         // This is to ensure it will be given a fair chance to execute if
3987         // the timeout expires.
3988         hthr = GetCurrentThread();
3989         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3990         start_time = os::javaTimeNanos();
3991         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3992         for (i = 0; ; ) {
3993           int portion_count = handle_count - i;
3994           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3995             portion_count = MAXIMUM_WAIT_OBJECTS;
3996           }
3997           for (j = 0; j < portion_count; ++j) {
3998             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3999           }
4000           timeout_left = (finish_time - start_time) / 1000000L;
4001           if (timeout_left < 0) {
4002             timeout_left = 0;
4003           }
4004           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4005           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4006             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4007                     (res == WAIT_FAILED ? "failed" : "timed out"),
4008                     GetLastError(), __FILE__, __LINE__);
4009             // Reset portion_count so we close the remaining
4010             // handles due to this error.
4011             portion_count = handle_count - i;
4012           }
4013           for (j = 0; j < portion_count; ++j) {
4014             CloseHandle(handles[i + j]);
4015           }
4016           if ((i += portion_count) >= handle_count) {
4017             break;
4018           }
4019           start_time = os::javaTimeNanos();
4020         }
4021         handle_count = 0;
4022       }
4023 
4024       LeaveCriticalSection(&crit_sect);
4025     }
4026 
4027     if (!registered &&
4028         OrderAccess::load_acquire(&process_exiting) != 0 &&
4029         process_exiting != (jint)GetCurrentThreadId()) {
4030       // Some other thread is about to call exit(), so we don't let
4031       // the current unregistered thread proceed to exit() or _endthreadex()
4032       while (true) {
4033         SuspendThread(GetCurrentThread());
4034         // Avoid busy-wait loop, if SuspendThread() failed.
4035         Sleep(EXIT_TIMEOUT);
4036       }
4037     }
4038   }
4039 
4040   // We are here if either
4041   // - there's no 'race at exit' bug on this OS release;
4042   // - initialization of the critical section failed (unlikely);
4043   // - the current thread has registered itself and left the critical section;
4044   // - the process-exiting thread has raised the flag and left the critical section.
4045   if (what == EPT_THREAD) {
4046     _endthreadex((unsigned)exit_code);
4047   } else if (what == EPT_PROCESS) {
4048     ::exit(exit_code);
4049   } else {
4050     _exit(exit_code);
4051   }
4052 
4053   // Should not reach here
4054   return exit_code;
4055 }
4056 
4057 #undef EXIT_TIMEOUT
4058 
4059 void os::win32::setmode_streams() {
4060   _setmode(_fileno(stdin), _O_BINARY);
4061   _setmode(_fileno(stdout), _O_BINARY);
4062   _setmode(_fileno(stderr), _O_BINARY);
4063 }
4064 
4065 
4066 bool os::is_debugger_attached() {
4067   return IsDebuggerPresent() ? true : false;
4068 }
4069 
4070 
4071 void os::wait_for_keypress_at_exit(void) {
4072   if (PauseAtExit) {
4073     fprintf(stderr, "Press any key to continue...\n");
4074     fgetc(stdin);
4075   }
4076 }
4077 
4078 
4079 bool os::message_box(const char* title, const char* message) {
4080   int result = MessageBox(NULL, message, title,
4081                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4082   return result == IDYES;
4083 }
4084 
4085 #ifndef PRODUCT
4086 #ifndef _WIN64
4087 // Helpers to check whether NX protection is enabled
4088 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4089   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4090       pex->ExceptionRecord->NumberParameters > 0 &&
4091       pex->ExceptionRecord->ExceptionInformation[0] ==
4092       EXCEPTION_INFO_EXEC_VIOLATION) {
4093     return EXCEPTION_EXECUTE_HANDLER;
4094   }
4095   return EXCEPTION_CONTINUE_SEARCH;
4096 }
4097 
4098 void nx_check_protection() {
4099   // If NX is enabled we'll get an exception calling into code on the stack
4100   char code[] = { (char)0xC3 }; // ret
4101   void *code_ptr = (void *)code;
4102   __try {
4103     __asm call code_ptr
4104   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4105     tty->print_raw_cr("NX protection detected.");
4106   }
4107 }
4108 #endif // _WIN64
4109 #endif // PRODUCT
4110 
4111 // This is called _before_ the global arguments have been parsed
4112 void os::init(void) {
4113   _initial_pid = _getpid();
4114 
4115   init_random(1234567);
4116 
4117   win32::initialize_system_info();
4118   win32::setmode_streams();
4119   init_page_sizes((size_t) win32::vm_page_size());
4120 
4121   // This may be overridden later when argument processing is done.
4122   FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);
4123 
4124   // Initialize main_process and main_thread
4125   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4126   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4127                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4128     fatal("DuplicateHandle failed\n");
4129   }
4130   main_thread_id = (int) GetCurrentThreadId();
4131 
4132   // initialize fast thread access - only used for 32-bit
4133   win32::initialize_thread_ptr_offset();
4134 }
4135 
4136 // To install functions for atexit processing
4137 extern "C" {
4138   static void perfMemory_exit_helper() {
4139     perfMemory_exit();
4140   }
4141 }
4142 
4143 static jint initSock();
4144 
4145 // this is called _after_ the global arguments have been parsed
4146 jint os::init_2(void) {
4147   // Allocate a single page and mark it as readable for safepoint polling
4148   address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
4149   guarantee(polling_page != NULL, "Reserve Failed for polling page");
4150 
4151   address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
4152   guarantee(return_page != NULL, "Commit Failed for polling page");
4153 
4154   os::set_polling_page(polling_page);
4155   log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
4156 
4157   if (!UseMembar) {
4158     address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
4159     guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
4160 
4161     return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
4162     guarantee(return_page != NULL, "Commit Failed for memory serialize page");
4163 
4164     os::set_memory_serialize_page(mem_serialize_page);
4165     log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
4166   }
4167 
4168   // Setup Windows Exceptions
4169 
4170   // for debugging float code generation bugs
4171   if (ForceFloatExceptions) {
4172 #ifndef  _WIN64
4173     static long fp_control_word = 0;
4174     __asm { fstcw fp_control_word }
4175     // see Intel PPro Manual, Vol. 2, p 7-16
4176     const long precision = 0x20;
4177     const long underflow = 0x10;
4178     const long overflow  = 0x08;
4179     const long zero_div  = 0x04;
4180     const long denorm    = 0x02;
4181     const long invalid   = 0x01;
4182     fp_control_word |= invalid;
4183     __asm { fldcw fp_control_word }
4184 #endif
4185   }
4186 
4187   // If stack_commit_size is 0, windows will reserve the default size,
4188   // but only commit a small portion of it.
4189   size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
4190   size_t default_reserve_size = os::win32::default_stack_size();
4191   size_t actual_reserve_size = stack_commit_size;
4192   if (stack_commit_size < default_reserve_size) {
4193     // If stack_commit_size == 0, we want this too
4194     actual_reserve_size = default_reserve_size;
4195   }
4196 
4197   // Check minimum allowable stack size for thread creation and to initialize
4198   // the java system classes, including StackOverflowError - depends on page
4199   // size.  Add two 4K pages for compiler2 recursion in main thread.
4200   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4201   // class initialization depending on 32 or 64 bit VM.
4202   size_t min_stack_allowed =
4203             (size_t)(JavaThread::stack_guard_zone_size() +
4204                      JavaThread::stack_shadow_zone_size() +
4205                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4206 
4207   min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
4208 
4209   if (actual_reserve_size < min_stack_allowed) {
4210     tty->print_cr("\nThe stack size specified is too small. "
4211                   "Specify at least %dk",
4212                   min_stack_allowed / K);
4213     return JNI_ERR;
4214   }
4215 
4216   JavaThread::set_stack_size_at_create(stack_commit_size);
4217 
4218   // Calculate the theoretical max. number of threads to guard against artificial
4219   // out-of-memory situations, where all available address-space has been
4220   // reserved by thread stacks.
4221   assert(actual_reserve_size != 0, "Must have a stack");
4222 
4223   // Calculate the thread limit when we should start doing Virtual Memory
4224   // banging. Currently when the threads will have used all but 200Mb of space.
4225   //
4226   // TODO: consider performing a similar calculation for commit size instead
4227   // of reserve size, since on a 64-bit platform we'll run into that more
4228   // often than running out of virtual memory space.  We can use the
4229   // lower value of the two calculations as the os_thread_limit.
4230   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
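       // i.e. half of the address space (2G on a 32-bit VM) minus ~200MB of headroom.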
4231   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4232 
4233   // at exit methods are called in the reverse order of their registration.
4234   // there is no limit to the number of functions registered. atexit does
4235   // not set errno.
4236 
4237   if (PerfAllowAtExitRegistration) {
4238     // only register atexit functions if PerfAllowAtExitRegistration is set.
4239     // atexit functions can be delayed until process exit time, which
4240     // can be problematic for embedded VM situations. Embedded VMs should
4241     // call DestroyJavaVM() to assure that VM resources are released.
4242 
4243     // note: perfMemory_exit_helper atexit function may be removed in
4244     // the future if the appropriate cleanup code can be added to the
4245     // VM_Exit VMOperation's doit method.
4246     if (atexit(perfMemory_exit_helper) != 0) {
4247       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4248     }
4249   }
4250 
4251 #ifndef _WIN64
4252   // Print something if NX is enabled (win32 on AMD64)
4253   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4254 #endif
4255 
4256   // initialize thread priority policy
4257   prio_init();
4258 
4259   if (UseNUMA && !ForceNUMA) {
4260     UseNUMA = false; // We don't fully support this yet
4261   }
4262 
4263   if (UseNUMAInterleaving) {
4264     // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4265     bool success = numa_interleaving_init();
4266     if (!success) UseNUMAInterleaving = false;
4267   }
4268 
4269   if (initSock() != JNI_OK) {
4270     return JNI_ERR;
4271   }
4272 
4273   return JNI_OK;
4274 }
4275 
4276 // Mark the polling page as unreadable
4277 void os::make_polling_page_unreadable(void) {
4278   DWORD old_status;
4279   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4280                       PAGE_NOACCESS, &old_status)) {
4281     fatal("Could not disable polling page");
4282   }
4283 }
4284 
4285 // Mark the polling page as readable
4286 void os::make_polling_page_readable(void) {
4287   DWORD old_status;
4288   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4289                       PAGE_READONLY, &old_status)) {
4290     fatal("Could not enable polling page");
4291   }
4292 }
4293 
4294 
4295 int os::stat(const char *path, struct stat *sbuf) {
4296   char pathbuf[MAX_PATH];
4297   if (strlen(path) > MAX_PATH - 1) {
4298     errno = ENAMETOOLONG;
4299     return -1;
4300   }
4301   os::native_path(strcpy(pathbuf, path));
4302   int ret = ::stat(pathbuf, sbuf);
4303   if (sbuf != NULL && UseUTCFileTimestamp) {
4304     // Fix for 6539723.  st_mtime returned from stat() is dependent on
4305     // the system timezone and so can return different values for the
4306     // same file if/when daylight savings time changes.  This adjustment
4307     // makes sure the same timestamp is returned regardless of the TZ.
4308     //
4309     // See:
4310     // http://msdn.microsoft.com/library/
4311     //   default.asp?url=/library/en-us/sysinfo/base/
4312     //   time_zone_information_str.asp
4313     // and
4314     // http://msdn.microsoft.com/library/default.asp?url=
4315     //   /library/en-us/sysinfo/base/settimezoneinformation.asp
4316     //
4317     // NOTE: there is an insidious bug here:  If the timezone is changed
4318     // after the call to stat() but before 'GetTimeZoneInformation()', then
4319     // the adjustment we do here will be wrong and we'll return the wrong
4320     // value (which will likely end up creating an invalid class data
4321     // archive).  Absent a better API for this, or some time zone locking
4322     // mechanism, we'll have to live with this risk.
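         //
         // As an illustrative example (editorial, not derived from this code
         // path): with the U.S. Pacific zone during daylight saving time,
         // Bias is 480 and DaylightBias is -60, so st_mtime is adjusted by
         // (480 - 60) * 60 = 25200 seconds, i.e. the 7-hour offset from UTC.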
4323     TIME_ZONE_INFORMATION tz;
4324     DWORD tzid = GetTimeZoneInformation(&tz);
4325     int daylightBias =
4326       (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
4327     sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4328   }
4329   return ret;
4330 }
4331 
4332 
4333 #define FT2INT64(ft) \
4334   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
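     // A FILETIME counts 100-nanosecond ticks; FT2INT64 simply concatenates the
     // high and low 32-bit halves into one signed 64-bit tick count, and the
     // "* 100" in os::thread_cpu_time() below converts ticks to nanoseconds.
     // Editorial example: dwHighDateTime == 0 and dwLowDateTime == 10000000 is
     // one second of CPU time; FT2INT64 of it times 100 is 1000000000 ns.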
4335 
4336 
4337 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4338 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4339 // of a thread.
4340 //
4341 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4342 // the fast estimate available on the platform.
4343 
4344 // current_thread_cpu_time() is not optimized for Windows yet
4345 jlong os::current_thread_cpu_time() {
4346   // return user + sys since the cost is the same
4347   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4348 }
4349 
4350 jlong os::thread_cpu_time(Thread* thread) {
4351   // consistent with what current_thread_cpu_time() returns.
4352   return os::thread_cpu_time(thread, true /* user+sys */);
4353 }
4354 
4355 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4356   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4357 }
4358 
4359 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4360   // This code is copied from the classic VM -> hpi::sysThreadCPUTime
4361   // If this function changes, os::is_thread_cpu_time_supported() should too
4362   FILETIME CreationTime;
4363   FILETIME ExitTime;
4364   FILETIME KernelTime;
4365   FILETIME UserTime;
4366 
4367   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4368                       &ExitTime, &KernelTime, &UserTime) == 0) {
4369     return -1;
4370   } else if (user_sys_cpu_time) {
4371     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4372   } else {
4373     return FT2INT64(UserTime) * 100;
4374   }
4375 }
4376 
4377 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4378   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4379   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4380   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4381   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4382 }
4383 
4384 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4385   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4386   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4387   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4388   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4389 }
4390 
4391 bool os::is_thread_cpu_time_supported() {
4392   // see os::thread_cpu_time
4393   FILETIME CreationTime;
4394   FILETIME ExitTime;
4395   FILETIME KernelTime;
4396   FILETIME UserTime;
4397 
4398   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4399                       &KernelTime, &UserTime) == 0) {
4400     return false;
4401   } else {
4402     return true;
4403   }
4404 }
4405 
4406 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4407 // It does have primitives (PDH API) to get CPU usage and run queue length.
4408 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4409 // If we wanted to implement loadavg on Windows, we have a few options:
4410 //
4411 // a) Query CPU usage and run queue length and "fake" an answer by
4412 //    returning the CPU usage if it's under 100%, and the run queue
4413 //    length otherwise.  It turns out that querying is pretty slow
4414 //    on Windows, on the order of 200 microseconds on a fast machine.
4415 //    Note that on Windows the CPU usage value is the % usage
4416 //    since the last time the API was called (and the first call
4417 //    returns 100%), so we'd have to deal with that as well.
4418 //
4419 // b) Sample the "fake" answer using a sampling thread and store
4420 //    the answer in a global variable.  The call to loadavg would
4421 //    just return the value of the global, avoiding the slow query.
4422 //
4423 // c) Sample a better answer using exponential decay to smooth the
4424 //    value.  This is basically the algorithm used by UNIX kernels.
4425 //
4426 // Note that sampling thread starvation could affect both (b) and (c).
4427 int os::loadavg(double loadavg[], int nelem) {
4428   return -1;
4429 }
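
     // A hedged, commented-out sketch of option (a) above using the PDH API.
     // The counter paths are the ones quoted in the comment above; everything
     // else (names, sampling interval, error handling) is illustrative only
     // and is not wired into the VM.
     //
     //   #include <pdh.h>   // and link against pdh.lib
     //
     //   static double fake_loadavg() {
     //     PDH_HQUERY query;
     //     PDH_HCOUNTER cpu, queue;
     //     PDH_FMT_COUNTERVALUE cpu_val, queue_val;
     //     if (PdhOpenQuery(NULL, 0, &query) != ERROR_SUCCESS) return -1.0;
     //     PdhAddCounter(query, "\\Processor(_Total)\\% Processor Time", 0, &cpu);
     //     PdhAddCounter(query, "\\System\\Processor Queue Length", 0, &queue);
     //     PdhCollectQueryData(query);     // CPU % needs two samples
     //     Sleep(100);
     //     PdhCollectQueryData(query);
     //     PdhGetFormattedCounterValue(cpu, PDH_FMT_DOUBLE, NULL, &cpu_val);
     //     PdhGetFormattedCounterValue(queue, PDH_FMT_DOUBLE, NULL, &queue_val);
     //     PdhCloseQuery(query);
     //     // Report utilization as a fraction while below 100%, otherwise the
     //     // run queue length, mimicking option (a).
     //     return (cpu_val.doubleValue < 100.0) ? cpu_val.doubleValue / 100.0
     //                                          : queue_val.doubleValue;
     //   }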
4430 
4431 
4432 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4433 bool os::dont_yield() {
4434   return DontYieldALot;
4435 }
4436 
4437 // This method is a slightly reworked copy of JDK's sysOpen
4438 // from src/windows/hpi/src/sys_api_md.c
4439 
4440 int os::open(const char *path, int oflag, int mode) {
4441   char pathbuf[MAX_PATH];
4442 
4443   if (strlen(path) > MAX_PATH - 1) {
4444     errno = ENAMETOOLONG;
4445     return -1;
4446   }
4447   os::native_path(strcpy(pathbuf, path));
4448   return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4449 }
4450 
4451 FILE* os::open(int fd, const char* mode) {
4452   return ::_fdopen(fd, mode);
4453 }
4454 
4455 // Is a (classpath) directory empty?
4456 bool os::dir_is_empty(const char* path) {
4457   WIN32_FIND_DATA fd;
4458   HANDLE f = FindFirstFile(path, &fd);
4459   if (f == INVALID_HANDLE_VALUE) {
4460     return true;
4461   }
4462   FindClose(f);
4463   return false;
4464 }
4465 
4466 // create binary file, rewriting existing file if required
4467 int os::create_binary_file(const char* path, bool rewrite_existing) {
4468   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4469   if (!rewrite_existing) {
4470     oflags |= _O_EXCL;
4471   }
4472   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4473 }
4474 
4475 // return current position of file pointer
4476 jlong os::current_file_offset(int fd) {
4477   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4478 }
4479 
4480 // move file pointer to the specified offset
4481 jlong os::seek_to_file_offset(int fd, jlong offset) {
4482   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4483 }
4484 
4485 
4486 jlong os::lseek(int fd, jlong offset, int whence) {
4487   return (jlong) ::_lseeki64(fd, offset, whence);
4488 }
4489 
4490 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4491   OVERLAPPED ov;
4492   DWORD nread;
4493   BOOL result;
4494 
4495   ZeroMemory(&ov, sizeof(ov));
4496   ov.Offset = (DWORD)offset;
4497   ov.OffsetHigh = (DWORD)(offset >> 32);
4498 
4499   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4500 
4501   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4502 
4503   return result ? nread : 0;
4504 }
4505 
4506 
4507 // This method is a slightly reworked copy of JDK's sysNativePath
4508 // from src/windows/hpi/src/path_md.c
4509 
4510 // Convert a pathname to native format.  On win32, this involves forcing all
4511 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4512 // sometimes rejects '/') and removing redundant separators.  The input path is
4513 // assumed to have been converted into the character encoding used by the local
4514 // system.  Because this might be a double-byte encoding, care is taken to
4515 // treat double-byte lead characters correctly.
4516 //
4517 // This procedure modifies the given path in place, as the result is never
4518 // longer than the original.  There is no error return; this operation always
4519 // succeeds.
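     //
     // Illustrative expectations (editorial examples, not exercised here):
     // "/c:/a//b/" is rewritten to "c:\a\b", "//server/share/" becomes
     // "\\server\share", and a bare drive "z:" becomes "z:." because of the
     // C runtime workaround noted at the end of the function.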
4520 char * os::native_path(char *path) {
4521   char *src = path, *dst = path, *end = path;
4522   char *colon = NULL;  // If a drive specifier is found, this will
4523                        // point to the colon following the drive letter
4524 
4525   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4526   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4527           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4528 
4529   // Check for leading separators
4530 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4531   while (isfilesep(*src)) {
4532     src++;
4533   }
4534 
4535   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4536     // Remove leading separators if followed by drive specifier.  This
4537     // hack is necessary to support file URLs containing drive
4538     // specifiers (e.g., "file://c:/path").  As a side effect,
4539     // "/c:/path" can be used as an alternative to "c:/path".
4540     *dst++ = *src++;
4541     colon = dst;
4542     *dst++ = ':';
4543     src++;
4544   } else {
4545     src = path;
4546     if (isfilesep(src[0]) && isfilesep(src[1])) {
4547       // UNC pathname: Retain first separator; leave src pointed at
4548       // second separator so that further separators will be collapsed
4549       // into the second separator.  The result will be a pathname
4550       // beginning with "\\\\" followed (most likely) by a host name.
4551       src = dst = path + 1;
4552       path[0] = '\\';     // Force first separator to '\\'
4553     }
4554   }
4555 
4556   end = dst;
4557 
4558   // Remove redundant separators from remainder of path, forcing all
4559   // separators to be '\\' rather than '/'. Also, single byte space
4560   // characters are removed from the end of the path because those
4561   // are not legal ending characters on this operating system.
4562   //
4563   while (*src != '\0') {
4564     if (isfilesep(*src)) {
4565       *dst++ = '\\'; src++;
4566       while (isfilesep(*src)) src++;
4567       if (*src == '\0') {
4568         // Check for trailing separator
4569         end = dst;
4570         if (colon == dst - 2) break;  // "z:\\"
4571         if (dst == path + 1) break;   // "\\"
4572         if (dst == path + 2 && isfilesep(path[0])) {
4573           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4574           // beginning of a UNC pathname.  Even though it is not, by
4575           // itself, a valid UNC pathname, we leave it as is in order
4576           // to be consistent with the path canonicalizer as well
4577           // as the win32 APIs, which treat this case as an invalid
4578           // UNC pathname rather than as an alias for the root
4579           // directory of the current drive.
4580           break;
4581         }
4582         end = --dst;  // Path does not denote a root directory, so
4583                       // remove trailing separator
4584         break;
4585       }
4586       end = dst;
4587     } else {
4588       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4589         *dst++ = *src++;
4590         if (*src) *dst++ = *src++;
4591         end = dst;
4592       } else {  // Copy a single-byte character
4593         char c = *src++;
4594         *dst++ = c;
4595         // Space is not a legal ending character
4596         if (c != ' ') end = dst;
4597       }
4598     }
4599   }
4600 
4601   *end = '\0';
4602 
4603   // For "z:", add "." to work around a bug in the C runtime library
4604   if (colon == dst - 1) {
4605     path[2] = '.';
4606     path[3] = '\0';
4607   }
4608 
4609   return path;
4610 }
4611 
4612 // This code is a copy of JDK's sysSetLength
4613 // from src/windows/hpi/src/sys_api_md.c
4614 
4615 int os::ftruncate(int fd, jlong length) {
4616   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4617   long high = (long)(length >> 32);
4618   DWORD ret;
4619 
4620   if (h == (HANDLE)(-1)) {
4621     return -1;
4622   }
4623 
4624   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4625   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4626     return -1;
4627   }
4628 
4629   if (::SetEndOfFile(h) == FALSE) {
4630     return -1;
4631   }
4632 
4633   return 0;
4634 }
4635 
4636 int os::get_fileno(FILE* fp) {
4637   return _fileno(fp);
4638 }
4639 
4640 // This code is a copy of JDK's sysSync
4641 // from src/windows/hpi/src/sys_api_md.c
4642 // except for the legacy workaround for a bug in Win 98
4643 
4644 int os::fsync(int fd) {
4645   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4646 
4647   if ((!::FlushFileBuffers(handle)) &&
4648       (GetLastError() != ERROR_ACCESS_DENIED)) {
4649     // from winerror.h
4650     return -1;
4651   }
4652   return 0;
4653 }
4654 
4655 static int nonSeekAvailable(int, long *);
4656 static int stdinAvailable(int, long *);
4657 
4658 #define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
4659 #define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4660 
4661 // This code is a copy of JDK's sysAvailable
4662 // from src/windows/hpi/src/sys_api_md.c
4663 
4664 int os::available(int fd, jlong *bytes) {
4665   jlong cur, end;
4666   struct _stati64 stbuf64;
4667 
4668   if (::_fstati64(fd, &stbuf64) >= 0) {
4669     int mode = stbuf64.st_mode;
4670     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4671       int ret;
4672       long lpbytes;
4673       if (fd == 0) {
4674         ret = stdinAvailable(fd, &lpbytes);
4675       } else {
4676         ret = nonSeekAvailable(fd, &lpbytes);
4677       }
4678       (*bytes) = (jlong)(lpbytes);
4679       return ret;
4680     }
4681     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4682       return FALSE;
4683     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4684       return FALSE;
4685     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4686       return FALSE;
4687     }
4688     *bytes = end - cur;
4689     return TRUE;
4690   } else {
4691     return FALSE;
4692   }
4693 }
4694 
4695 void os::flockfile(FILE* fp) {
4696   _lock_file(fp);
4697 }
4698 
4699 void os::funlockfile(FILE* fp) {
4700   _unlock_file(fp);
4701 }
4702 
4703 // This code is a copy of JDK's nonSeekAvailable
4704 // from src/windows/hpi/src/sys_api_md.c
4705 
4706 static int nonSeekAvailable(int fd, long *pbytes) {
4707   // This is used for available on non-seekable devices
4708   // (like both named and anonymous pipes, such as pipes
4709   //  connected to an exec'd process).
4710   // Standard Input is a special case.
4711   HANDLE han;
4712 
4713   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4714     return FALSE;
4715   }
4716 
4717   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4718     // PeekNamedPipe fails when at EOF.  In that case we
4719     // simply make *pbytes = 0 which is consistent with the
4720     // behavior we get on Solaris when an fd is at EOF.
4721     // The only alternative is to raise an Exception,
4722     // which isn't really warranted.
4723     //
4724     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4725       return FALSE;
4726     }
4727     *pbytes = 0;
4728   }
4729   return TRUE;
4730 }
4731 
4732 #define MAX_INPUT_EVENTS 2000
4733 
4734 // This code is a copy of JDK's stdinAvailable
4735 // from src/windows/hpi/src/sys_api_md.c
4736 
4737 static int stdinAvailable(int fd, long *pbytes) {
4738   HANDLE han;
4739   DWORD numEventsRead = 0;  // Number of events read from buffer
4740   DWORD numEvents = 0;      // Number of events in buffer
4741   DWORD i = 0;              // Loop index
4742   DWORD curLength = 0;      // Position marker
4743   DWORD actualLength = 0;   // Number of bytes readable
4744   BOOL error = FALSE;       // Error holder
4745   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4746 
4747   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4748     return FALSE;
4749   }
4750 
4751   // Construct an array of input records in the console buffer
4752   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4753   if (error == 0) {
4754     return nonSeekAvailable(fd, pbytes);
4755   }
4756 
4757   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4758   if (numEvents > MAX_INPUT_EVENTS) {
4759     numEvents = MAX_INPUT_EVENTS;
4760   }
4761 
4762   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4763   if (lpBuffer == NULL) {
4764     return FALSE;
4765   }
4766 
4767   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4768   if (error == 0) {
4769     os::free(lpBuffer);
4770     return FALSE;
4771   }
4772 
4773   // Examine input records for the number of bytes available
4774   for (i=0; i<numEvents; i++) {
4775     if (lpBuffer[i].EventType == KEY_EVENT) {
4776 
4777       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4778                                       &(lpBuffer[i].Event);
4779       if (keyRecord->bKeyDown == TRUE) {
4780         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4781         curLength++;
4782         if (*keyPressed == '\r') {
4783           actualLength = curLength;
4784         }
4785       }
4786     }
4787   }
4788 
4789   if (lpBuffer != NULL) {
4790     os::free(lpBuffer);
4791   }
4792 
4793   *pbytes = (long) actualLength;
4794   return TRUE;
4795 }
4796 
4797 // Map a block of memory.
4798 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4799                         char *addr, size_t bytes, bool read_only,
4800                         bool allow_exec) {
4801   HANDLE hFile;
4802   char* base;
4803 
4804   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4805                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4806   if (hFile == INVALID_HANDLE_VALUE) {
4807     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4808     return NULL;
4809   }
4810 
4811   if (allow_exec) {
4812     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4813     // unless it comes from a PE image (which the shared archive is not).
4814     // Even VirtualProtect refuses to give execute access to mapped memory
4815     // that was not previously executable.
4816     //
4817     // Instead, stick the executable region in anonymous memory.  Yuck.
4818     // Penalty is that ~4 pages will not be shareable - in the future
4819     // we might consider DLLizing the shared archive with a proper PE
4820     // header so that mapping executable + sharing is possible.
4821 
4822     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4823                                 PAGE_READWRITE);
4824     if (base == NULL) {
4825       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4826       CloseHandle(hFile);
4827       return NULL;
4828     }
4829 
4830     DWORD bytes_read;
4831     OVERLAPPED overlapped;
4832     overlapped.Offset = (DWORD)file_offset;
4833     overlapped.OffsetHigh = 0;
4834     overlapped.hEvent = NULL;
4835     // ReadFile guarantees that if the return value is true, the requested
4836     // number of bytes were read before returning.
4837     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4838     if (!res) {
4839       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4840       release_memory(base, bytes);
4841       CloseHandle(hFile);
4842       return NULL;
4843     }
4844   } else {
4845     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4846                                     NULL /* file_name */);
4847     if (hMap == NULL) {
4848       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4849       CloseHandle(hFile);
4850       return NULL;
4851     }
4852 
4853     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4854     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4855                                   (DWORD)bytes, addr);
4856     if (base == NULL) {
4857       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4858       CloseHandle(hMap);
4859       CloseHandle(hFile);
4860       return NULL;
4861     }
4862 
4863     if (CloseHandle(hMap) == 0) {
4864       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4865       CloseHandle(hFile);
4866       return base;
4867     }
4868   }
4869 
4870   if (allow_exec) {
4871     DWORD old_protect;
4872     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4873     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4874 
4875     if (!res) {
4876       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4877       // Don't consider this a hard error, on IA32 even if the
4878       // VirtualProtect fails, we should still be able to execute
4879       CloseHandle(hFile);
4880       return base;
4881     }
4882   }
4883 
4884   if (CloseHandle(hFile) == 0) {
4885     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4886     return base;
4887   }
4888 
4889   return base;
4890 }
4891 
4892 
4893 // Remap a block of memory.
4894 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4895                           char *addr, size_t bytes, bool read_only,
4896                           bool allow_exec) {
4897   // This OS does not allow existing memory maps to be remapped so we
4898   // have to unmap the memory before we remap it.
4899   if (!os::unmap_memory(addr, bytes)) {
4900     return NULL;
4901   }
4902 
4903   // There is a very small theoretical window between the unmap_memory()
4904   // call above and the map_memory() call below where a thread in native
4905   // code may be able to access an address that is no longer mapped.
4906 
4907   return os::map_memory(fd, file_name, file_offset, addr, bytes,
4908                         read_only, allow_exec);
4909 }
4910 
4911 
4912 // Unmap a block of memory.
4913 // Returns true=success, otherwise false.
4914 
4915 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4916   MEMORY_BASIC_INFORMATION mem_info;
4917   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4918     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
4919     return false;
4920   }
4921 
4922   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4923   // Instead, executable region was allocated using VirtualAlloc(). See
4924   // pd_map_memory() above.
4925   //
4926   // The following flags should match the 'exec_access' flags used for
4927   // VirtualProtect() in pd_map_memory().
4928   if (mem_info.Protect == PAGE_EXECUTE_READ ||
4929       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4930     return pd_release_memory(addr, bytes);
4931   }
4932 
4933   BOOL result = UnmapViewOfFile(addr);
4934   if (result == 0) {
4935     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
4936     return false;
4937   }
4938   return true;
4939 }
4940 
4941 void os::pause() {
4942   char filename[MAX_PATH];
4943   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4944     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4945   } else {
4946     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4947   }
4948 
4949   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4950   if (fd != -1) {
4951     struct stat buf;
4952     ::close(fd);
4953     while (::stat(filename, &buf) == 0) {
4954       Sleep(100);
4955     }
4956   } else {
4957     jio_fprintf(stderr,
4958                 "Could not open pause file '%s', continuing immediately.\n", filename);
4959   }
4960 }
4961 
4962 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
4963   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
4964 }
4965 
4966 // See the caveats for this class in os_windows.hpp
4967 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back
4968 // into this method and returns false. If no OS EXCEPTION was raised, returns
4969 // true.
4970 // The callback is supposed to provide the method that should be protected.
4971 //
4972 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
4973   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
4974   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
4975          "crash_protection already set?");
4976 
4977   bool success = true;
4978   __try {
4979     WatcherThread::watcher_thread()->set_crash_protection(this);
4980     cb.call();
4981   } __except(EXCEPTION_EXECUTE_HANDLER) {
4982     // only for protection, nothing to do
4983     success = false;
4984   }
4985   WatcherThread::watcher_thread()->set_crash_protection(NULL);
4986   return success;
4987 }
4988 
4989 // An Event wraps a win32 "CreateEvent" kernel handle.
4990 //
4991 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
4992 //
4993 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
4994 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
4995 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
4996 //     In addition, an unpark() operation might fetch the handle field, but the
4997 //     event could recycle between the fetch and the SetEvent() operation.
4998 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
4999 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5000 //     on a stale but recycled handle would be harmless, but in practice this might
5001 //     confuse other non-Sun code, so it's not a viable approach.
5002 //
5003 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5004 //     with the Event.  The event handle is never closed.  This could be construed
5005 //     as handle leakage, but only up to the maximum # of threads that have been extant
5006 //     at any one time.  This shouldn't be an issue, as Windows platforms typically
5007 //     permit a process to have hundreds of thousands of open handles.
5008 //
5009 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5010 //     and release unused handles.
5011 //
5012 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5013 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5014 //
5015 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5016 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5017 //
5018 // We use (2).
5019 //
5020 // TODO-FIXME:
5021 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5022 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5023 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5024 // 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
5025 //     into a single win32 CreateEvent() handle.
5026 //
5027 // Assumption:
5028 //    Only one parker can exist on an event, which is why we allocate
5029 //    them per-thread. Multiple unparkers can coexist.
5030 //
5031 // _Event transitions in park()
5032 //   -1 => -1 : illegal
5033 //    1 =>  0 : pass - return immediately
5034 //    0 => -1 : block; then set _Event to 0 before returning
5035 //
5036 // _Event transitions in unpark()
5037 //    0 => 1 : just return
5038 //    1 => 1 : just return
5039 //   -1 => either 0 or 1; must signal target thread
5040 //         That is, we can safely transition _Event from -1 to either
5041 //         0 or 1.
5042 //
5043 // _Event serves as a restricted-range semaphore.
5044 //   -1 : thread is blocked, i.e. there is a waiter
5045 //    0 : neutral: thread is running or ready,
5046 //        could have been signaled after a wait started
5047 //    1 : signaled - thread is running or ready
5048 //
5049 // Another possible encoding of _Event would be with
5050 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5051 //
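     //
     // Illustrative sequences, restating the table above rather than adding
     // new behavior:
     //   unpark() then park(): _Event goes 0 -> 1, park() sees 1, drops it to
     //                         0 and returns immediately without blocking.
     //   park() then unpark(): park() drops _Event 0 -> -1 and blocks;
     //                         unpark() sets _Event to 1 and signals the
     //                         win32 event, so park() wakes and resets
     //                         _Event to 0.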
5052 
5053 int os::PlatformEvent::park(jlong Millis) {
5054   // Transitions for _Event:
5055   //   -1 => -1 : illegal
5056   //    1 =>  0 : pass - return immediately
5057   //    0 => -1 : block; then set _Event to 0 before returning
5058 
5059   guarantee(_ParkHandle != NULL , "Invariant");
5060   guarantee(Millis > 0          , "Invariant");
5061 
5062   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5063   // the initial park() operation.
5064   // Consider: use atomic decrement instead of CAS-loop
5065 
5066   int v;
5067   for (;;) {
5068     v = _Event;
5069     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5070   }
5071   guarantee((v == 0) || (v == 1), "invariant");
5072   if (v != 0) return OS_OK;
5073 
5074   // Do this the hard way by blocking ...
5075   // TODO: consider a brief spin here, gated on the success of recent
5076   // spin attempts by this thread.
5077   //
5078   // We decompose long timeouts into series of shorter timed waits.
5079   // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
5080   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5081   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5082   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5083   // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend
5084   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5085   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5086   // for the already waited time.  This policy does not admit any new outcomes.
5087   // In the future, however, we might want to track the accumulated wait time and
5088   // adjust Millis accordingly if we encounter a spurious wakeup.
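       //
       // Editorial example: a request of 0x18000000 ms is split into one wait
       // of MAXTIMEOUT (0x10000000 ms) followed, if _Event is still < 0, by a
       // wait for the remaining 0x08000000 ms.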
5089 
5090   const int MAXTIMEOUT = 0x10000000;
5091   DWORD rv = WAIT_TIMEOUT;
5092   while (_Event < 0 && Millis > 0) {
5093     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5094     if (Millis > MAXTIMEOUT) {
5095       prd = MAXTIMEOUT;
5096     }
5097     rv = ::WaitForSingleObject(_ParkHandle, prd);
5098     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5099     if (rv == WAIT_TIMEOUT) {
5100       Millis -= prd;
5101     }
5102   }
5103   v = _Event;
5104   _Event = 0;
5105   // see comment at end of os::PlatformEvent::park() below:
5106   OrderAccess::fence();
5107   // If we encounter a nearly simultaneous timeout expiry and unpark()
5108   // we return OS_OK indicating we awoke via unpark().
5109   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5110   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5111 }
5112 
5113 void os::PlatformEvent::park() {
5114   // Transitions for _Event:
5115   //   -1 => -1 : illegal
5116   //    1 =>  0 : pass - return immediately
5117   //    0 => -1 : block; then set _Event to 0 before returning
5118 
5119   guarantee(_ParkHandle != NULL, "Invariant");
5120   // Invariant: Only the thread associated with the Event/PlatformEvent
5121   // may call park().
5122   // Consider: use atomic decrement instead of CAS-loop
5123   int v;
5124   for (;;) {
5125     v = _Event;
5126     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5127   }
5128   guarantee((v == 0) || (v == 1), "invariant");
5129   if (v != 0) return;
5130 
5131   // Do this the hard way by blocking ...
5132   // TODO: consider a brief spin here, gated on the success of recent
5133   // spin attempts by this thread.
5134   while (_Event < 0) {
5135     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5136     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5137   }
5138 
5139   // Usually we'll find _Event == 0 at this point, but as
5140   // an optional optimization we clear it, just in case multiple
5141   // unpark() operations drove _Event up to 1.
5142   _Event = 0;
5143   OrderAccess::fence();
5144   guarantee(_Event >= 0, "invariant");
5145 }
5146 
5147 void os::PlatformEvent::unpark() {
5148   guarantee(_ParkHandle != NULL, "Invariant");
5149 
5150   // Transitions for _Event:
5151   //    0 => 1 : just return
5152   //    1 => 1 : just return
5153   //   -1 => either 0 or 1; must signal target thread
5154   //         That is, we can safely transition _Event from -1 to either
5155   //         0 or 1.
5156   // See also: "Semaphores in Plan 9" by Mullender & Cox
5157   //
5158   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5159   // that it will take two back-to-back park() calls for the owning
5160   // thread to block. This has the benefit of forcing a spurious return
5161   // from the first park() call after an unpark() call which will help
5162   // shake out uses of park() and unpark() without condition variables.
5163 
5164   if (Atomic::xchg(1, &_Event) >= 0) return;
5165 
5166   ::SetEvent(_ParkHandle);
5167 }
5168 
5169 
5170 // JSR166
5171 // -------------------------------------------------------
5172 
5173 // The Windows implementation of Park is very straightforward: Basic
5174 // operations on Win32 Events turn out to have the right semantics to
5175 // use them directly. We opportunistically reuse the event inherited
5176 // from Monitor.
5177 
5178 void Parker::park(bool isAbsolute, jlong time) {
5179   guarantee(_ParkEvent != NULL, "invariant");
5180   // First, demultiplex/decode time arguments
5181   if (time < 0) { // don't wait
5182     return;
5183   } else if (time == 0 && !isAbsolute) {
5184     time = INFINITE;
5185   } else if (isAbsolute) {
5186     time -= os::javaTimeMillis(); // convert to relative time
5187     if (time <= 0) {  // already elapsed
5188       return;
5189     }
5190   } else { // relative
5191     time /= 1000000;  // Must coarsen from nanos to millis
5192     if (time == 0) {  // Wait for the minimal time unit if zero
5193       time = 1;
5194     }
5195   }
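       // Summarizing the decoding above with editorial examples:
       // (isAbsolute == false, time == 0) waits forever; an absolute deadline
       // in millis is converted to a relative wait, or returns at once if it
       // has already passed; a relative request of 500000 ns coarsens to 1 ms.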
5196 
5197   JavaThread* thread = JavaThread::current();
5198 
5199   // Don't wait if interrupted or already triggered
5200   if (Thread::is_interrupted(thread, false) ||
5201       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5202     ResetEvent(_ParkEvent);
5203     return;
5204   } else {
5205     ThreadBlockInVM tbivm(thread);
5206     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5207     thread->set_suspend_equivalent();
5208 
5209     WaitForSingleObject(_ParkEvent, time);
5210     ResetEvent(_ParkEvent);
5211 
5212     // If externally suspended while waiting, re-suspend
5213     if (thread->handle_special_suspend_equivalent_condition()) {
5214       thread->java_suspend_self();
5215     }
5216   }
5217 }
5218 
5219 void Parker::unpark() {
5220   guarantee(_ParkEvent != NULL, "invariant");
5221   SetEvent(_ParkEvent);
5222 }
5223 
5224 // Run the specified command in a separate process. Return its exit value,
5225 // or -1 on failure (e.g. can't create a new process).
5226 int os::fork_and_exec(char* cmd) {
5227   STARTUPINFO si;
5228   PROCESS_INFORMATION pi;
5229 
5230   memset(&si, 0, sizeof(si));
5231   si.cb = sizeof(si);
5232   memset(&pi, 0, sizeof(pi));
5233   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5234                             cmd,    // command line
5235                             NULL,   // process security attribute
5236                             NULL,   // thread security attribute
5237                             TRUE,   // inherits system handles
5238                             0,      // no creation flags
5239                             NULL,   // use parent's environment block
5240                             NULL,   // use parent's starting directory
5241                             &si,    // (in) startup information
5242                             &pi);   // (out) process information
5243 
5244   if (rslt) {
5245     // Wait until child process exits.
5246     WaitForSingleObject(pi.hProcess, INFINITE);
5247 
5248     DWORD exit_code;
5249     GetExitCodeProcess(pi.hProcess, &exit_code);
5250 
5251     // Close process and thread handles.
5252     CloseHandle(pi.hProcess);
5253     CloseHandle(pi.hThread);
5254 
5255     return (int)exit_code;
5256   } else {
5257     return -1;
5258   }
5259 }
5260 
5261 //--------------------------------------------------------------------------------------------------
5262 // Non-product code
5263 
5264 static int mallocDebugIntervalCounter = 0;
5265 static int mallocDebugCounter = 0;
5266 
5267 // For debugging possible bugs inside HeapWalk (a ring buffer)
5268 #define SAVE_COUNT 8
5269 static PROCESS_HEAP_ENTRY saved_heap_entries[SAVE_COUNT];
5270 static int saved_heap_entry_index;
5271 
5272 bool os::check_heap(bool force) {
5273   if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
5274   if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
5275     // Note: HeapValidate executes two hardware breakpoints when it finds something
5276     // wrong; at these points, eax contains the address of the offending block (I think).
5277     // To get to the explicit error message(s) below, just continue twice.
5278     //
5279     // Note:  we want to check the CRT heap, which is not necessarily located in the
5280     // process default heap.
5281     HANDLE heap = (HANDLE) _get_heap_handle();
5282     if (!heap) {
5283       return true;
5284     }
5285 
5286     // If we fail to lock the heap, then gflags.exe has been used
5287     // or some other special heap flag has been set that prevents
5288     // locking. We don't try to walk a heap we can't lock.
5289     if (HeapLock(heap) != 0) {
5290       PROCESS_HEAP_ENTRY phe;
5291       phe.lpData = NULL;
5292       memset(saved_heap_entries, 0, sizeof(saved_heap_entries));
5293       saved_heap_entry_index = 0;
5294       int count = 0;
5295 
5296       while (HeapWalk(heap, &phe) != 0) {
5297         count ++;
5298         if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
5299             !HeapValidate(heap, 0, phe.lpData)) {
5300           tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
5301           tty->print_cr("corrupted block near address %#x, length %d, count %d", phe.lpData, phe.cbData, count);
5302           HeapUnlock(heap);
5303           fatal("corrupted C heap");
5304         } else {
5305           // Save previously seen entries in a ring buffer. We have seen strange
5306           // heap corruption fatal errors that produced mdmp files, but when we load
5307           // these mdmp files in WinDBG, "!heap -triage" shows no error.
5308           // We can examine the saved_heap_entries[] array in the mdmp file to
5309           // diagnose such seemingly spurious errors reported by HeapWalk.
5310           saved_heap_entries[saved_heap_entry_index++] = phe;
5311           if (saved_heap_entry_index >= SAVE_COUNT) {
5312             saved_heap_entry_index = 0;
5313           }
5314         }
5315       }
5316       DWORD err = GetLastError();
5317       if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED &&
5318          (err == ERROR_INVALID_FUNCTION && phe.lpData != NULL)) {
5319         HeapUnlock(heap);
5320         fatal("heap walk aborted with error %d", err);
5321       }
5322       HeapUnlock(heap);
5323     }
5324     mallocDebugIntervalCounter = 0;
5325   }
5326   return true;
5327 }
5328 
5329 
5330 bool os::find(address addr, outputStream* st) {
5331   int offset = -1;
5332   bool result = false;
5333   char buf[256];
5334   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5335     st->print(PTR_FORMAT " ", addr);
5336     if (strlen(buf) < sizeof(buf) - 1) {
5337       char* p = strrchr(buf, '\\');
5338       if (p) {
5339         st->print("%s", p + 1);
5340       } else {
5341         st->print("%s", buf);
5342       }
5343     } else {
5344       // The library name is probably truncated. Let's omit the library name.
5345       // See also JDK-8147512.
5346     }
5347     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5348       st->print("::%s + 0x%x", buf, offset);
5349     }
5350     st->cr();
5351     result = true;
5352   }
5353   return result;
5354 }
5355 
5356 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5357   DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5358 
5359   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
5360     JavaThread* thread = JavaThread::current();
5361     PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5362     address addr = (address) exceptionRecord->ExceptionInformation[1];
5363 
5364     if (os::is_memory_serialize_page(thread, addr)) {
5365       return EXCEPTION_CONTINUE_EXECUTION;
5366     }
5367   }
5368 
5369   return EXCEPTION_CONTINUE_SEARCH;
5370 }
5371 
5372 // We don't build a headless jre for Windows
5373 bool os::is_headless_jre() { return false; }
5374 
5375 static jint initSock() {
5376   WSADATA wsadata;
5377 
5378   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5379     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5380                 ::GetLastError());
5381     return JNI_ERR;
5382   }
5383   return JNI_OK;
5384 }
5385 
5386 struct hostent* os::get_host_by_name(char* name) {
5387   return (struct hostent*)gethostbyname(name);
5388 }
5389 
5390 int os::socket_close(int fd) {
5391   return ::closesocket(fd);
5392 }
5393 
5394 int os::socket(int domain, int type, int protocol) {
5395   return ::socket(domain, type, protocol);
5396 }
5397 
5398 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5399   return ::connect(fd, him, len);
5400 }
5401 
5402 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5403   return ::recv(fd, buf, (int)nBytes, flags);
5404 }
5405 
5406 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5407   return ::send(fd, buf, (int)nBytes, flags);
5408 }
5409 
5410 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5411   return ::send(fd, buf, (int)nBytes, flags);
5412 }
5413 
5414 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5415 #if defined(IA32)
5416   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5417 #elif defined (AMD64)
5418   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5419 #endif
5420 
5421 // returns true if thread could be suspended,
5422 // false otherwise
5423 static bool do_suspend(HANDLE* h) {
5424   if (h != NULL) {
5425     if (SuspendThread(*h) != ~0) {
5426       return true;
5427     }
5428   }
5429   return false;
5430 }
5431 
5432 // resume the thread
5433 // calling resume on an active thread is a no-op
5434 static void do_resume(HANDLE* h) {
5435   if (h != NULL) {
5436     ResumeThread(*h);
5437   }
5438 }
5439 
5440 // retrieve a suspend/resume context capable handle
5441 // from the tid. Caller validates handle return value.
5442 void get_thread_handle_for_extended_context(HANDLE* h,
5443                                             OSThread::thread_id_t tid) {
5444   if (h != NULL) {
5445     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5446   }
5447 }
5448 
5449 // Thread sampling implementation
5450 //
5451 void os::SuspendedThreadTask::internal_do_task() {
5452   CONTEXT    ctxt;
5453   HANDLE     h = NULL;
5454 
5455   // get context capable handle for thread
5456   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5457 
5458   // sanity
5459   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5460     return;
5461   }
5462 
5463   // suspend the thread
5464   if (do_suspend(&h)) {
5465     ctxt.ContextFlags = sampling_context_flags;
5466     // get thread context
5467     GetThreadContext(h, &ctxt);
5468     SuspendedThreadTaskContext context(_thread, &ctxt);
5469     // pass context to Thread Sampling impl
5470     do_task(context);
5471     // resume thread
5472     do_resume(&h);
5473   }
5474 
5475   // close handle
5476   CloseHandle(h);
5477 }
5478 
5479 bool os::start_debugging(char *buf, int buflen) {
5480   int len = (int)strlen(buf);
5481   char *p = &buf[len];
5482 
5483   jio_snprintf(p, buflen-len,
5484              "\n\n"
5485              "Do you want to debug the problem?\n\n"
5486              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5487              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5488              "Otherwise, select 'No' to abort...",
5489              os::current_process_id(), os::current_thread_id());
5490 
5491   bool yes = os::message_box("Unexpected Error", buf);
5492 
5493   if (yes) {
5494     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5495     // exception. If VM is running inside a debugger, the debugger will
5496     // catch the exception. Otherwise, the breakpoint exception will reach
5497     // the default windows exception handler, which can spawn a debugger and
5498     // automatically attach to the dying VM.
5499     os::breakpoint();
5500     yes = false;
5501   }
5502   return yes;
5503 }
5504 
5505 void* os::get_default_process_handle() {
5506   return (void*)GetModuleHandle(NULL);
5507 }
5508 
5509 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5510 // which is used to find statically linked in agents.
5511 // Additionally for windows, takes into account __stdcall names.
5512 // Parameters:
5513 //            sym_name: Symbol in library we are looking for
5514 //            lib_name: Name of library to look in, NULL for shared libs.
5515 //            is_absolute_path == true if lib_name is absolute path to agent
5516 //                                     such as "C:/a/b/L.dll"
5517 //            == false if only the base name of the library is passed in
5518 //               such as "L"
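     //
     // Illustrative results assuming the logic below (editorial examples):
     //   sym_name "_Agent_OnLoad@12" with lib_name "L" (not an absolute path)
     //     yields "_Agent_OnLoad_L@12"
     //   sym_name "Agent_OnLoad" with lib_name NULL
     //     yields "Agent_OnLoad"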
5519 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5520                                     bool is_absolute_path) {
5521   char *agent_entry_name;
5522   size_t len;
5523   size_t name_len;
5524   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5525   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5526   const char *start;
5527 
5528   if (lib_name != NULL) {
5529     len = name_len = strlen(lib_name);
5530     if (is_absolute_path) {
5531       // Need to strip path, prefix and suffix
5532       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5533         lib_name = ++start;
5534       } else {
5535         // Need to check for drive prefix
5536         if ((start = strchr(lib_name, ':')) != NULL) {
5537           lib_name = ++start;
5538         }
5539       }
5540       if (len <= (prefix_len + suffix_len)) {
5541         return NULL;
5542       }
5543       lib_name += prefix_len;
5544       name_len = strlen(lib_name) - suffix_len;
5545     }
5546   }
5547   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5548   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5549   if (agent_entry_name == NULL) {
5550     return NULL;
5551   }
5552   if (lib_name != NULL) {
5553     const char *p = strrchr(sym_name, '@');
5554     if (p != NULL && p != sym_name) {
5555       // sym_name == _Agent_OnLoad@XX
5556       strncpy(agent_entry_name, sym_name, (p - sym_name));
5557       agent_entry_name[(p-sym_name)] = '\0';
5558       // agent_entry_name == _Agent_OnLoad
5559       strcat(agent_entry_name, "_");
5560       strncat(agent_entry_name, lib_name, name_len);
5561       strcat(agent_entry_name, p);
5562       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5563     } else {
5564       strcpy(agent_entry_name, sym_name);
5565       strcat(agent_entry_name, "_");
5566       strncat(agent_entry_name, lib_name, name_len);
5567     }
5568   } else {
5569     strcpy(agent_entry_name, sym_name);
5570   }
5571   return agent_entry_name;
5572 }
5573 
5574 #ifndef PRODUCT
5575 
5576 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5577 // contiguous memory block at a particular address.
5578 // The test first tries to find a good approximate address to allocate at by using the same
5579 // method to allocate some memory at any address. The test then tries to allocate memory in
5580 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5581 // This is of course only a rough heuristic; there is no guarantee that the vicinity of
5582 // the previously allocated memory is available for allocation. The only actual failure
5583 // that is reported is when the test tries to allocate at a particular location but gets a
5584 // different valid one. A NULL return value at this point is not considered an error but may
5585 // be legitimate.
5586 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5587 void TestReserveMemorySpecial_test() {
5588   if (!UseLargePages) {
5589     if (VerboseInternalVMTests) {
5590       tty->print("Skipping test because large pages are disabled");
5591     }
5592     return;
5593   }
5594   // save current value of globals
5595   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5596   bool old_use_numa_interleaving = UseNUMAInterleaving;
5597 
5598   // set globals to make sure we hit the correct code path
5599   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5600 
5601   // do an allocation at an address selected by the OS to get a good one.
5602   const size_t large_allocation_size = os::large_page_size() * 4;
5603   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5604   if (result == NULL) {
5605     if (VerboseInternalVMTests) {
5606       tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5607                           large_allocation_size);
5608     }
5609   } else {
5610     os::release_memory_special(result, large_allocation_size);
5611 
5612     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5613     // we managed to get it once.
5614     const size_t expected_allocation_size = os::large_page_size();
5615     char* expected_location = result + os::large_page_size();
5616     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5617     if (actual_location == NULL) {
5618       if (VerboseInternalVMTests) {
5619         tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5620                             expected_location, expected_allocation_size);
5621       }
5622     } else {
5623       // release memory
5624       os::release_memory_special(actual_location, expected_allocation_size);
5625       // only now check, after releasing any memory to avoid any leaks.
5626       assert(actual_location == expected_location,
5627              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5628              expected_location, expected_allocation_size, actual_location);
5629     }
5630   }
5631 
5632   // restore globals
5633   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5634   UseNUMAInterleaving = old_use_numa_interleaving;
5635 }
5636 #endif // PRODUCT
5637 
5638 /*
5639   All the defined signal names for Windows.
5640 
5641   NOTE that not all of these names are accepted by FindSignal!
5642 
5643   For various reasons some of these may be rejected at runtime.
5644 
5645   Here are the names currently accepted by a user of sun.misc.Signal with
5646   1.4.1 (ignoring potential interaction with use of chaining, etc):
5647 
5648      (LIST TBD)
5649 
5650 */
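     // For example (editorial, derived from the table below):
     // os::get_signal_number("TERM") returns SIGTERM, while a name absent from
     // the table, such as "HUP", returns -1.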
5651 int os::get_signal_number(const char* name) {
5652   static const struct {
5653     char* name;
5654     int   number;
5655   } siglabels [] =
5656     // derived from version 6.0 VC98/include/signal.h
5657   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5658   "FPE",        SIGFPE,         // floating point exception
5659   "SEGV",       SIGSEGV,        // segment violation
5660   "INT",        SIGINT,         // interrupt
5661   "TERM",       SIGTERM,        // software term signal from kill
5662   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5663   "ILL",        SIGILL};        // illegal instruction
5664   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5665     if (strcmp(name, siglabels[i].name) == 0) {
5666       return siglabels[i].number;
5667     }
5668   }
5669   return -1;
5670 }
5671 
5672 // Fast current thread access
5673 
5674 int os::win32::_thread_ptr_offset = 0;
5675 
5676 static void call_wrapper_dummy() {}
5677 
5678 // We need to call the os_exception_wrapper once so that it sets
5679 // up the offset from FS of the thread pointer.
5680 void os::win32::initialize_thread_ptr_offset() {
5681   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5682                            NULL, NULL, NULL, NULL);
5683 }