1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "classfile/classLoader.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/disassembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "jvm_windows.h"
  38 #include "logging/log.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/filemap.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "os_share_windows.hpp"
  43 #include "os_windows.inline.hpp"
  44 #include "prims/jniFastGetField.hpp"
  45 #include "prims/jvm.h"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.inline.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "semaphore_windows.hpp"
  67 #include "services/attachListener.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/growableArray.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 
  77 #ifdef _DEBUG
  78 #include <crtdbg.h>
  79 #endif
  80 
  81 
  82 #include <windows.h>
  83 #include <sys/types.h>
  84 #include <sys/stat.h>
  85 #include <sys/timeb.h>
  86 #include <objidl.h>
  87 #include <shlobj.h>
  88 
  89 #include <malloc.h>
  90 #include <signal.h>
  91 #include <direct.h>
  92 #include <errno.h>
  93 #include <fcntl.h>
  94 #include <io.h>
  95 #include <process.h>              // For _beginthreadex(), _endthreadex()
  96 #include <imagehlp.h>             // For os::dll_address_to_function_name
  97 // for enumerating dll libraries
  98 #include <vdmdbg.h>
  99 
 100 // for timer info max values which include all bits
 101 #define ALL_64_BITS CONST64(-1)
 102 
// For DLL loading/load error detection
// Offsets/sizes from the PE COFF file format
 105 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 106 #define IMAGE_FILE_SIGNATURE_LENGTH 4
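
// Layout sketch (per the PE/COFF format): the DOS header stores, at file
// offset 0x3c, a 4-byte offset to the "PE\0\0" signature; the COFF file
// header follows immediately after the signature, and its first 16-bit field
// is the Machine code (e.g. IMAGE_FILE_MACHINE_AMD64). os::dll_load() below
// reads exactly these two fields to detect architecture mismatches:
//
//   seek(0x3c); read(4 bytes)                    -> signature_offset
//   seek(signature_offset + 4); read(2 bytes)    -> lib_arch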
 107 
 108 static HANDLE main_process;
 109 static HANDLE main_thread;
 110 static int    main_thread_id;
 111 
 112 static FILETIME process_creation_time;
 113 static FILETIME process_exit_time;
 114 static FILETIME process_user_time;
 115 static FILETIME process_kernel_time;
 116 
 117 #ifdef _M_IA64
 118   #define __CPU__ ia64
 119 #else
 120   #ifdef _M_AMD64
 121     #define __CPU__ amd64
 122   #else
 123     #define __CPU__ i486
 124   #endif
 125 #endif
 126 
 127 // save DLL module handle, used by GetModuleFileName
 128 
 129 HINSTANCE vm_lib_handle;
 130 
 131 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 132   switch (reason) {
 133   case DLL_PROCESS_ATTACH:
 134     vm_lib_handle = hinst;
 135     if (ForceTimeHighResolution) {
 136       timeBeginPeriod(1L);
 137     }
 138     break;
 139   case DLL_PROCESS_DETACH:
 140     if (ForceTimeHighResolution) {
 141       timeEndPeriod(1L);
 142     }
 143     break;
 144   default:
 145     break;
 146   }
 147   return true;
 148 }
 149 
 150 static inline double fileTimeAsDouble(FILETIME* time) {
 151   const double high  = (double) ((unsigned int) ~0);
 152   const double split = 10000000.0;
 153   double result = (time->dwLowDateTime / split) +
 154                    time->dwHighDateTime * (high/split);
 155   return result;
 156 }
 157 
 158 // Implementation of os
 159 
 160 bool os::unsetenv(const char* name) {
 161   assert(name != NULL, "Null pointer");
 162   return (SetEnvironmentVariable(name, NULL) == TRUE);
 163 }
 164 
 165 // No setuid programs under Windows.
 166 bool os::have_special_privileges() {
 167   return false;
 168 }
 169 
 170 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.
// On Windows it currently does nothing.
 174 void os::run_periodic_checks() {
 175   return;
 176 }
 177 
 178 // previous UnhandledExceptionFilter, if there is one
 179 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 180 
 181 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 182 
 183 void os::init_system_properties_values() {
 184   // sysclasspath, java_home, dll_dir
 185   {
 186     char *home_path;
 187     char *dll_path;
 188     char *pslash;
 189     char *bin = "\\bin";
 190     char home_dir[MAX_PATH + 1];
 191     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 192 
 193     if (alt_home_dir != NULL)  {
 194       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 195       home_dir[MAX_PATH] = '\0';
 196     } else {
 197       os::jvm_path(home_dir, sizeof(home_dir));
 198       // Found the full path to jvm.dll.
 199       // Now cut the path to <java_home>/jre if we can.
 200       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 201       pslash = strrchr(home_dir, '\\');
 202       if (pslash != NULL) {
 203         *pslash = '\0';                   // get rid of \{client|server}
 204         pslash = strrchr(home_dir, '\\');
 205         if (pslash != NULL) {
 206           *pslash = '\0';                 // get rid of \bin
 207         }
 208       }
 209     }
 210 
 211     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 212     if (home_path == NULL) {
 213       return;
 214     }
 215     strcpy(home_path, home_dir);
 216     Arguments::set_java_home(home_path);
 217     FREE_C_HEAP_ARRAY(char, home_path);
 218 
 219     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 220                                 mtInternal);
 221     if (dll_path == NULL) {
 222       return;
 223     }
 224     strcpy(dll_path, home_dir);
 225     strcat(dll_path, bin);
 226     Arguments::set_dll_dir(dll_path);
 227     FREE_C_HEAP_ARRAY(char, dll_path);
 228 
 229     if (!set_boot_path('\\', ';')) {
 230       return;
 231     }
 232   }
 233 
 234 // library_path
 235 #define EXT_DIR "\\lib\\ext"
 236 #define BIN_DIR "\\bin"
 237 #define PACKAGE_DIR "\\Sun\\Java"
 238   {
 239     // Win32 library search order (See the documentation for LoadLibrary):
 240     //
 241     // 1. The directory from which application is loaded.
 242     // 2. The system wide Java Extensions directory (Java only)
 243     // 3. System directory (GetSystemDirectory)
 244     // 4. Windows directory (GetWindowsDirectory)
 245     // 5. The PATH environment variable
 246     // 6. The current directory
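    //
    // For example (illustrative values), with the launcher in C:\jdk\bin and
    // an unmodified PATH, the code below builds a library_path like:
    //   C:\jdk\bin;C:\Windows\Sun\Java\bin;C:\Windows\system32;C:\Windows;<PATH>;.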
 247 
 248     char *library_path;
 249     char tmp[MAX_PATH];
 250     char *path_str = ::getenv("PATH");
 251 
 252     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 253                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 254 
 255     library_path[0] = '\0';
 256 
 257     GetModuleFileName(NULL, tmp, sizeof(tmp));
 258     *(strrchr(tmp, '\\')) = '\0';
 259     strcat(library_path, tmp);
 260 
 261     GetWindowsDirectory(tmp, sizeof(tmp));
 262     strcat(library_path, ";");
 263     strcat(library_path, tmp);
 264     strcat(library_path, PACKAGE_DIR BIN_DIR);
 265 
 266     GetSystemDirectory(tmp, sizeof(tmp));
 267     strcat(library_path, ";");
 268     strcat(library_path, tmp);
 269 
 270     GetWindowsDirectory(tmp, sizeof(tmp));
 271     strcat(library_path, ";");
 272     strcat(library_path, tmp);
 273 
 274     if (path_str) {
 275       strcat(library_path, ";");
 276       strcat(library_path, path_str);
 277     }
 278 
 279     strcat(library_path, ";.");
 280 
 281     Arguments::set_library_path(library_path);
 282     FREE_C_HEAP_ARRAY(char, library_path);
 283   }
 284 
 285   // Default extensions directory
 286   {
 287     char path[MAX_PATH];
 288     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 289     GetWindowsDirectory(path, MAX_PATH);
 290     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 291             path, PACKAGE_DIR, EXT_DIR);
 292     Arguments::set_ext_dirs(buf);
 293   }
 294   #undef EXT_DIR
 295   #undef BIN_DIR
 296   #undef PACKAGE_DIR
 297 
 298 #ifndef _WIN64
 299   // set our UnhandledExceptionFilter and save any previous one
 300   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 301 #endif
 302 
 303   // Done
 304   return;
 305 }
 306 
 307 void os::breakpoint() {
 308   DebugBreak();
 309 }
 310 
 311 // Invoked from the BREAKPOINT Macro
 312 extern "C" void breakpoint() {
 313   os::breakpoint();
 314 }
 315 
 316 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 317 // So far, this method is only used by Native Memory Tracking, which is
 318 // only supported on Windows XP or later.
 319 //
 320 int os::get_native_stack(address* stack, int frames, int toSkip) {
 321   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 322   for (int index = captured; index < frames; index ++) {
 323     stack[index] = NULL;
 324   }
 325   return captured;
 326 }
 327 
 328 
 329 // os::current_stack_base()
 330 //
 331 //   Returns the base of the stack, which is the stack's
 332 //   starting address.  This function must be called
 333 //   while running on the stack of the thread being queried.
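//
//   Sketch of the approach used below: VirtualQuery() on the address of a
//   local variable returns the region containing the current stack page;
//   its AllocationBase is the low end of the reserved stack.  Summing the
//   sizes of all consecutive regions that share that AllocationBase gives
//   the full reservation, and base = stack_bottom + stack_size is then the
//   high (starting) address of the stack.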
 334 
 335 address os::current_stack_base() {
 336   MEMORY_BASIC_INFORMATION minfo;
 337   address stack_bottom;
 338   size_t stack_size;
 339 
 340   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 341   stack_bottom =  (address)minfo.AllocationBase;
 342   stack_size = minfo.RegionSize;
 343 
 344   // Add up the sizes of all the regions with the same
 345   // AllocationBase.
 346   while (1) {
 347     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 348     if (stack_bottom == (address)minfo.AllocationBase) {
 349       stack_size += minfo.RegionSize;
 350     } else {
 351       break;
 352     }
 353   }
 354 
 355 #ifdef _M_IA64
 356   // IA64 has memory and register stacks
 357   //
 358   // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
 359   // at thread creation (1MB backing store growing upwards, 1MB memory stack
 360   // growing downwards, 2MB summed up)
 361   //
 362   // ...
 363   // ------- top of stack (high address) -----
 364   // |
 365   // |      1MB
 366   // |      Backing Store (Register Stack)
 367   // |
 368   // |         / \
 369   // |          |
 370   // |          |
 371   // |          |
 372   // ------------------------ stack base -----
 373   // |      1MB
 374   // |      Memory Stack
 375   // |
 376   // |          |
 377   // |          |
 378   // |          |
 379   // |         \ /
 380   // |
 381   // ----- bottom of stack (low address) -----
 382   // ...
 383 
 384   stack_size = stack_size / 2;
 385 #endif
 386   return stack_bottom + stack_size;
 387 }
 388 
 389 size_t os::current_stack_size() {
 390   size_t sz;
 391   MEMORY_BASIC_INFORMATION minfo;
 392   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 393   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 394   return sz;
 395 }
 396 
 397 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 398   const struct tm* time_struct_ptr = localtime(clock);
 399   if (time_struct_ptr != NULL) {
 400     *res = *time_struct_ptr;
 401     return res;
 402   }
 403   return NULL;
 404 }
 405 
 406 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 407   const struct tm* time_struct_ptr = gmtime(clock);
 408   if (time_struct_ptr != NULL) {
 409     *res = *time_struct_ptr;
 410     return res;
 411   }
 412   return NULL;
 413 }
 414 
 415 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 416 
 417 // Thread start routine for all newly created threads
 418 static unsigned __stdcall thread_native_entry(Thread* thread) {
 419   // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
 421   // cache lines. The threads can be either from the same JVM instance, or
 422   // from different JVM instances. The benefit is especially true for
 423   // processors with hyperthreading technology.
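  // Concretely, ((pid ^ counter) & 7) picks one of 8 values, so successive
  // threads shift their initial frame by 0, 128, ..., up to 896 bytes.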
 424   static int counter = 0;
 425   int pid = os::current_process_id();
 426   _alloca(((pid ^ counter++) & 7) * 128);
 427 
 428   thread->initialize_thread_current();
 429 
 430   OSThread* osthr = thread->osthread();
 431   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 432 
 433   if (UseNUMA) {
 434     int lgrp_id = os::numa_get_group_id();
 435     if (lgrp_id != -1) {
 436       thread->set_lgrp_id(lgrp_id);
 437     }
 438   }
 439 
 440   // Diagnostic code to investigate JDK-6573254
 441   int res = 30115;  // non-java thread
 442   if (thread->is_Java_thread()) {
 443     res = 20115;    // java thread
 444   }
 445 
 446   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 447 
  // Install a win32 structured exception handler around every thread created
  // by the VM, so the VM can generate an error dump when an exception occurs
  // in a non-Java thread (e.g. the VM thread).
 451   __try {
 452     thread->run();
 453   } __except(topLevelExceptionFilter(
 454                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 455     // Nothing to do.
 456   }
 457 
 458   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 459 
 460   // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited,
  // which frees the CodeHeap containing the Atomic::add code.
 463   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 464     Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
 465   }
 466 
 467   // If a thread has not deleted itself ("delete this") as part of its
 468   // termination sequence, we have to ensure thread-local-storage is
 469   // cleared before we actually terminate. No threads should ever be
 470   // deleted asynchronously with respect to their termination.
 471   if (Thread::current_or_null_safe() != NULL) {
 472     assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
 473     thread->clear_thread_current();
 474   }
 475 
 476   // Thread must not return from exit_process_or_thread(), but if it does,
 477   // let it proceed to exit normally
 478   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 479 }
 480 
 481 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 482                                   int thread_id) {
 483   // Allocate the OSThread object
 484   OSThread* osthread = new OSThread(NULL, NULL);
 485   if (osthread == NULL) return NULL;
 486 
 487   // Initialize support for Java interrupts
 488   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 489   if (interrupt_event == NULL) {
 490     delete osthread;
 491     return NULL;
 492   }
 493   osthread->set_interrupt_event(interrupt_event);
 494 
 495   // Store info on the Win32 thread into the OSThread
 496   osthread->set_thread_handle(thread_handle);
 497   osthread->set_thread_id(thread_id);
 498 
 499   if (UseNUMA) {
 500     int lgrp_id = os::numa_get_group_id();
 501     if (lgrp_id != -1) {
 502       thread->set_lgrp_id(lgrp_id);
 503     }
 504   }
 505 
 506   // Initial thread state is INITIALIZED, not SUSPENDED
 507   osthread->set_state(INITIALIZED);
 508 
 509   return osthread;
 510 }
 511 
 512 
 513 bool os::create_attached_thread(JavaThread* thread) {
 514 #ifdef ASSERT
 515   thread->verify_not_published();
 516 #endif
 517   HANDLE thread_h;
 518   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 519                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 520     fatal("DuplicateHandle failed\n");
 521   }
 522   OSThread* osthread = create_os_thread(thread, thread_h,
 523                                         (int)current_thread_id());
 524   if (osthread == NULL) {
 525     return false;
 526   }
 527 
 528   // Initial thread state is RUNNABLE
 529   osthread->set_state(RUNNABLE);
 530 
 531   thread->set_osthread(osthread);
 532 
 533   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 534     os::current_thread_id());
 535 
 536   return true;
 537 }
 538 
 539 bool os::create_main_thread(JavaThread* thread) {
 540 #ifdef ASSERT
 541   thread->verify_not_published();
 542 #endif
 543   if (_starting_thread == NULL) {
 544     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 545     if (_starting_thread == NULL) {
 546       return false;
 547     }
 548   }
 549 
  // The primordial thread is runnable from the start
 551   _starting_thread->set_state(RUNNABLE);
 552 
 553   thread->set_osthread(_starting_thread);
 554   return true;
 555 }
 556 
 557 // Helper function to trace _beginthreadex attributes,
 558 //  similar to os::Posix::describe_pthread_attr()
 559 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 560                                                size_t stacksize, unsigned initflag) {
 561   stringStream ss(buf, buflen);
 562   if (stacksize == 0) {
 563     ss.print("stacksize: default, ");
 564   } else {
 565     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 566   }
 567   ss.print("flags: ");
 568   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 569   #define ALL(X) \
 570     X(CREATE_SUSPENDED) \
 571     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 572   ALL(PRINT_FLAG)
 573   #undef ALL
 574   #undef PRINT_FLAG
 575   return buf;
 576 }
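
// Example of the string produced above (illustrative): with stacksize == 0 and
// initflag == CREATE_SUSPENDED the buffer would read
//   "stacksize: default, flags: CREATE_SUSPENDED "
// With both flags set (as in os::create_thread() below) the text may be
// truncated by a small caller buffer such as the char buf[64] used there.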
 577 
 578 // Allocate and initialize a new OSThread
 579 bool os::create_thread(Thread* thread, ThreadType thr_type,
 580                        size_t stack_size) {
 581   unsigned thread_id;
 582 
 583   // Allocate the OSThread object
 584   OSThread* osthread = new OSThread(NULL, NULL);
 585   if (osthread == NULL) {
 586     return false;
 587   }
 588 
 589   // Initialize support for Java interrupts
 590   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 591   if (interrupt_event == NULL) {
 592     delete osthread;
    return false;
 594   }
 595   osthread->set_interrupt_event(interrupt_event);
 596   osthread->set_interrupted(false);
 597 
 598   thread->set_osthread(osthread);
 599 
 600   if (stack_size == 0) {
 601     switch (thr_type) {
 602     case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
 604       if (JavaThread::stack_size_at_create() > 0) {
 605         stack_size = JavaThread::stack_size_at_create();
 606       }
 607       break;
 608     case os::compiler_thread:
 609       if (CompilerThreadStackSize > 0) {
 610         stack_size = (size_t)(CompilerThreadStackSize * K);
 611         break;
 612       } // else fall through:
 613         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 614     case os::vm_thread:
 615     case os::pgc_thread:
 616     case os::cgc_thread:
 617     case os::watcher_thread:
 618       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 619       break;
 620     }
 621   }
 622 
 623   // Create the Win32 thread
 624   //
  // Contrary to what the MSDN documentation says, "stack_size" in
  // _beginthreadex() does not specify the stack size. Instead, it specifies
  // the size of the initially committed space. The stack size is determined
  // by the PE header in the executable. If the committed "stack_size" is
  // larger than the default value in the PE header, the stack is rounded up
  // to the nearest multiple of 1MB. For example, if the launcher has a
  // default stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all; it only affects the initial
  // commitment. On the other hand, specifying a 'stack_size' larger than the
  // default value may cause a significant increase in memory usage, because
  // not only is the stack space rounded up to a multiple of 1MB, but the
  // entire space is also committed upfront.
  //
  // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that makes it treat 'stack_size' as the stack size.
  // However, according to MSDN we are not supposed to call CreateThread()
  // directly because the JVM uses the C runtime library. The good news is
  // that the flag appears to work with _beginthreadex() as well.
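  //
  // Illustrative example (assuming a launcher whose PE header reserves 320k):
  //   stack_size = 64*K, without the reservation flag -> 320k stack,
  //                      64k initially committed
  //   stack_size = 2*M,  without the reservation flag -> 2M stack,
  //                      fully committed upfront
  //   stack_size = 2*M,  with STACK_SIZE_PARAM_IS_A_RESERVATION (as below)
  //                      -> 2M reserved, committed on demand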
 643 
 644   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 645   HANDLE thread_handle =
 646     (HANDLE)_beginthreadex(NULL,
 647                            (unsigned)stack_size,
 648                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 649                            thread,
 650                            initflag,
 651                            &thread_id);
 652 
 653   char buf[64];
 654   if (thread_handle != NULL) {
 655     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 656       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 657   } else {
 658     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 659       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 660   }
 661 
 662   if (thread_handle == NULL) {
 663     // Need to clean up stuff we've allocated so far
 664     CloseHandle(osthread->interrupt_event());
 665     thread->set_osthread(NULL);
 666     delete osthread;
    return false;
 668   }
 669 
 670   Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
 671 
 672   // Store info on the Win32 thread into the OSThread
 673   osthread->set_thread_handle(thread_handle);
 674   osthread->set_thread_id(thread_id);
 675 
 676   // Initial thread state is INITIALIZED, not SUSPENDED
 677   osthread->set_state(INITIALIZED);
 678 
 679   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 680   return true;
 681 }
 682 
 683 
 684 // Free Win32 resources related to the OSThread
 685 void os::free_thread(OSThread* osthread) {
 686   assert(osthread != NULL, "osthread not set");
 687 
 688   // We are told to free resources of the argument thread,
 689   // but we can only really operate on the current thread.
 690   assert(Thread::current()->osthread() == osthread,
 691          "os::free_thread but not current thread");
 692 
 693   CloseHandle(osthread->thread_handle());
 694   CloseHandle(osthread->interrupt_event());
 695   delete osthread;
 696 }
 697 
 698 static jlong first_filetime;
 699 static jlong initial_performance_count;
 700 static jlong performance_frequency;
 701 
 702 
 703 jlong as_long(LARGE_INTEGER x) {
 704   jlong result = 0; // initialization to avoid warning
 705   set_high(&result, x.HighPart);
 706   set_low(&result, x.LowPart);
 707   return result;
 708 }
 709 
 710 
 711 jlong os::elapsed_counter() {
 712   LARGE_INTEGER count;
 713   QueryPerformanceCounter(&count);
 714   return as_long(count) - initial_performance_count;
 715 }
 716 
 717 
 718 jlong os::elapsed_frequency() {
 719   return performance_frequency;
 720 }
 721 
 722 
 723 julong os::available_memory() {
 724   return win32::available_memory();
 725 }
 726 
 727 julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
  // incorrect value if total memory is larger than 4GB
 730   MEMORYSTATUSEX ms;
 731   ms.dwLength = sizeof(ms);
 732   GlobalMemoryStatusEx(&ms);
 733 
 734   return (julong)ms.ullAvailPhys;
 735 }
 736 
 737 julong os::physical_memory() {
 738   return win32::physical_memory();
 739 }
 740 
 741 bool os::has_allocatable_memory_limit(julong* limit) {
 742   MEMORYSTATUSEX ms;
 743   ms.dwLength = sizeof(ms);
 744   GlobalMemoryStatusEx(&ms);
 745 #ifdef _LP64
 746   *limit = (julong)ms.ullAvailVirtual;
 747   return true;
 748 #else
 749   // Limit to 1400m because of the 2gb address space wall
 750   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 751   return true;
 752 #endif
 753 }
 754 
 755 int os::active_processor_count() {
 756   DWORD_PTR lpProcessAffinityMask = 0;
 757   DWORD_PTR lpSystemAffinityMask = 0;
 758   int proc_count = processor_count();
 759   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 760       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // Number of active processors is the number of set bits in the process affinity mask
 762     int bitcount = 0;
 763     while (lpProcessAffinityMask != 0) {
 764       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 765       bitcount++;
 766     }
 767     return bitcount;
 768   } else {
 769     return proc_count;
 770   }
 771 }
 772 
 773 void os::set_native_thread_name(const char *name) {
 774 
 775   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 776   //
 777   // Note that unfortunately this only works if the process
  // is already attached to a debugger; the debugger must observe
 779   // the exception below to show the correct name.
 780 
 781   // If there is no debugger attached skip raising the exception
 782   if (!IsDebuggerPresent()) {
 783     return;
 784   }
 785 
 786   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 787   struct {
 788     DWORD dwType;     // must be 0x1000
 789     LPCSTR szName;    // pointer to name (in user addr space)
 790     DWORD dwThreadID; // thread ID (-1=caller thread)
 791     DWORD dwFlags;    // reserved for future use, must be zero
 792   } info;
 793 
 794   info.dwType = 0x1000;
 795   info.szName = name;
 796   info.dwThreadID = -1;
 797   info.dwFlags = 0;
 798 
 799   __try {
 800     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 801   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 802 }
 803 
 804 bool os::distribute_processes(uint length, uint* distribution) {
 805   // Not yet implemented.
 806   return false;
 807 }
 808 
 809 bool os::bind_to_processor(uint processor_id) {
 810   // Not yet implemented.
 811   return false;
 812 }
 813 
 814 void os::win32::initialize_performance_counter() {
 815   LARGE_INTEGER count;
 816   QueryPerformanceFrequency(&count);
 817   performance_frequency = as_long(count);
 818   QueryPerformanceCounter(&count);
 819   initial_performance_count = as_long(count);
 820 }
 821 
 822 
 823 double os::elapsedTime() {
 824   return (double) elapsed_counter() / (double) elapsed_frequency();
 825 }
 826 
 827 
 828 // Windows format:
 829 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 830 // Java format:
 831 //   Java standards require the number of milliseconds since 1/1/1970
 832 
 833 // Constant offset - calculated using offset()
 834 static jlong  _offset   = 116444736000000000;
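// Derivation sketch for the constant above (illustrative):
//   days from 1601-01-01 to 1970-01-01: 369 * 365 + 89 leap days = 134,774
//   seconds:      134,774 * 86,400            = 11,644,473,600
//   100 ns ticks: 11,644,473,600 * 10,000,000 = 116,444,736,000,000,000
// windows_to_java_time() subtracts this offset and divides by 10,000 to turn
// 100 ns ticks since 1601 into milliseconds since 1970.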
 835 // Fake time counter for reproducible results when debugging
 836 static jlong  fake_time = 0;
 837 
 838 #ifdef ASSERT
 839 // Just to be safe, recalculate the offset in debug mode
 840 static jlong _calculated_offset = 0;
 841 static int   _has_calculated_offset = 0;
 842 
 843 jlong offset() {
 844   if (_has_calculated_offset) return _calculated_offset;
 845   SYSTEMTIME java_origin;
 846   java_origin.wYear          = 1970;
 847   java_origin.wMonth         = 1;
 848   java_origin.wDayOfWeek     = 0; // ignored
 849   java_origin.wDay           = 1;
 850   java_origin.wHour          = 0;
 851   java_origin.wMinute        = 0;
 852   java_origin.wSecond        = 0;
 853   java_origin.wMilliseconds  = 0;
 854   FILETIME jot;
 855   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 856     fatal("Error = %d\nWindows error", GetLastError());
 857   }
 858   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 859   _has_calculated_offset = 1;
 860   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 861   return _calculated_offset;
 862 }
 863 #else
 864 jlong offset() {
 865   return _offset;
 866 }
 867 #endif
 868 
 869 jlong windows_to_java_time(FILETIME wt) {
 870   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 871   return (a - offset()) / 10000;
 872 }
 873 
// Returns time ticks in tenths of a microsecond (100 ns units)
 875 jlong windows_to_time_ticks(FILETIME wt) {
 876   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 877   return (a - offset());
 878 }
 879 
 880 FILETIME java_to_windows_time(jlong l) {
 881   jlong a = (l * 10000) + offset();
 882   FILETIME result;
 883   result.dwHighDateTime = high(a);
 884   result.dwLowDateTime  = low(a);
 885   return result;
 886 }
 887 
 888 bool os::supports_vtime() { return true; }
 889 bool os::enable_vtime() { return false; }
 890 bool os::vtime_enabled() { return false; }
 891 
 892 double os::elapsedVTime() {
 893   FILETIME created;
 894   FILETIME exited;
 895   FILETIME kernel;
 896   FILETIME user;
 897   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 898     // the resolution of windows_to_java_time() should be sufficient (ms)
 899     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 900   } else {
 901     return elapsedTime();
 902   }
 903 }
 904 
 905 jlong os::javaTimeMillis() {
 906   if (UseFakeTimers) {
 907     return fake_time++;
 908   } else {
 909     FILETIME wt;
 910     GetSystemTimeAsFileTime(&wt);
 911     return windows_to_java_time(wt);
 912   }
 913 }
 914 
 915 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 916   FILETIME wt;
 917   GetSystemTimeAsFileTime(&wt);
 918   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
  jlong secs = jlong(ticks / 10000000); // 10,000,000 ticks (100 ns each) per second
 920   seconds = secs;
 921   nanos = jlong(ticks - (secs*10000000)) * 100;
 922 }
 923 
 924 jlong os::javaTimeNanos() {
  LARGE_INTEGER current_count;
  QueryPerformanceCounter(&current_count);
  double current = as_long(current_count);
  double freq = performance_frequency;
  jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
  return time;
 931 }
 932 
 933 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 934   jlong freq = performance_frequency;
 935   if (freq < NANOSECS_PER_SEC) {
 936     // the performance counter is 64 bits and we will
 937     // be multiplying it -- so no wrap in 64 bits
 938     info_ptr->max_value = ALL_64_BITS;
 939   } else if (freq > NANOSECS_PER_SEC) {
 940     // use the max value the counter can reach to
 941     // determine the max value which could be returned
 942     julong max_counter = (julong)ALL_64_BITS;
 943     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 944   } else {
 945     // the performance counter is 64 bits and we will
 946     // be using it directly -- so no wrap in 64 bits
 947     info_ptr->max_value = ALL_64_BITS;
 948   }
 949 
 950   // using a counter, so no skipping
 951   info_ptr->may_skip_backward = false;
 952   info_ptr->may_skip_forward = false;
 953 
 954   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 955 }
 956 
 957 char* os::local_time_string(char *buf, size_t buflen) {
 958   SYSTEMTIME st;
 959   GetLocalTime(&st);
 960   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 961                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 962   return buf;
 963 }
 964 
 965 bool os::getTimesSecs(double* process_real_time,
 966                       double* process_user_time,
 967                       double* process_system_time) {
 968   HANDLE h_process = GetCurrentProcess();
 969   FILETIME create_time, exit_time, kernel_time, user_time;
 970   BOOL result = GetProcessTimes(h_process,
 971                                 &create_time,
 972                                 &exit_time,
 973                                 &kernel_time,
 974                                 &user_time);
 975   if (result != 0) {
 976     FILETIME wt;
 977     GetSystemTimeAsFileTime(&wt);
 978     jlong rtc_millis = windows_to_java_time(wt);
 979     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 980     *process_user_time =
 981       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 982     *process_system_time =
 983       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
 984     return true;
 985   } else {
 986     return false;
 987   }
 988 }
 989 
 990 void os::shutdown() {
 991   // allow PerfMemory to attempt cleanup of any persistent resources
 992   perfMemory_exit();
 993 
 994   // flush buffered output, finish log files
 995   ostream_abort();
 996 
 997   // Check for abort hook
 998   abort_hook_t abort_hook = Arguments::abort_hook();
 999   if (abort_hook != NULL) {
1000     abort_hook();
1001   }
1002 }
1003 
1004 
1005 static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1006                                          PMINIDUMP_EXCEPTION_INFORMATION,
1007                                          PMINIDUMP_USER_STREAM_INFORMATION,
1008                                          PMINIDUMP_CALLBACK_INFORMATION);
1009 
1010 static HANDLE dumpFile = NULL;
1011 
1012 // Check if dump file can be created.
1013 void os::check_dump_limit(char* buffer, size_t buffsz) {
1014   bool status = true;
1015   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1016     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1017     status = false;
1018   }
1019 
1020 #ifndef ASSERT
1021   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1022     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1023     status = false;
1024   }
1025 #endif
1026 
1027   if (status) {
1028     const char* cwd = get_current_directory(NULL, 0);
1029     int pid = current_process_id();
1030     if (cwd != NULL) {
1031       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1032     } else {
1033       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1034     }
1035 
1036     if (dumpFile == NULL &&
1037        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1038                  == INVALID_HANDLE_VALUE) {
1039       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1040       status = false;
1041     }
1042   }
1043   VMError::record_coredump_status(buffer, status);
1044 }
1045 
1046 void os::abort(bool dump_core, void* siginfo, const void* context) {
1047   HINSTANCE dbghelp;
1048   EXCEPTION_POINTERS ep;
1049   MINIDUMP_EXCEPTION_INFORMATION mei;
1050   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1051 
1052   HANDLE hProcess = GetCurrentProcess();
1053   DWORD processId = GetCurrentProcessId();
1054   MINIDUMP_TYPE dumpType;
1055 
1056   shutdown();
1057   if (!dump_core || dumpFile == NULL) {
1058     if (dumpFile != NULL) {
1059       CloseHandle(dumpFile);
1060     }
1061     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1062   }
1063 
1064   dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
1065 
1066   if (dbghelp == NULL) {
1067     jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
1068     CloseHandle(dumpFile);
1069     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1070   }
1071 
1072   _MiniDumpWriteDump =
1073       CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1074                                     PMINIDUMP_EXCEPTION_INFORMATION,
1075                                     PMINIDUMP_USER_STREAM_INFORMATION,
1076                                     PMINIDUMP_CALLBACK_INFORMATION),
1077                                     GetProcAddress(dbghelp,
1078                                     "MiniDumpWriteDump"));
1079 
1080   if (_MiniDumpWriteDump == NULL) {
1081     jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
1082     CloseHandle(dumpFile);
1083     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1084   }
1085 
1086   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1087     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1088 
1089   if (siginfo != NULL && context != NULL) {
1090     ep.ContextRecord = (PCONTEXT) context;
1091     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1092 
1093     mei.ThreadId = GetCurrentThreadId();
1094     mei.ExceptionPointers = &ep;
1095     pmei = &mei;
1096   } else {
1097     pmei = NULL;
1098   }
1099 
  // Older versions of dbghelp.dll (the one shipped with Win2003, for example) may not support all
  // the dump types we really want. If the first call fails, fall back to using just MiniDumpWithFullMemory.
1102   if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
1103       _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
1104     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1105   }
1106   CloseHandle(dumpFile);
1107   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1108 }
1109 
1110 // Die immediately, no exit hook, no abort hook, no cleanup.
1111 void os::die() {
1112   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1113 }
1114 
1115 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1116 //  * dirent_md.c       1.15 00/02/02
1117 //
1118 // The declarations for DIR and struct dirent are in jvm_win32.h.
1119 
1120 // Caller must have already run dirname through JVM_NativePath, which removes
1121 // duplicate slashes and converts all instances of '/' into '\\'.
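// For example (illustrative), JVM_NativePath turns "D:/foo//bar" into
// "D:\foo\bar" before this function sees it.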
1122 
1123 DIR * os::opendir(const char *dirname) {
1124   assert(dirname != NULL, "just checking");   // hotspot change
1125   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1126   DWORD fattr;                                // hotspot change
1127   char alt_dirname[4] = { 0, 0, 0, 0 };
1128 
1129   if (dirp == 0) {
1130     errno = ENOMEM;
1131     return 0;
1132   }
1133 
1134   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1135   // as a directory in FindFirstFile().  We detect this case here and
1136   // prepend the current drive name.
1137   //
1138   if (dirname[1] == '\0' && dirname[0] == '\\') {
1139     alt_dirname[0] = _getdrive() + 'A' - 1;
1140     alt_dirname[1] = ':';
1141     alt_dirname[2] = '\\';
1142     alt_dirname[3] = '\0';
1143     dirname = alt_dirname;
1144   }
1145 
1146   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1147   if (dirp->path == 0) {
1148     free(dirp);
1149     errno = ENOMEM;
1150     return 0;
1151   }
1152   strcpy(dirp->path, dirname);
1153 
1154   fattr = GetFileAttributes(dirp->path);
1155   if (fattr == 0xffffffff) {
1156     free(dirp->path);
1157     free(dirp);
1158     errno = ENOENT;
1159     return 0;
1160   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1161     free(dirp->path);
1162     free(dirp);
1163     errno = ENOTDIR;
1164     return 0;
1165   }
1166 
1167   // Append "*.*", or possibly "\\*.*", to path
1168   if (dirp->path[1] == ':' &&
1169       (dirp->path[2] == '\0' ||
1170       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1171     // No '\\' needed for cases like "Z:" or "Z:\"
1172     strcat(dirp->path, "*.*");
1173   } else {
1174     strcat(dirp->path, "\\*.*");
1175   }
1176 
1177   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1178   if (dirp->handle == INVALID_HANDLE_VALUE) {
1179     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1180       free(dirp->path);
1181       free(dirp);
1182       errno = EACCES;
1183       return 0;
1184     }
1185   }
1186   return dirp;
1187 }
1188 
1189 // parameter dbuf unused on Windows
1190 struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
1191   assert(dirp != NULL, "just checking");      // hotspot change
1192   if (dirp->handle == INVALID_HANDLE_VALUE) {
1193     return 0;
1194   }
1195 
1196   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1197 
1198   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1199     if (GetLastError() == ERROR_INVALID_HANDLE) {
1200       errno = EBADF;
1201       return 0;
1202     }
1203     FindClose(dirp->handle);
1204     dirp->handle = INVALID_HANDLE_VALUE;
1205   }
1206 
1207   return &dirp->dirent;
1208 }
1209 
1210 int os::closedir(DIR *dirp) {
1211   assert(dirp != NULL, "just checking");      // hotspot change
1212   if (dirp->handle != INVALID_HANDLE_VALUE) {
1213     if (!FindClose(dirp->handle)) {
1214       errno = EBADF;
1215       return -1;
1216     }
1217     dirp->handle = INVALID_HANDLE_VALUE;
1218   }
1219   free(dirp->path);
1220   free(dirp);
1221   return 0;
1222 }
1223 
// This must be hard coded because it's the system's temporary
// directory, not the Java application's temp directory (a la java.io.tmpdir).
1226 const char* os::get_temp_directory() {
1227   static char path_buf[MAX_PATH];
1228   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1229     return path_buf;
1230   } else {
1231     path_buf[0] = '\0';
1232     return path_buf;
1233   }
1234 }
1235 
1236 static bool file_exists(const char* filename) {
1237   if (filename == NULL || strlen(filename) == 0) {
1238     return false;
1239   }
1240   return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1241 }
1242 
1243 bool os::dll_build_name(char *buffer, size_t buflen,
1244                         const char* pname, const char* fname) {
1245   bool retval = false;
1246   const size_t pnamelen = pname ? strlen(pname) : 0;
1247   const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1248 
1249   // Return error on buffer overflow.
1250   if (pnamelen + strlen(fname) + 10 > buflen) {
1251     return retval;
1252   }
1253 
1254   if (pnamelen == 0) {
1255     jio_snprintf(buffer, buflen, "%s.dll", fname);
1256     retval = true;
1257   } else if (c == ':' || c == '\\') {
1258     jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1259     retval = true;
1260   } else if (strchr(pname, *os::path_separator()) != NULL) {
1261     int n;
1262     char** pelements = split_path(pname, &n);
1263     if (pelements == NULL) {
1264       return false;
1265     }
1266     for (int i = 0; i < n; i++) {
1267       char* path = pelements[i];
1268       // Really shouldn't be NULL, but check can't hurt
1269       size_t plen = (path == NULL) ? 0 : strlen(path);
1270       if (plen == 0) {
1271         continue; // skip the empty path values
1272       }
1273       const char lastchar = path[plen - 1];
1274       if (lastchar == ':' || lastchar == '\\') {
1275         jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1276       } else {
1277         jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1278       }
1279       if (file_exists(buffer)) {
1280         retval = true;
1281         break;
1282       }
1283     }
1284     // release the storage
1285     for (int i = 0; i < n; i++) {
1286       if (pelements[i] != NULL) {
1287         FREE_C_HEAP_ARRAY(char, pelements[i]);
1288       }
1289     }
1290     if (pelements != NULL) {
1291       FREE_C_HEAP_ARRAY(char*, pelements);
1292     }
1293   } else {
1294     jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1295     retval = true;
1296   }
1297   return retval;
1298 }
1299 
// Needs to be in the os-specific directory because Windows requires another
// header file, <direct.h>
1302 const char* os::get_current_directory(char *buf, size_t buflen) {
1303   int n = static_cast<int>(buflen);
1304   if (buflen > INT_MAX)  n = INT_MAX;
1305   return _getcwd(buf, n);
1306 }
1307 
1308 //-----------------------------------------------------------
1309 // Helper functions for fatal error handler
1310 #ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
1313 //
1314 static bool _addr_in_ntdll(address addr) {
1315   HMODULE hmod;
1316   MODULEINFO minfo;
1317 
1318   hmod = GetModuleHandle("NTDLL.DLL");
1319   if (hmod == NULL) return false;
1320   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1321                                           &minfo, sizeof(MODULEINFO))) {
1322     return false;
1323   }
1324 
1325   if ((addr >= minfo.lpBaseOfDll) &&
1326       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1327     return true;
1328   } else {
1329     return false;
1330   }
1331 }
1332 #endif
1333 
1334 struct _modinfo {
1335   address addr;
1336   char*   full_path;   // point to a char buffer
1337   int     buflen;      // size of the buffer
1338   address base_addr;
1339 };
1340 
1341 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1342                                   address top_address, void * param) {
1343   struct _modinfo *pmod = (struct _modinfo *)param;
1344   if (!pmod) return -1;
1345 
1346   if (base_addr   <= pmod->addr &&
1347       top_address > pmod->addr) {
1348     // if a buffer is provided, copy path name to the buffer
1349     if (pmod->full_path) {
1350       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1351     }
1352     pmod->base_addr = base_addr;
1353     return 1;
1354   }
1355   return 0;
1356 }
1357 
1358 bool os::dll_address_to_library_name(address addr, char* buf,
1359                                      int buflen, int* offset) {
1360   // buf is not optional, but offset is optional
1361   assert(buf != NULL, "sanity check");
1362 
// NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
//       return the full path to the DLL file; sometimes it returns the path
//       to the corresponding PDB file (debug info), and sometimes it only
//       returns a partial path, which makes life painful.
1367 
1368   struct _modinfo mi;
1369   mi.addr      = addr;
1370   mi.full_path = buf;
1371   mi.buflen    = buflen;
1372   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1373     // buf already contains path name
1374     if (offset) *offset = addr - mi.base_addr;
1375     return true;
1376   }
1377 
1378   buf[0] = '\0';
1379   if (offset) *offset = -1;
1380   return false;
1381 }
1382 
1383 bool os::dll_address_to_function_name(address addr, char *buf,
1384                                       int buflen, int *offset,
1385                                       bool demangle) {
1386   // buf is not optional, but offset is optional
1387   assert(buf != NULL, "sanity check");
1388 
1389   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1390     return true;
1391   }
1392   if (offset != NULL)  *offset  = -1;
1393   buf[0] = '\0';
1394   return false;
1395 }
1396 
1397 // save the start and end address of jvm.dll into param[0] and param[1]
1398 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1399                            address top_address, void * param) {
1400   if (!param) return -1;
1401 
1402   if (base_addr   <= (address)_locate_jvm_dll &&
1403       top_address > (address)_locate_jvm_dll) {
1404     ((address*)param)[0] = base_addr;
1405     ((address*)param)[1] = top_address;
1406     return 1;
1407   }
1408   return 0;
1409 }
1410 
1411 address vm_lib_location[2];    // start and end address of jvm.dll
1412 
1413 // check if addr is inside jvm.dll
1414 bool os::address_is_in_vm(address addr) {
1415   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1416     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1417       assert(false, "Can't find jvm module.");
1418       return false;
1419     }
1420   }
1421 
1422   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1423 }
1424 
1425 // print module info; param is outputStream*
1426 static int _print_module(const char* fname, address base_address,
1427                          address top_address, void* param) {
1428   if (!param) return -1;
1429 
1430   outputStream* st = (outputStream*)param;
1431 
1432   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1433   return 0;
1434 }
1435 
// Loads a .dll/.so and,
// in case of error, checks whether the .dll/.so was built for the
// same architecture that HotSpot is running on.
1439 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1440   void * result = LoadLibrary(name);
1441   if (result != NULL) {
1442     return result;
1443   }
1444 
1445   DWORD errcode = GetLastError();
1446   if (errcode == ERROR_MOD_NOT_FOUND) {
1447     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1448     ebuf[ebuflen - 1] = '\0';
1449     return NULL;
1450   }
1451 
  // Parsing the dll below.
  // If we can read the dll-info and find that the dll was built
  // for an architecture other than the one HotSpot is running on,
  // then print "DLL was built for a different architecture" to the buffer;
  // otherwise call os::lasterror to obtain the system error message.
1457 
1458   // Read system error message into ebuf
1459   // It may or may not be overwritten below (in the for loop and just above)
1460   lasterror(ebuf, (size_t) ebuflen);
1461   ebuf[ebuflen - 1] = '\0';
1462   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1463   if (fd < 0) {
1464     return NULL;
1465   }
1466 
1467   uint32_t signature_offset;
1468   uint16_t lib_arch = 0;
1469   bool failed_to_get_lib_arch =
1470     ( // Go to position 3c in the dll
1471      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1472      ||
1473      // Read location of signature
1474      (sizeof(signature_offset) !=
1475      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1476      ||
1477      // Go to COFF File Header in dll
1478      // that is located after "signature" (4 bytes long)
1479      (os::seek_to_file_offset(fd,
1480      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1481      ||
1482      // Read field that contains code of architecture
1483      // that dll was built for
1484      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1485     );
1486 
1487   ::close(fd);
1488   if (failed_to_get_lib_arch) {
1489     // file i/o error - report os::lasterror(...) msg
1490     return NULL;
1491   }
1492 
1493   typedef struct {
1494     uint16_t arch_code;
1495     char* arch_name;
1496   } arch_t;
1497 
1498   static const arch_t arch_array[] = {
1499     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1500     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1501     {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
1502   };
1503 #if   (defined _M_IA64)
1504   static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
1505 #elif (defined _M_AMD64)
1506   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1507 #elif (defined _M_IX86)
1508   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1509 #else
1510   #error Method os::dll_load requires that one of following \
1511          is defined :_M_IA64,_M_AMD64 or _M_IX86
1512 #endif
1513 
1514 
  // Obtain strings for the printf operation below:
  // lib_arch_str shall contain the platform this .dll was built for,
  // running_arch_str shall contain the platform HotSpot was built for.
1518   char *running_arch_str = NULL, *lib_arch_str = NULL;
1519   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1520     if (lib_arch == arch_array[i].arch_code) {
1521       lib_arch_str = arch_array[i].arch_name;
1522     }
1523     if (running_arch == arch_array[i].arch_code) {
1524       running_arch_str = arch_array[i].arch_name;
1525     }
1526   }
1527 
1528   assert(running_arch_str,
1529          "Didn't find running architecture code in arch_array");
1530 
1531   // If the architecture is right
1532   // but some other error took place - report os::lasterror(...) msg
1533   if (lib_arch == running_arch) {
1534     return NULL;
1535   }
1536 
1537   if (lib_arch_str != NULL) {
1538     ::_snprintf(ebuf, ebuflen - 1,
1539                 "Can't load %s-bit .dll on a %s-bit platform",
1540                 lib_arch_str, running_arch_str);
1541   } else {
    // don't know what architecture this dll was built for
1543     ::_snprintf(ebuf, ebuflen - 1,
1544                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1545                 lib_arch, running_arch_str);
1546   }
1547 
1548   return NULL;
1549 }
1550 
1551 void os::print_dll_info(outputStream *st) {
1552   st->print_cr("Dynamic libraries:");
1553   get_loaded_modules_info(_print_module, (void *)st);
1554 }
1555 
1556 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1557   HANDLE   hProcess;
1558 
1559 # define MAX_NUM_MODULES 128
1560   HMODULE     modules[MAX_NUM_MODULES];
1561   static char filename[MAX_PATH];
1562   int         result = 0;
1563 
1564   int pid = os::current_process_id();
1565   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1566                          FALSE, pid);
1567   if (hProcess == NULL) return 0;
1568 
1569   DWORD size_needed;
1570   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1571     CloseHandle(hProcess);
1572     return 0;
1573   }
1574 
1575   // number of modules that are currently loaded
1576   int num_modules = size_needed / sizeof(HMODULE);
1577 
1578   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1579     // Get Full pathname:
1580     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1581       filename[0] = '\0';
1582     }
1583 
1584     MODULEINFO modinfo;
1585     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1586       modinfo.lpBaseOfDll = NULL;
1587       modinfo.SizeOfImage = 0;
1588     }
1589 
1590     // Invoke callback function
1591     result = callback(filename, (address)modinfo.lpBaseOfDll,
1592                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1593     if (result) break;
1594   }
1595 
1596   CloseHandle(hProcess);
1597   return result;
1598 }
1599 
1600 bool os::get_host_name(char* buf, size_t buflen) {
1601   DWORD size = (DWORD)buflen;
1602   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1603 }
1604 
1605 void os::get_summary_os_info(char* buf, size_t buflen) {
1606   stringStream sst(buf, buflen);
1607   os::win32::print_windows_version(&sst);
1608   // chop off newline character
1609   char* nl = strchr(buf, '\n');
1610   if (nl != NULL) *nl = '\0';
1611 }
1612 
1613 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1614   int ret = vsnprintf(buf, len, fmt, args);
1615   // Get the correct buffer size if buf is too small
1616   if (ret < 0) {
1617     return _vscprintf(fmt, args);
1618   }
1619   return ret;
1620 }
1621 
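     // Return the last modification time of 'filename'; in debug builds,
     // asserts that the underlying os::stat() call succeeds.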
1622 static inline time_t get_mtime(const char* filename) {
1623   struct stat st;
1624   int ret = os::stat(filename, &st);
1625   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
1626   return st.st_mtime;
1627 }
1628 
1629 int os::compare_file_modified_times(const char* file1, const char* file2) {
1630   time_t t1 = get_mtime(file1);
1631   time_t t2 = get_mtime(file2);
1632   return t1 - t2;
1633 }
1634 
1635 void os::print_os_info_brief(outputStream* st) {
1636   os::print_os_info(st);
1637 }
1638 
1639 void os::print_os_info(outputStream* st) {
1640 #ifdef ASSERT
1641   char buffer[1024];
1642   st->print("HostName: ");
1643   if (get_host_name(buffer, sizeof(buffer))) {
1644     st->print("%s ", buffer);
1645   } else {
1646     st->print("N/A ");
1647   }
1648 #endif
1649   st->print("OS:");
1650   os::win32::print_windows_version(st);
1651 }
1652 
1653 void os::win32::print_windows_version(outputStream* st) {
1654   OSVERSIONINFOEX osvi;
1655   VS_FIXEDFILEINFO *file_info;
1656   TCHAR kernel32_path[MAX_PATH];
1657   UINT len, ret;
1658 
1659   // Use the GetVersionEx information to see if we're on a server or
1660   // workstation edition of Windows. Starting with Windows 8.1 we can't
1661   // trust the OS version information returned by this API.
1662   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1663   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1664   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1665     st->print_cr("Call to GetVersionEx failed");
1666     return;
1667   }
1668   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1669 
1670   // Get the full path to \Windows\System32\kernel32.dll and use that for
1671   // determining what version of Windows we're running on.
1672   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1673   ret = GetSystemDirectory(kernel32_path, len);
1674   if (ret == 0 || ret > len) {
1675     st->print_cr("Call to GetSystemDirectory failed");
1676     return;
1677   }
1678   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1679 
1680   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1681   if (version_size == 0) {
1682     st->print_cr("Call to GetFileVersionInfoSize failed");
1683     return;
1684   }
1685 
1686   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1687   if (version_info == NULL) {
1688     st->print_cr("Failed to allocate version_info");
1689     return;
1690   }
1691 
1692   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1693     os::free(version_info);
1694     st->print_cr("Call to GetFileVersionInfo failed");
1695     return;
1696   }
1697 
1698   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1699     os::free(version_info);
1700     st->print_cr("Call to VerQueryValue failed");
1701     return;
1702   }
1703 
1704   int major_version = HIWORD(file_info->dwProductVersionMS);
1705   int minor_version = LOWORD(file_info->dwProductVersionMS);
1706   int build_number = HIWORD(file_info->dwProductVersionLS);
1707   int build_minor = LOWORD(file_info->dwProductVersionLS);
1708   int os_vers = major_version * 1000 + minor_version;
1709   os::free(version_info);
1710 
1711   st->print(" Windows ");
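       // os_vers packs the kernel32.dll product version as major * 1000 + minor,
       // e.g. 6003 corresponds to version 6.3 (Windows 8.1 / Server 2012 R2).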
1712   switch (os_vers) {
1713 
1714   case 6000:
1715     if (is_workstation) {
1716       st->print("Vista");
1717     } else {
1718       st->print("Server 2008");
1719     }
1720     break;
1721 
1722   case 6001:
1723     if (is_workstation) {
1724       st->print("7");
1725     } else {
1726       st->print("Server 2008 R2");
1727     }
1728     break;
1729 
1730   case 6002:
1731     if (is_workstation) {
1732       st->print("8");
1733     } else {
1734       st->print("Server 2012");
1735     }
1736     break;
1737 
1738   case 6003:
1739     if (is_workstation) {
1740       st->print("8.1");
1741     } else {
1742       st->print("Server 2012 R2");
1743     }
1744     break;
1745 
1746   case 10000:
1747     if (is_workstation) {
1748       st->print("10");
1749     } else {
1750       st->print("Server 2016");
1751     }
1752     break;
1753 
1754   default:
1755     // Unrecognized Windows version; print out its major and minor versions
1756     st->print("%d.%d", major_version, minor_version);
1757     break;
1758   }
1759 
1760   // Retrieve SYSTEM_INFO via GetNativeSystemInfo so that we can find out
1761   // whether we are running on a 64-bit processor or not
1762   SYSTEM_INFO si;
1763   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1764   GetNativeSystemInfo(&si);
1765   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1766     st->print(" , 64 bit");
1767   }
1768 
1769   st->print(" Build %d", build_number);
1770   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1771   st->cr();
1772 }
1773 
1774 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1775   // Nothing to do for now.
1776 }
1777 
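     // Read the processor name string from the registry; on any failure fall
     // back to a generic "## __CPU__" placeholder.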
1778 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1779   HKEY key;
1780   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1781                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1782   if (status == ERROR_SUCCESS) {
1783     DWORD size = (DWORD)buflen;
1784     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1785     if (status != ERROR_SUCCESS) {
1786       strncpy(buf, "## __CPU__", buflen);
1787     }
1788     RegCloseKey(key);
1789   } else {
1790     // Fall back to a generic cpu description
1791     strncpy(buf, "## __CPU__", buflen);
1792   }
1793 }
1794 
1795 void os::print_memory_info(outputStream* st) {
1796   st->print("Memory:");
1797   st->print(" %dk page", os::vm_page_size()>>10);
1798 
1799   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1800   // value if total memory is larger than 4GB
1801   MEMORYSTATUSEX ms;
1802   ms.dwLength = sizeof(ms);
1803   GlobalMemoryStatusEx(&ms);
1804 
1805   st->print(", physical %uk", os::physical_memory() >> 10);
1806   st->print("(%uk free)", os::available_memory() >> 10);
1807 
1808   st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1809   st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1810   st->cr();
1811 }
1812 
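     // Pretty-print the EXCEPTION_RECORD passed in 'siginfo' (exception name,
     // code, and, for access violations, the access type and faulting address).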
1813 void os::print_siginfo(outputStream *st, const void* siginfo) {
1814   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1815   st->print("siginfo:");
1816 
1817   char tmp[64];
1818   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1819     strcpy(tmp, "EXCEPTION_??");
1820   }
1821   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1822 
1823   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1824        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1825        er->NumberParameters >= 2) {
1826     switch (er->ExceptionInformation[0]) {
1827     case 0: st->print(", reading address"); break;
1828     case 1: st->print(", writing address"); break;
1829     case 8: st->print(", data execution prevention violation at address"); break;
1830     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1831                        er->ExceptionInformation[0]);
1832     }
1833     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1834   } else {
1835     int num = er->NumberParameters;
1836     if (num > 0) {
1837       st->print(", ExceptionInformation=");
1838       for (int i = 0; i < num; i++) {
1839         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1840       }
1841     }
1842   }
1843   st->cr();
1844 }
1845 
1846 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1847   // do nothing
1848 }
1849 
1850 static char saved_jvm_path[MAX_PATH] = {0};
1851 
1852 // Find the full path to the current module, jvm.dll
1853 void os::jvm_path(char *buf, jint buflen) {
1854   // Error checking.
1855   if (buflen < MAX_PATH) {
1856     assert(false, "must use a large-enough buffer");
1857     buf[0] = '\0';
1858     return;
1859   }
1860   // Lazy resolve the path to current module.
1861   if (saved_jvm_path[0] != 0) {
1862     strcpy(buf, saved_jvm_path);
1863     return;
1864   }
1865 
1866   buf[0] = '\0';
1867   if (Arguments::sun_java_launcher_is_altjvm()) {
1868     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1869     // for a JAVA_HOME environment variable and fix up the path so it
1870     // looks like jvm.dll is installed there (append a fake suffix
1871     // hotspot/jvm.dll).
1872     char* java_home_var = ::getenv("JAVA_HOME");
1873     if (java_home_var != NULL && java_home_var[0] != 0 &&
1874         strlen(java_home_var) < (size_t)buflen) {
1875       strncpy(buf, java_home_var, buflen);
1876 
1877       // determine if this is a legacy image or a modules image;
1878       // a modules image doesn't have a "jre" subdirectory
1879       size_t len = strlen(buf);
1880       char* jrebin_p = buf + len;
1881       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1882       if (0 != _access(buf, 0)) {
1883         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1884       }
1885       len = strlen(buf);
1886       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1887     }
1888   }
1889 
1890   if (buf[0] == '\0') {
1891     GetModuleFileName(vm_lib_handle, buf, buflen);
1892   }
1893   strncpy(saved_jvm_path, buf, MAX_PATH);
1894   saved_jvm_path[MAX_PATH - 1] = '\0';
1895 }
1896 
1897 
1898 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1899 #ifndef _WIN64
1900   st->print("_");
1901 #endif
1902 }
1903 
1904 
1905 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1906 #ifndef _WIN64
1907   st->print("@%d", args_size  * sizeof(int));
1908 #endif
1909 }
1910 
1911 // This method is a copy of JDK's sysGetLastErrorString
1912 // from src/windows/hpi/src/system_md.c
1913 
1914 size_t os::lasterror(char* buf, size_t len) {
1915   DWORD errval;
1916 
1917   if ((errval = GetLastError()) != 0) {
1918     // DOS error
1919     size_t n = (size_t)FormatMessage(
1920                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1921                                      NULL,
1922                                      errval,
1923                                      0,
1924                                      buf,
1925                                      (DWORD)len,
1926                                      NULL);
1927     if (n > 3) {
1928       // Drop final '.', CR, LF
1929       if (buf[n - 1] == '\n') n--;
1930       if (buf[n - 1] == '\r') n--;
1931       if (buf[n - 1] == '.') n--;
1932       buf[n] = '\0';
1933     }
1934     return n;
1935   }
1936 
1937   if (errno != 0) {
1938     // C runtime error that has no corresponding DOS error code
1939     const char* s = os::strerror(errno);
1940     size_t n = strlen(s);
1941     if (n >= len) n = len - 1;
1942     strncpy(buf, s, n);
1943     buf[n] = '\0';
1944     return n;
1945   }
1946 
1947   return 0;
1948 }
1949 
1950 int os::get_last_error() {
1951   DWORD error = GetLastError();
1952   if (error == 0) {
1953     error = errno;
1954   }
1955   return (int)error;
1956 }
1957 
1958 WindowsSemaphore::WindowsSemaphore(uint value) {
1959   _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);
1960 
1961   guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
1962 }
1963 
1964 WindowsSemaphore::~WindowsSemaphore() {
1965   ::CloseHandle(_semaphore);
1966 }
1967 
1968 void WindowsSemaphore::signal(uint count) {
1969   if (count > 0) {
1970     BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
1971 
1972     assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
1973   }
1974 }
1975 
1976 void WindowsSemaphore::wait() {
1977   DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
1978   assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
1979   assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
1980 }
1981 
1982 // sun.misc.Signal
1983 // NOTE that this is a workaround for an apparent kernel bug where if
1984 // a signal handler for SIGBREAK is installed then that signal handler
1985 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1986 // See bug 4416763.
1987 static void (*sigbreakHandler)(int) = NULL;
1988 
1989 static void UserHandler(int sig, void *siginfo, void *context) {
1990   os::signal_notify(sig);
1991   // We need to reinstate the signal handler each time...
1992   os::signal(sig, (void*)UserHandler);
1993 }
1994 
1995 void* os::user_handler() {
1996   return (void*) UserHandler;
1997 }
1998 
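     // Install 'handler' for 'signal_number' and return the previous handler.
     // SIGBREAK is special-cased: unless -Xrs (ReduceSignalUsage) is set, it is
     // dispatched from the console control handler rather than via ::signal().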
1999 void* os::signal(int signal_number, void* handler) {
2000   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
2001     void (*oldHandler)(int) = sigbreakHandler;
2002     sigbreakHandler = (void (*)(int)) handler;
2003     return (void*) oldHandler;
2004   } else {
2005     return (void*)::signal(signal_number, (void (*)(int))handler);
2006   }
2007 }
2008 
2009 void os::signal_raise(int signal_number) {
2010   raise(signal_number);
2011 }
2012 
2013 // The Win32 C runtime library maps all console control events other than ^C
2014 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2015 // logoff, and shutdown events.  We therefore install our own console handler
2016 // that raises SIGTERM for the latter cases.
2017 //
2018 static BOOL WINAPI consoleHandler(DWORD event) {
2019   switch (event) {
2020   case CTRL_C_EVENT:
2021     if (is_error_reported()) {
2022       // Ctrl-C is pressed during error reporting, likely because the error
2023       // handler fails to abort. Let VM die immediately.
2024       os::die();
2025     }
2026 
2027     os::signal_raise(SIGINT);
2028     return TRUE;
2029     break;
2030   case CTRL_BREAK_EVENT:
2031     if (sigbreakHandler != NULL) {
2032       (*sigbreakHandler)(SIGBREAK);
2033     }
2034     return TRUE;
2035     break;
2036   case CTRL_LOGOFF_EVENT: {
2037     // Don't terminate JVM if it is running in a non-interactive session,
2038     // such as a service process.
2039     USEROBJECTFLAGS flags;
2040     HANDLE handle = GetProcessWindowStation();
2041     if (handle != NULL &&
2042         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2043         sizeof(USEROBJECTFLAGS), NULL)) {
2044       // If it is a non-interactive session, let the next handler
2045       // deal with it.
2046       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2047         return FALSE;
2048       }
2049     }
2050   }
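       // Interactive logoff falls through and is treated like close/shutdown.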
2051   case CTRL_CLOSE_EVENT:
2052   case CTRL_SHUTDOWN_EVENT:
2053     os::signal_raise(SIGTERM);
2054     return TRUE;
2055     break;
2056   default:
2057     break;
2058   }
2059   return FALSE;
2060 }
2061 
2062 // The following code was moved from os.cpp to make it platform
2063 // specific, which it is by its very nature.
2064 
2065 // Return maximum OS signal used + 1 for internal use only
2066 // Used as exit signal for signal_thread
2067 int os::sigexitnum_pd() {
2068   return NSIG;
2069 }
2070 
2071 // a counter for each possible signal value, including signal_thread exit signal
2072 static volatile jint pending_signals[NSIG+1] = { 0 };
2073 static HANDLE sig_sem = NULL;
2074 
2075 void os::signal_init_pd() {
2076   // Initialize signal structures
2077   memset((void*)pending_signals, 0, sizeof(pending_signals));
2078 
2079   sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
2080 
2081   // Programs embedding the VM do not want it to attempt to receive
2082   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2083   // shutdown hooks mechanism introduced in 1.3.  For example, when
2084   // the VM is run as part of a Windows NT service (i.e., a servlet
2085   // engine in a web server), the correct behavior is for any console
2086   // control handler to return FALSE, not TRUE, because the OS's
2087   // "final" handler for such events allows the process to continue if
2088   // it is a service (while terminating it if it is not a service).
2089   // To make this behavior uniform and the mechanism simpler, we
2090   // completely disable the VM's usage of these console events if -Xrs
2091   // (=ReduceSignalUsage) is specified.  This means, for example, that
2092   // the CTRL-BREAK thread dump mechanism is also disabled in this
2093   // case.  See bugs 4323062, 4345157, and related bugs.
2094 
2095   if (!ReduceSignalUsage) {
2096     // Add a CTRL-C handler
2097     SetConsoleCtrlHandler(consoleHandler, TRUE);
2098   }
2099 }
2100 
2101 void os::signal_notify(int signal_number) {
2102   BOOL ret;
2103   if (sig_sem != NULL) {
2104     Atomic::inc(&pending_signals[signal_number]);
2105     ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2106     assert(ret != 0, "ReleaseSemaphore() failed");
2107   }
2108 }
2109 
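     // Scan pending_signals for a raised signal and atomically consume it.
     // If none is pending and 'wait_for_signal' is true, block on sig_sem until
     // one arrives; otherwise return -1.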
2110 static int check_pending_signals(bool wait_for_signal) {
2111   DWORD ret;
2112   while (true) {
2113     for (int i = 0; i < NSIG + 1; i++) {
2114       jint n = pending_signals[i];
2115       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2116         return i;
2117       }
2118     }
2119     if (!wait_for_signal) {
2120       return -1;
2121     }
2122 
2123     JavaThread *thread = JavaThread::current();
2124 
2125     ThreadBlockInVM tbivm(thread);
2126 
2127     bool threadIsSuspended;
2128     do {
2129       thread->set_suspend_equivalent();
2130       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2131       ret = ::WaitForSingleObject(sig_sem, INFINITE);
2132       assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
2133 
2134       // were we externally suspended while we were waiting?
2135       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2136       if (threadIsSuspended) {
2137         // The semaphore has been incremented, but while we were waiting
2138         // another thread suspended us. We don't want to continue running
2139         // while suspended because that would surprise the thread that
2140         // suspended us.
2141         ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2142         assert(ret != 0, "ReleaseSemaphore() failed");
2143 
2144         thread->java_suspend_self();
2145       }
2146     } while (threadIsSuspended);
2147   }
2148 }
2149 
2150 int os::signal_lookup() {
2151   return check_pending_signals(false);
2152 }
2153 
2154 int os::signal_wait() {
2155   return check_pending_signals(true);
2156 }
2157 
2158 // Implicit OS exception handling
2159 
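     // Save the faulting pc in the current thread (if one is available) and
     // redirect execution to 'handler' by rewriting the instruction pointer in
     // the exception context, then tell the OS to continue execution there.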
2160 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2161                       address handler) {
2162   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2163   // Save pc in thread
2164 #ifdef _M_IA64
2165   // Do not blow up if no thread info available.
2166   if (thread) {
2167     // Saving PRECISE pc (with slot information) in thread.
2168     uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
2169     // Convert precise PC into "Unix" format
2170     precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
2171     thread->set_saved_exception_pc((address)precise_pc);
2172   }
2173   // Set pc to handler
2174   exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
2175   // Clear out psr.ri (= Restart Instruction) in order to continue
2176   // at the beginning of the target bundle.
2177   exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
2178   assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
2179 #else
2180   #ifdef _M_AMD64
2181   // Do not blow up if no thread info available.
2182   if (thread) {
2183     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2184   }
2185   // Set pc to handler
2186   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2187   #else
2188   // Do not blow up if no thread info available.
2189   if (thread) {
2190     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2191   }
2192   // Set pc to handler
2193   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2194   #endif
2195 #endif
2196 
2197   // Continue the execution
2198   return EXCEPTION_CONTINUE_EXECUTION;
2199 }
2200 
2201 
2202 // Used for PostMortemDump
2203 extern "C" void safepoints();
2204 extern "C" void find(int x);
2205 extern "C" void events();
2206 
2207 // According to Windows API documentation, an illegal instruction sequence should generate
2208 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2209 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2210 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2211 
2212 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2213 
2214 // From "Execution Protection in the Windows Operating System" draft 0.35
2215 // Once a system header becomes available, the "real" define should be
2216 // included or copied here.
2217 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2218 
2219 // Handle NAT Bit consumption on IA64.
2220 #ifdef _M_IA64
2221   #define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
2222 #endif
2223 
2224 // Windows Vista/2008 heap corruption check
2225 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2226 
2227 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2228 // C++ compiler contain this error code. Because this is a compiler-generated
2229 // error, the code is not listed in the Win32 API header files.
2230 // The code is actually a cryptic mnemonic device, with the initial "E"
2231 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2232 // ASCII values of "msc".
2233 
2234 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2235 
2236 #define def_excpt(val) { #val, (val) }
2237 
2238 static const struct { char* name; uint number; } exceptlabels[] = {
2239     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2240     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2241     def_excpt(EXCEPTION_BREAKPOINT),
2242     def_excpt(EXCEPTION_SINGLE_STEP),
2243     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2244     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2245     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2246     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2247     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2248     def_excpt(EXCEPTION_FLT_OVERFLOW),
2249     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2250     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2251     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2252     def_excpt(EXCEPTION_INT_OVERFLOW),
2253     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2254     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2255     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2256     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2257     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2258     def_excpt(EXCEPTION_STACK_OVERFLOW),
2259     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2260     def_excpt(EXCEPTION_GUARD_PAGE),
2261     def_excpt(EXCEPTION_INVALID_HANDLE),
2262     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2263     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2264 #ifdef _M_IA64
2265     , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION)
2266 #endif
2267 };
2268 
2269 #undef def_excpt
2270 
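     // Map a Win32 exception code to its symbolic name in 'buf'; returns NULL
     // if the code is not in the table above.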
2271 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2272   uint code = static_cast<uint>(exception_code);
2273   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2274     if (exceptlabels[i].number == code) {
2275       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2276       return buf;
2277     }
2278   }
2279 
2280   return NULL;
2281 }
2282 
2283 //-----------------------------------------------------------------------------
2284 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2285   // handle exception caused by idiv; should only happen for -MinInt/-1
2286   // (division by zero is handled explicitly)
2287 #ifdef _M_IA64
2288   assert(0, "Fix Handle_IDiv_Exception");
2289 #else
2290   #ifdef  _M_AMD64
2291   PCONTEXT ctx = exceptionInfo->ContextRecord;
2292   address pc = (address)ctx->Rip;
2293   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2294   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2295   if (pc[0] == 0xF7) {
2296     // set correct result values and continue after idiv instruction
2297     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2298   } else {
2299     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2300   }
2301   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2302   // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2303   // idiv opcode (0xF7).
2304   ctx->Rdx = (DWORD)0;             // remainder
2305   // Continue the execution
2306   #else
2307   PCONTEXT ctx = exceptionInfo->ContextRecord;
2308   address pc = (address)ctx->Eip;
2309   assert(pc[0] == 0xF7, "not an idiv opcode");
2310   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2311   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2312   // set correct result values and continue after idiv instruction
2313   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2314   ctx->Eax = (DWORD)min_jint;      // result
2315   ctx->Edx = (DWORD)0;             // remainder
2316   // Continue the execution
2317   #endif
2318 #endif
2319   return EXCEPTION_CONTINUE_EXECUTION;
2320 }
2321 
2322 //-----------------------------------------------------------------------------
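     // Handle floating point exceptions caused by native code changing the FPU
     // control word (32-bit) or MXCSR (64-bit): restore the VM's expected state
     // and continue execution; otherwise defer to the previous handler or keep
     // searching for one.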
2323 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2324   PCONTEXT ctx = exceptionInfo->ContextRecord;
2325 #ifndef  _WIN64
2326   // handle exception caused by native method modifying control word
2327   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2328 
2329   switch (exception_code) {
2330   case EXCEPTION_FLT_DENORMAL_OPERAND:
2331   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2332   case EXCEPTION_FLT_INEXACT_RESULT:
2333   case EXCEPTION_FLT_INVALID_OPERATION:
2334   case EXCEPTION_FLT_OVERFLOW:
2335   case EXCEPTION_FLT_STACK_CHECK:
2336   case EXCEPTION_FLT_UNDERFLOW:
2337     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2338     if (fp_control_word != ctx->FloatSave.ControlWord) {
2339       // Restore FPCW and mask out FLT exceptions
2340       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2341       // Mask out pending FLT exceptions
2342       ctx->FloatSave.StatusWord &=  0xffffff00;
2343       return EXCEPTION_CONTINUE_EXECUTION;
2344     }
2345   }
2346 
2347   if (prev_uef_handler != NULL) {
2348     // We didn't handle this exception so pass it to the previous
2349     // UnhandledExceptionFilter.
2350     return (prev_uef_handler)(exceptionInfo);
2351   }
2352 #else // !_WIN64
2353   // On Windows, the mxcsr control bits are non-volatile across calls
2354   // See also CR 6192333
2355   //
2356   jint MxCsr = INITIAL_MXCSR;
2357   // we can't use StubRoutines::addr_mxcsr_std()
2358   // because in Win64 mxcsr is not saved there
2359   if (MxCsr != ctx->MxCsr) {
2360     ctx->MxCsr = MxCsr;
2361     return EXCEPTION_CONTINUE_EXECUTION;
2362   }
2363 #endif // !_WIN64
2364 
2365   return EXCEPTION_CONTINUE_SEARCH;
2366 }
2367 
2368 static inline void report_error(Thread* t, DWORD exception_code,
2369                                 address addr, void* siginfo, void* context) {
2370   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2371 
2372   // If UseOsErrorReporting, this will return here and save the error file
2373   // somewhere where we can find it in the minidump.
2374 }
2375 
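     // Reconstruct the Java frame that was being banged when the stack overflow
     // fault occurred, so the caller can look for methods annotated for reserved
     // stack access. Returns false if the pc cannot be mapped to a frame and
     // default stack overflow handling should be used.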
2376 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2377         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2378   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2379   address addr = (address) exceptionRecord->ExceptionInformation[1];
2380   if (Interpreter::contains(pc)) {
2381     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2382     if (!fr->is_first_java_frame()) {
2383       // get_frame_at_stack_banging_point() is only called when we
2384       // have well defined stacks so java_sender() calls do not need
2385       // to assert safe_for_sender() first.
2386       *fr = fr->java_sender();
2387     }
2388   } else {
2389     // more complex code with compiled code
2390     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2391     CodeBlob* cb = CodeCache::find_blob(pc);
2392     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2393       // Not sure where the pc points to, fallback to default
2394       // stack overflow handling
2395       return false;
2396     } else {
2397       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2398       // in compiled code, the stack banging is performed just after the return pc
2399       // has been pushed on the stack
2400       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2401       if (!fr->is_java_frame()) {
2402         // See java_sender() comment above.
2403         *fr = fr->java_sender();
2404       }
2405     }
2406   }
2407   assert(fr->is_java_frame(), "Safety check");
2408   return true;
2409 }
2410 
2411 //-----------------------------------------------------------------------------
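     // Top-level structured exception filter for the VM. Classifies hardware
     // exceptions (execution protection violations, stack overflow, access
     // violation, integer/FP traps) and either resumes execution at a runtime
     // stub via Handle_Exception() or reports a fatal error.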
2412 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2413   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2414   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2415 #ifdef _M_IA64
2416   // On Itanium, we need the "precise pc", which has the slot number coded
2417   // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
2418   address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
2419   // Convert the pc to "Unix format", which has the slot number coded
2420   // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
2421   // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
2422   // information is saved in the Unix format.
2423   address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
2424 #else
2425   #ifdef _M_AMD64
2426   address pc = (address) exceptionInfo->ContextRecord->Rip;
2427   #else
2428   address pc = (address) exceptionInfo->ContextRecord->Eip;
2429   #endif
2430 #endif
2431   Thread* t = Thread::current_or_null_safe();
2432 
2433   // Handle SafeFetch32 and SafeFetchN exceptions.
2434   if (StubRoutines::is_safefetch_fault(pc)) {
2435     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2436   }
2437 
2438 #ifndef _WIN64
2439   // Execution protection violation - win32 running on AMD64 only
2440   // Handled first to avoid misdiagnosis as a "normal" access violation;
2441   // This is safe to do because we have a new/unique ExceptionInformation
2442   // code for this condition.
2443   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2444     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2445     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2446     address addr = (address) exceptionRecord->ExceptionInformation[1];
2447 
2448     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2449       int page_size = os::vm_page_size();
2450 
2451       // Make sure the pc and the faulting address are sane.
2452       //
2453       // If an instruction spans a page boundary, and the page containing
2454       // the beginning of the instruction is executable but the following
2455       // page is not, the pc and the faulting address might be slightly
2456       // different - we still want to unguard the 2nd page in this case.
2457       //
2458       // 15 bytes seems to be a (very) safe value for max instruction size.
2459       bool pc_is_near_addr =
2460         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2461       bool instr_spans_page_boundary =
2462         (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2463                          (intptr_t) page_size) > 0);
2464 
2465       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2466         static volatile address last_addr =
2467           (address) os::non_memory_address_word();
2468 
2469         // In conservative mode, don't unguard unless the address is in the VM
2470         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2471             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2472 
2473           // Set memory to RWX and retry
2474           address page_start =
2475             (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2476           bool res = os::protect_memory((char*) page_start, page_size,
2477                                         os::MEM_PROT_RWX);
2478 
2479           log_debug(os)("Execution protection violation "
2480                         "at " INTPTR_FORMAT
2481                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2482                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2483 
2484           // Set last_addr so if we fault again at the same address, we don't
2485           // end up in an endless loop.
2486           //
2487           // There are two potential complications here.  Two threads trapping
2488           // at the same address at the same time could cause one of the
2489           // threads to think it already unguarded, and abort the VM.  Likely
2490           // very rare.
2491           //
2492           // The other race involves two threads alternately trapping at
2493           // different addresses and failing to unguard the page, resulting in
2494           // an endless loop.  This condition is probably even more unlikely
2495           // than the first.
2496           //
2497           // Although both cases could be avoided by using locks or thread
2498           // local last_addr, these solutions are unnecessary complication:
2499           // this handler is a best-effort safety net, not a complete solution.
2500           // It is disabled by default and should only be used as a workaround
2501           // in case we missed any no-execute-unsafe VM code.
2502 
2503           last_addr = addr;
2504 
2505           return EXCEPTION_CONTINUE_EXECUTION;
2506         }
2507       }
2508 
2509       // Last unguard failed or not unguarding
2510       tty->print_raw_cr("Execution protection violation");
2511       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2512                    exceptionInfo->ContextRecord);
2513       return EXCEPTION_CONTINUE_SEARCH;
2514     }
2515   }
2516 #endif // _WIN64
2517 
2518   // Check to see if we caught the safepoint code in the
2519   // process of write protecting the memory serialization page.
2520   // It write enables the page immediately after protecting it
2521   // so just return.
2522   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2523     if (t != NULL && t->is_Java_thread()) {
2524       JavaThread* thread = (JavaThread*) t;
2525       PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2526       address addr = (address) exceptionRecord->ExceptionInformation[1];
2527       if (os::is_memory_serialize_page(thread, addr)) {
2528         // Block current thread until the memory serialize page permission restored.
2529         os::block_on_serialize_page_trap();
2530         return EXCEPTION_CONTINUE_EXECUTION;
2531       }
2532     }
2533   }
2534 
2535   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2536       VM_Version::is_cpuinfo_segv_addr(pc)) {
2537     // Verify that the OS saves and restores AVX registers.
2538     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2539   }
2540 
2541   if (t != NULL && t->is_Java_thread()) {
2542     JavaThread* thread = (JavaThread*) t;
2543     bool in_java = thread->thread_state() == _thread_in_Java;
2544 
2545     // Handle potential stack overflows up front.
2546     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2547 #ifdef _M_IA64
2548       // Use guard page for register stack.
2549       PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2550       address addr = (address) exceptionRecord->ExceptionInformation[1];
2551       // Check for a register stack overflow on Itanium
2552       if (thread->addr_inside_register_stack_red_zone(addr)) {
2553         // Fatal red zone violation happens if the Java program
2554         // catches a StackOverflow error and does so much processing
2555         // that it runs beyond the unprotected yellow guard zone. As
2556         // a result, we are out of here.
2557         fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
2558       } else if (thread->addr_inside_register_stack(addr)) {
2559         // Disable the yellow zone which sets the state that
2560         // we've got a stack overflow problem.
2561         if (thread->stack_yellow_reserved_zone_enabled()) {
2562           thread->disable_stack_yellow_reserved_zone();
2563         }
2564         // Give us some room to process the exception.
2565         thread->disable_register_stack_guard();
2566         // Tracing with +Verbose.
2567         if (Verbose) {
2568           tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
2569           tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
2570           tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
2571           tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
2572                         thread->register_stack_base(),
2573                         thread->register_stack_base() + thread->stack_size());
2574         }
2575 
2576         // Reguard the permanent register stack red zone just to be sure.
2577         // We saw Windows silently disabling this without telling us.
2578         thread->enable_register_stack_red_zone();
2579 
2580         return Handle_Exception(exceptionInfo,
2581                                 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2582       }
2583 #endif
2584       if (thread->stack_guards_enabled()) {
2585         if (in_java) {
2586           frame fr;
2587           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2588           address addr = (address) exceptionRecord->ExceptionInformation[1];
2589           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2590             assert(fr.is_java_frame(), "Must be a Java frame");
2591             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2592           }
2593         }
2594         // Yellow zone violation.  The o/s has unprotected the first yellow
2595         // zone page for us.  Note: must call disable_stack_yellow_reserved_zone
2596         // to update the enabled status, even if the zone contains only one page.
2597         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2598         thread->disable_stack_yellow_reserved_zone();
2599         // If not in java code, return and hope for the best.
2600         return in_java
2601             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2602             :  EXCEPTION_CONTINUE_EXECUTION;
2603       } else {
2604         // Fatal red zone violation.
2605         thread->disable_stack_red_zone();
2606         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2607         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2608                       exceptionInfo->ContextRecord);
2609         return EXCEPTION_CONTINUE_SEARCH;
2610       }
2611     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2612       // Either stack overflow or null pointer exception.
2613       if (in_java) {
2614         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2615         address addr = (address) exceptionRecord->ExceptionInformation[1];
2616         address stack_end = thread->stack_end();
2617         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2618           // Stack overflow.
2619           assert(!os::uses_stack_guard_pages(),
2620                  "should be caught by red zone code above.");
2621           return Handle_Exception(exceptionInfo,
2622                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2623         }
2624         // Check for safepoint polling and implicit null checks.
2625         // We only expect null pointers in the stubs (e.g. vtable stubs);
2626         // the rest are checked explicitly now.
2627         CodeBlob* cb = CodeCache::find_blob(pc);
2628         if (cb != NULL) {
2629           if (os::is_poll_address(addr)) {
2630             address stub = SharedRuntime::get_poll_stub(pc);
2631             return Handle_Exception(exceptionInfo, stub);
2632           }
2633         }
2634         {
2635 #ifdef _WIN64
2636           // If it's a legal stack address, map the entire region in
2637           //
2638           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2639           address addr = (address) exceptionRecord->ExceptionInformation[1];
2640           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2641             addr = (address)((uintptr_t)addr &
2642                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2643             os::commit_memory((char *)addr, thread->stack_base() - addr,
2644                               !ExecMem);
2645             return EXCEPTION_CONTINUE_EXECUTION;
2646           } else
2647 #endif
2648           {
2649             // Null pointer exception.
2650 #ifdef _M_IA64
2651             // Process implicit null checks in compiled code. Note: Implicit null checks
2652             // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
2653             if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
2654               CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
2655               // Handle implicit null check in UEP method entry
2656               if (cb && (cb->is_frame_complete_at(pc) ||
2657                          (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
2658                 if (Verbose) {
2659                   intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
2660                   tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
2661                   tty->print_cr("      to addr " INTPTR_FORMAT, addr);
2662                   tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
2663                                 *(bundle_start + 1), *bundle_start);
2664                 }
2665                 return Handle_Exception(exceptionInfo,
2666                                         SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
2667               }
2668             }
2669 
2670             // Implicit null checks were processed above.  Hence, we should not reach
2671             // here in the usual case => die!
2672             if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
2673             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2674                          exceptionInfo->ContextRecord);
2675             return EXCEPTION_CONTINUE_SEARCH;
2676 
2677 #else // !IA64
2678 
2679             if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
2680               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2681               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2682             }
2683             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2684                          exceptionInfo->ContextRecord);
2685             return EXCEPTION_CONTINUE_SEARCH;
2686 #endif
2687           }
2688         }
2689       }
2690 
2691 #ifdef _WIN64
2692       // Special care for fast JNI field accessors.
2693       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2694       // in and the heap gets shrunk before the field access.
2695       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2696         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2697         if (addr != (address)-1) {
2698           return Handle_Exception(exceptionInfo, addr);
2699         }
2700       }
2701 #endif
2702 
2703       // Stack overflow or null pointer exception in native code.
2704       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2705                    exceptionInfo->ContextRecord);
2706       return EXCEPTION_CONTINUE_SEARCH;
2707     } // /EXCEPTION_ACCESS_VIOLATION
2708     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2709 #if defined _M_IA64
2710     else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2711               exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2712       M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
2713 
2714       // Compiled method patched to be non-entrant? The following conditions must apply:
2715       // 1. must be first instruction in bundle
2716       // 2. must be a break instruction with appropriate code
2717       if ((((uint64_t) pc & 0x0F) == 0) &&
2718           (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
2719         return Handle_Exception(exceptionInfo,
2720                                 (address)SharedRuntime::get_handle_wrong_method_stub());
2721       }
2722     } // /EXCEPTION_ILLEGAL_INSTRUCTION
2723 #endif
2724 
2725 
2726     if (in_java) {
2727       switch (exception_code) {
2728       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2729         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2730 
2731       case EXCEPTION_INT_OVERFLOW:
2732         return Handle_IDiv_Exception(exceptionInfo);
2733 
2734       } // switch
2735     }
2736     if (((thread->thread_state() == _thread_in_Java) ||
2737          (thread->thread_state() == _thread_in_native)) &&
2738          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2739       LONG result = Handle_FLT_Exception(exceptionInfo);
2740       if (result == EXCEPTION_CONTINUE_EXECUTION) return result;
2741     }
2742   }
2743 
2744   if (exception_code != EXCEPTION_BREAKPOINT) {
2745     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2746                  exceptionInfo->ContextRecord);
2747   }
2748   return EXCEPTION_CONTINUE_SEARCH;
2749 }
2750 
2751 #ifndef _WIN64
2752 // Special care for fast JNI accessors.
2753 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2754 // the heap gets shrunk before the field access.
2755 // Need to install our own structured exception handler since native code may
2756 // install its own.
2757 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2758   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2759   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2760     address pc = (address) exceptionInfo->ContextRecord->Eip;
2761     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2762     if (addr != (address)-1) {
2763       return Handle_Exception(exceptionInfo, addr);
2764     }
2765   }
2766   return EXCEPTION_CONTINUE_SEARCH;
2767 }
2768 
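     // Wrap each fast JNI field accessor in a Win32 SEH __try/__except block so
     // that an access violation during the speculative read is caught and, when
     // possible, rerouted to the slow-case code by fastJNIAccessorExceptionFilter.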
2769 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2770   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2771                                                      jobject obj,           \
2772                                                      jfieldID fieldID) {    \
2773     __try {                                                                 \
2774       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2775                                                                  obj,       \
2776                                                                  fieldID);  \
2777     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2778                                               _exception_info())) {         \
2779     }                                                                       \
2780     return 0;                                                               \
2781   }
2782 
2783 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2784 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2785 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2786 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2787 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2788 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2789 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2790 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2791 
2792 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2793   switch (type) {
2794   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2795   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2796   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2797   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2798   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2799   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2800   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2801   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2802   default:        ShouldNotReachHere();
2803   }
2804   return (address)-1;
2805 }
2806 #endif
2807 
2808 // Virtual Memory
2809 
2810 int os::vm_page_size() { return os::win32::vm_page_size(); }
2811 int os::vm_allocation_granularity() {
2812   return os::win32::vm_allocation_granularity();
2813 }
2814 
2815 // Windows large page support is available on Windows 2003. In order to use
2816 // large page memory, the administrator must first assign additional privilege
2817 // to the user:
2818 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2819 //   + select Local Policies -> User Rights Assignment
2820 //   + double click "Lock pages in memory", add users and/or groups
2821 //   + reboot
2822 // Note the above steps are needed for administrator as well, as administrators
2823 // by default do not have the privilege to lock pages in memory.
2824 //
2825 // Note about Windows 2003: although the API supports committing large page
2826 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2827 // scenario, I found through experimentation that it only uses large pages if the entire
2828 // memory region is reserved and committed in a single VirtualAlloc() call.
2829 // This makes Windows large page support more or less like Solaris ISM, in
2830 // that the entire heap must be committed upfront. This probably will change
2831 // in the future, if so the code below needs to be revisited.
2832 
2833 #ifndef MEM_LARGE_PAGES
2834   #define MEM_LARGE_PAGES 0x20000000
2835 #endif
2836 
2837 static HANDLE    _hProcess;
2838 static HANDLE    _hToken;
2839 
2840 // Container for NUMA node list info
2841 class NUMANodeListHolder {
2842  private:
2843   int *_numa_used_node_list;  // allocated below
2844   int _numa_used_node_count;
2845 
2846   void free_node_list() {
2847     if (_numa_used_node_list != NULL) {
2848       FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2849     }
2850   }
2851 
2852  public:
2853   NUMANodeListHolder() {
2854     _numa_used_node_count = 0;
2855     _numa_used_node_list = NULL;
2856     // do rest of initialization in build routine (after function pointers are set up)
2857   }
2858 
2859   ~NUMANodeListHolder() {
2860     free_node_list();
2861   }
2862 
2863   bool build() {
2864     DWORD_PTR proc_aff_mask;
2865     DWORD_PTR sys_aff_mask;
2866     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2867     ULONG highest_node_number;
2868     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2869     free_node_list();
2870     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
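         // A node is considered "used" only if the process affinity mask
         // intersects that node's processor mask.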
2871     for (unsigned int i = 0; i <= highest_node_number; i++) {
2872       ULONGLONG proc_mask_numa_node;
2873       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2874       if ((proc_aff_mask & proc_mask_numa_node) != 0) {
2875         _numa_used_node_list[_numa_used_node_count++] = i;
2876       }
2877     }
2878     return (_numa_used_node_count > 1);
2879   }
2880 
2881   int get_count() { return _numa_used_node_count; }
2882   int get_node_list_entry(int n) {
2883     // for indexes out of range, returns -1
2884     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2885   }
2886 
2887 } numa_node_list_holder;
2888 
2889 
2890 
2891 static size_t _large_page_size = 0;
2892 
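     // Acquire the "SeLockMemoryPrivilege" privilege on the process token, which
     // is required before VirtualAlloc(MEM_LARGE_PAGES) can succeed.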
2893 static bool request_lock_memory_privilege() {
2894   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2895                           os::current_process_id());
2896 
2897   LUID luid;
2898   if (_hProcess != NULL &&
2899       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2900       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2901 
2902     TOKEN_PRIVILEGES tp;
2903     tp.PrivilegeCount = 1;
2904     tp.Privileges[0].Luid = luid;
2905     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2906 
2907     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2908     // privilege. Check GetLastError() too. See the MSDN documentation.
2909     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2910         (GetLastError() == ERROR_SUCCESS)) {
2911       return true;
2912     }
2913   }
2914 
2915   return false;
2916 }
2917 
2918 static void cleanup_after_large_page_init() {
2919   if (_hProcess) CloseHandle(_hProcess);
2920   _hProcess = NULL;
2921   if (_hToken) CloseHandle(_hToken);
2922   _hToken = NULL;
2923 }
2924 
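     // Determine whether NUMA interleaving can actually be used: the process
     // must span more than one NUMA node. Warnings are emitted only when
     // UseNUMAInterleaving was specified explicitly on the command line.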
2925 static bool numa_interleaving_init() {
2926   bool success = false;
2927   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2928 
2929   // print a warning if the UseNUMAInterleaving flag was specified on the command line
2930   bool warn_on_failure = use_numa_interleaving_specified;
2931 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2932 
2933   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2934   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2935   NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2936 
2937   if (numa_node_list_holder.build()) {
2938     if (log_is_enabled(Debug, os, cpu)) {
2939       Log(os, cpu) log;
2940       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2941       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2942         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2943       }
2944     }
2945     success = true;
2946   } else {
2947     WARN("Process does not cover multiple NUMA nodes.");
2948   }
2949   if (!success) {
2950     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2951   }
2952   return success;
2953 #undef WARN
2954 }
2955 
2956 // this routine is used whenever we need to reserve a contiguous VA range
2957 // but we need to make separate VirtualAlloc calls for each piece of the range
2958 // Reasons for doing this:
2959 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it can be set on other versions as well)
2960 //  * UseNUMAInterleaving requires a separate node for each piece
2961 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2962                                          DWORD prot,
2963                                          bool should_inject_error = false) {
2964   char * p_buf;
2965   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2966   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2967   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2968 
2969   // First reserve enough address space in advance, since we want to be
2970   // able to break a single contiguous virtual address range into multiple
2971   // large page commits, but WS2003 does not allow reserving large page space.
2972   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2973   // address space. Then we deallocate that reservation and re-allocate
2974   // using large pages.
2975   const size_t size_of_reserve = bytes + chunk_size;
2976   if (bytes > size_of_reserve) {
2977     // Overflowed.
2978     return NULL;
2979   }
2980   p_buf = (char *) VirtualAlloc(addr,
2981                                 size_of_reserve,  // size of Reserve
2982                                 MEM_RESERVE,
2983                                 PAGE_READWRITE);
2984   // If reservation failed, return NULL
2985   if (p_buf == NULL) return NULL;
2986   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2987   os::release_memory(p_buf, bytes + chunk_size);
2988 
2989   // we still need to round up to a page boundary (in case we are using large pages)
2990   // but not to a chunk boundary (in case NUMAInterleaveGranularity doesn't align with the page size)
2991   // instead we handle this in the bytes_to_rq computation below
2992   p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2993 
2994   // now go through and allocate one chunk at a time until all bytes are
2995   // allocated
2996   size_t  bytes_remaining = bytes;
2997   // An overflow of align_size_up() would have been caught above
2998   // in the calculation of size_of_reserve.
2999   char * next_alloc_addr = p_buf;
3000   HANDLE hProc = GetCurrentProcess();
3001 
3002 #ifdef ASSERT
3003   // Variable for the failure injection
3004   long ran_num = os::random();
3005   size_t fail_after = ran_num % bytes;
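       // With error injection enabled, the allocation below fails once
       // bytes_remaining drops to this randomly chosen threshold.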
3006 #endif
3007 
3008   int count = 0;
3009   while (bytes_remaining) {
3010     // select bytes_to_rq to get to the next chunk_size boundary
3011 
3012     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
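         // e.g. with a 64K chunk_size and next_alloc_addr 16K past a chunk
         // boundary, at most 48K is requested so that the next allocation
         // starts exactly on a chunk boundary.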
3013     // Note allocate and commit
3014     char * p_new;
3015 
3016 #ifdef ASSERT
3017     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
3018 #else
3019     const bool inject_error_now = false;
3020 #endif
3021 
3022     if (inject_error_now) {
3023       p_new = NULL;
3024     } else {
3025       if (!UseNUMAInterleaving) {
3026         p_new = (char *) VirtualAlloc(next_alloc_addr,
3027                                       bytes_to_rq,
3028                                       flags,
3029                                       prot);
3030       } else {
3031         // get the next node to use from the used_node_list
3032         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3033         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3034         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3035       }
3036     }
3037 
3038     if (p_new == NULL) {
3039       // Free any allocated pages
3040       if (next_alloc_addr > p_buf) {
3041         // Some memory was committed so release it.
3042         size_t bytes_to_release = bytes - bytes_remaining;
3043         // NMT has yet to record any individual blocks, so it
3044         // needs to create a dummy 'reserve' record to match
3045         // the release.
3046         MemTracker::record_virtual_memory_reserve((address)p_buf,
3047                                                   bytes_to_release, CALLER_PC);
3048         os::release_memory(p_buf, bytes_to_release);
3049       }
3050 #ifdef ASSERT
3051       if (should_inject_error) {
3052         log_develop_debug(pagesize)("Reserving pages individually failed.");
3053       }
3054 #endif
3055       return NULL;
3056     }
3057 
3058     bytes_remaining -= bytes_to_rq;
3059     next_alloc_addr += bytes_to_rq;
3060     count++;
3061   }
3062   // Although the memory is allocated individually, it is returned as one.
3063   // NMT records it as one block.
3064   if ((flags & MEM_COMMIT) != 0) {
3065     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3066   } else {
3067     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3068   }
3069 
3070   // made it this far, success
3071   return p_buf;
3072 }
3073 
3074 
3075 
3076 void os::large_page_init() {
3077   if (!UseLargePages) return;
3078 
3079   // print a warning if any large page related flag is specified on command line
3080   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3081                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3082   bool success = false;
3083 
3084 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3085   if (request_lock_memory_privilege()) {
3086     size_t s = GetLargePageMinimum();
3087     if (s) {
3088 #if defined(IA32) || defined(AMD64)
3089       if (s > 4*M || LargePageSizeInBytes > 4*M) {
3090         WARN("JVM cannot use large pages bigger than 4mb.");
3091       } else {
3092 #endif
3093         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
3094           _large_page_size = LargePageSizeInBytes;
3095         } else {
3096           _large_page_size = s;
3097         }
3098         success = true;
3099 #if defined(IA32) || defined(AMD64)
3100       }
3101 #endif
3102     } else {
3103       WARN("Large page is not supported by the processor.");
3104     }
3105   } else {
3106     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3107   }
3108 #undef WARN
3109 
3110   const size_t default_page_size = (size_t) vm_page_size();
3111   if (success && _large_page_size > default_page_size) {
3112     _page_sizes[0] = _large_page_size;
3113     _page_sizes[1] = default_page_size;
3114     _page_sizes[2] = 0;
3115   }
3116 
3117   cleanup_after_large_page_init();
3118   UseLargePages = success;
3119 }
3120 
3121 // On win32, one cannot release just a part of reserved memory, it's an
3122 // all or nothing deal.  When we split a reservation, we must break the
3123 // reservation into two reservations.
3124 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3125                                   bool realloc) {
3126   if (size > 0) {
3127     release_memory(base, size);
3128     if (realloc) {
3129       reserve_memory(split, base);
3130     }
3131     if (size != split) {
3132       reserve_memory(size - split, base + split);
3133     }
3134   }
3135 }
3136 
3137 // Multiple threads can race in this code, but it's not possible to unmap small sections of
3138 // virtual space to get the requested alignment, as POSIX-like OSes can.
3139 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3140 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
3141   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3142          "Alignment must be a multiple of allocation granularity (page size)");
3143   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3144 
3145   size_t extra_size = size + alignment;
3146   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3147 
3148   char* aligned_base = NULL;
3149 
3150   do {
3151     char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
3152     if (extra_base == NULL) {
3153       return NULL;
3154     }
3155     // Do manual alignment
3156     aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3157 
3158     os::release_memory(extra_base, extra_size);
3159 
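         // Another thread may reserve this range between the release above and
         // the re-reserve below; in that case the reserve returns NULL and we retry.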
3160     aligned_base = os::reserve_memory(size, aligned_base);
3161 
3162   } while (aligned_base == NULL);
3163 
3164   return aligned_base;
3165 }
3166 
3167 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3168   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3169          "reserve alignment");
3170   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3171   char* res;
3172   // note that if UseLargePages is on, all the areas that require interleaving
3173   // will go through reserve_memory_special rather than through here.
3174   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3175   if (!use_individual) {
3176     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3177   } else {
3178     elapsedTimer reserveTimer;
3179     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3180     // in numa interleaving, we have to allocate pages individually
3181     // (well really chunks of NUMAInterleaveGranularity size)
3182     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3183     if (res == NULL) {
3184       warning("NUMA page allocation failed");
3185     }
3186     if (Verbose && PrintMiscellaneous) {
3187       reserveTimer.stop();
3188       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3189                     reserveTimer.milliseconds(), reserveTimer.ticks());
3190     }
3191   }
3192   assert(res == NULL || addr == NULL || addr == res,
3193          "Unexpected address from reserve.");
3194 
3195   return res;
3196 }
3197 
3198 // Reserve memory at an arbitrary address, only if that area is
3199 // available (and not reserved for something else).
3200 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3201   // Windows os::reserve_memory() fails if the requested address range is
3202   // not available.
3203   return reserve_memory(bytes, requested_addr);
3204 }
3205 
3206 size_t os::large_page_size() {
3207   return _large_page_size;
3208 }
3209 
3210 bool os::can_commit_large_page_memory() {
3211   // Windows only uses large page memory when the entire region is reserved
3212   // and committed in a single VirtualAlloc() call. This may change in the
3213   // future, but with Windows 2003 it's not possible to commit on demand.
3214   return false;
3215 }
3216 
3217 bool os::can_execute_large_page_memory() {
3218   return true;
3219 }
3220 
3221 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3222                                  bool exec) {
3223   assert(UseLargePages, "only for large pages");
3224 
3225   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3226     return NULL; // Fallback to small pages.
3227   }
3228 
3229   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3230   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3231 
3232   // with large pages, there are two cases where we need to use Individual Allocation
3233   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3234   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3235   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3236     log_debug(pagesize)("Reserving large pages individually.");
3237 
3238     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3239     if (p_buf == NULL) {
3240       // give an appropriate warning message
3241       if (UseNUMAInterleaving) {
3242         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3243       }
3244       if (UseLargePagesIndividualAllocation) {
3245         warning("Individually allocated large pages failed, "
3246                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3247       }
3248       return NULL;
3249     }
3250 
3251     return p_buf;
3252 
3253   } else {
3254     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3255 
3256     // normal policy just allocate it all at once
3257     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3258     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3259     if (res != NULL) {
3260       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3261     }
3262 
3263     return res;
3264   }
3265 }
3266 
3267 bool os::release_memory_special(char* base, size_t bytes) {
3268   assert(base != NULL, "Sanity check");
3269   return release_memory(base, bytes);
3270 }
3271 
3272 void os::print_statistics() {
3273 }
3274 
3275 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3276   int err = os::get_last_error();
3277   char buf[256];
3278   size_t buf_len = os::lasterror(buf, sizeof(buf));
3279   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3280           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3281           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3282 }
3283 
3284 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3285   if (bytes == 0) {
3286     // Don't bother the OS with noops.
3287     return true;
3288   }
3289   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3290   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3291   // Don't attempt to print anything if the OS call fails. We're
3292   // probably low on resources, so the print itself may cause crashes.
3293 
3294   // Unless NUMAInterleaving is enabled, the range of a commit
3295   // is always within a reserve covered by a single VirtualAlloc;
3296   // in that case we can just do a single commit for the requested size.
3297   if (!UseNUMAInterleaving) {
3298     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3299       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3300       return false;
3301     }
3302     if (exec) {
3303       DWORD oldprot;
3304       // Windows doc says to use VirtualProtect to get execute permissions
3305       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3306         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3307         return false;
3308       }
3309     }
3310     return true;
3311   } else {
3312 
3313     // when NUMAInterleaving is enabled, the commit might cover a range that
3314     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3315     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3316     // returns represents the number of bytes that can be committed in one step.
3317     size_t bytes_remaining = bytes;
3318     char * next_alloc_addr = addr;
3319     while (bytes_remaining > 0) {
3320       MEMORY_BASIC_INFORMATION alloc_info;
3321       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3322       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
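           // Commit at most the size of the region reported by VirtualQuery above.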
3323       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3324                        PAGE_READWRITE) == NULL) {
3325         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3326                                             exec);)
3327         return false;
3328       }
3329       if (exec) {
3330         DWORD oldprot;
3331         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3332                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3333           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3334                                               exec);)
3335           return false;
3336         }
3337       }
3338       bytes_remaining -= bytes_to_rq;
3339       next_alloc_addr += bytes_to_rq;
3340     }
3341   }
3342   // if we made it this far, return true
3343   return true;
3344 }
3345 
3346 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3347                           bool exec) {
3348   // alignment_hint is ignored on this OS
3349   return pd_commit_memory(addr, size, exec);
3350 }
3351 
3352 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3353                                   const char* mesg) {
3354   assert(mesg != NULL, "mesg must be specified");
3355   if (!pd_commit_memory(addr, size, exec)) {
3356     warn_fail_commit_memory(addr, size, exec);
3357     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3358   }
3359 }
3360 
3361 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3362                                   size_t alignment_hint, bool exec,
3363                                   const char* mesg) {
3364   // alignment_hint is ignored on this OS
3365   pd_commit_memory_or_exit(addr, size, exec, mesg);
3366 }
3367 
3368 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3369   if (bytes == 0) {
3370     // Don't bother the OS with noops.
3371     return true;
3372   }
3373   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3374   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3375   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3376 }
3377 
3378 bool os::pd_release_memory(char* addr, size_t bytes) {
3379   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3380 }
3381 
3382 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3383   return os::commit_memory(addr, size, !ExecMem);
3384 }
3385 
3386 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3387   return os::uncommit_memory(addr, size);
3388 }
3389 
3390 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3391   uint count = 0;
3392   bool ret = false;
3393   size_t bytes_remaining = bytes;
3394   char * next_protect_addr = addr;
3395 
3396   // Use VirtualQuery() to get the chunk size.
3397   while (bytes_remaining) {
3398     MEMORY_BASIC_INFORMATION alloc_info;
3399     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3400       return false;
3401     }
3402 
3403     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3404     // We used different APIs in allocate_pages_individually() depending on UseNUMAInterleaving,
3405     // but we don't distinguish here as both cases are protected by the same API.
3406     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3407     if (!ret) {
3408       warning("Failed protecting pages individually for chunk #%u", count);
3409       return false;
3410     }
3411 
3412     bytes_remaining -= bytes_to_protect;
3413     next_protect_addr += bytes_to_protect;
3414     count++;
3415   }
3416   return ret;
3417 }
3418 
3419 // Set protections specified
3420 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3421                         bool is_committed) {
3422   unsigned int p = 0;
3423   switch (prot) {
3424   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3425   case MEM_PROT_READ: p = PAGE_READONLY; break;
3426   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3427   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3428   default:
3429     ShouldNotReachHere();
3430   }
3431 
3432   DWORD old_status;
3433 
3434   // Strangely enough, on Win32 one can change protection only for committed
3435   // memory; not a big deal anyway, as the ranges involved are 64K or less.
3436   if (!is_committed) {
3437     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3438                           "cannot commit protection page");
3439   }
3440   // One cannot use os::guard_memory() here, as on Win32 guard pages
3441   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3442   //
3443   // Pages in the region become guard pages. Any attempt to access a guard page
3444   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3445   // the guard page status. Guard pages thus act as a one-time access alarm.
3446   bool ret;
3447   if (UseNUMAInterleaving) {
3448     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3449     // so we must protect the chunks individually.
3450     ret = protect_pages_individually(addr, bytes, p, &old_status);
3451   } else {
3452     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3453   }
3454 #ifdef ASSERT
3455   if (!ret) {
3456     int err = os::get_last_error();
3457     char buf[256];
3458     size_t buf_len = os::lasterror(buf, sizeof(buf));
3459     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3460           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3461           buf_len != 0 ? buf : "<no_error_string>", err);
3462   }
3463 #endif
3464   return ret;
3465 }
3466 
3467 bool os::guard_memory(char* addr, size_t bytes) {
3468   DWORD old_status;
3469   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3470 }
3471 
3472 bool os::unguard_memory(char* addr, size_t bytes) {
3473   DWORD old_status;
3474   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3475 }
3476 
3477 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3478 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3479 void os::numa_make_global(char *addr, size_t bytes)    { }
3480 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3481 bool os::numa_topology_changed()                       { return false; }
3482 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3483 int os::numa_get_group_id()                            { return 0; }
3484 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3485   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3486     // Provide an answer for UMA systems
3487     ids[0] = 0;
3488     return 1;
3489   } else {
3490     // check for size bigger than actual groups_num
3491     size = MIN2(size, numa_get_groups_num());
3492     for (int i = 0; i < (int)size; i++) {
3493       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3494     }
3495     return size;
3496   }
3497 }
3498 
3499 bool os::get_page_info(char *start, page_info* info) {
3500   return false;
3501 }
3502 
3503 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3504                      page_info* page_found) {
3505   return end;
3506 }
3507 
3508 char* os::non_memory_address_word() {
3509   // Must never look like an address returned by reserve_memory,
3510   // even in its subfields (as defined by the CPU immediate fields,
3511   // if the CPU splits constants across multiple instructions).
3512   return (char*)-1;
3513 }
3514 
3515 #define MAX_ERROR_COUNT 100
3516 #define SYS_THREAD_ERROR 0xffffffffUL
3517 
3518 void os::pd_start_thread(Thread* thread) {
3519   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3520   // Returns previous suspend state:
3521   // 0:  Thread was not suspended
3522   // 1:  Thread is running now
3523   // >1: Thread is still suspended.
3524   assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
3525 }
3526 
3527 class HighResolutionInterval : public CHeapObj<mtThread> {
3528   // The default timer resolution seems to be 10 milliseconds.
3529   // (Where is this written down?)
3530   // If someone wants to sleep for only a fraction of the default,
3531   // then we set the timer resolution down to 1 millisecond for
3532   // the duration of their interval.
3533   // We carefully set the resolution back, since otherwise we
3534   // seem to incur an overhead (3%?) that we don't need.
3535   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
3536   // But if ms is large, say 500 or 503, we should avoid the call to timeBeginPeriod().
3537   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3538   // timeBeginPeriod() if the relative error exceeded some threshold.
3539   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3540   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
3541   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3542   // resolution timers running.
3543  private:
3544   jlong resolution;
3545  public:
3546   HighResolutionInterval(jlong ms) {
3547     resolution = ms % 10L;
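         // A non-zero remainder means ms is not a multiple of the presumed
         // 10 ms default resolution, so ask for 1 ms resolution below.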
3548     if (resolution != 0) {
3549       MMRESULT result = timeBeginPeriod(1L);
3550     }
3551   }
3552   ~HighResolutionInterval() {
3553     if (resolution != 0) {
3554       MMRESULT result = timeEndPeriod(1L);
3555     }
3556     resolution = 0L;
3557   }
3558 };
3559 
3560 int os::sleep(Thread* thread, jlong ms, bool interruptable) {
3561   jlong limit = (jlong) MAXDWORD;
3562 
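       // The Win32 wait and sleep calls below take a DWORD timeout, so sleep
       // in chunks of at most MAXDWORD milliseconds.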
3563   while (ms > limit) {
3564     int res;
3565     if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
3566       return res;
3567     }
3568     ms -= limit;
3569   }
3570 
3571   assert(thread == Thread::current(), "thread consistency check");
3572   OSThread* osthread = thread->osthread();
3573   OSThreadWaitState osts(osthread, false /* not Object.wait() */);
3574   int result;
3575   if (interruptable) {
3576     assert(thread->is_Java_thread(), "must be java thread");
3577     JavaThread *jt = (JavaThread *) thread;
3578     ThreadBlockInVM tbivm(jt);
3579 
3580     jt->set_suspend_equivalent();
3581     // cleared by handle_special_suspend_equivalent_condition() or
3582     // java_suspend_self() via check_and_wait_while_suspended()
3583 
3584     HANDLE events[1];
3585     events[0] = osthread->interrupt_event();
3586     HighResolutionInterval *phri=NULL;
3587     if (!ForceTimeHighResolution) {
3588       phri = new HighResolutionInterval(ms);
3589     }
3590     if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
3591       result = OS_TIMEOUT;
3592     } else {
3593       ResetEvent(osthread->interrupt_event());
3594       osthread->set_interrupted(false);
3595       result = OS_INTRPT;
3596     }
3597     delete phri; //if it is NULL, harmless
3598 
3599     // were we externally suspended while we were waiting?
3600     jt->check_and_wait_while_suspended();
3601   } else {
3602     assert(!thread->is_Java_thread(), "must not be java thread");
3603     Sleep((long) ms);
3604     result = OS_TIMEOUT;
3605   }
3606   return result;
3607 }
3608 
3609 // Short sleep, direct OS call.
3610 //
3611 // ms = 0, means allow others (if any) to run.
3612 //
3613 void os::naked_short_sleep(jlong ms) {
3614   assert(ms < 1000, "Uninterruptible sleep, short time use only");
3615   Sleep(ms);
3616 }
3617 
3618 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3619 void os::infinite_sleep() {
3620   while (true) {    // sleep forever ...
3621     Sleep(100000);  // ... 100 seconds at a time
3622   }
3623 }
3624 
3625 typedef BOOL (WINAPI * STTSignature)(void);
3626 
3627 void os::naked_yield() {
3628   // Consider passing back the return value from SwitchToThread().
3629   SwitchToThread();
3630 }
3631 
3632 // Win32 only gives you access to seven real priorities at a time,
3633 // so we compress Java's ten down to seven.  It would be better
3634 // if we dynamically adjusted relative priorities.
3635 
3636 int os::java_to_os_priority[CriticalPriority + 1] = {
3637   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3638   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3639   THREAD_PRIORITY_LOWEST,                       // 2
3640   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3641   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3642   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3643   THREAD_PRIORITY_NORMAL,                       // 6
3644   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3645   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3646   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3647   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3648   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3649 };
3650 
3651 int prio_policy1[CriticalPriority + 1] = {
3652   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3653   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3654   THREAD_PRIORITY_LOWEST,                       // 2
3655   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3656   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3657   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3658   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3659   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3660   THREAD_PRIORITY_HIGHEST,                      // 8
3661   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3662   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3663   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3664 };
3665 
3666 static int prio_init() {
3667   // If ThreadPriorityPolicy is 1, switch tables
3668   if (ThreadPriorityPolicy == 1) {
3669     int i;
3670     for (i = 0; i < CriticalPriority + 1; i++) {
3671       os::java_to_os_priority[i] = prio_policy1[i];
3672     }
3673   }
3674   if (UseCriticalJavaThreadPriority) {
3675     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3676   }
3677   return 0;
3678 }
3679 
3680 OSReturn os::set_native_priority(Thread* thread, int priority) {
3681   if (!UseThreadPriorities) return OS_OK;
3682   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3683   return ret ? OS_OK : OS_ERR;
3684 }
3685 
3686 OSReturn os::get_native_priority(const Thread* const thread,
3687                                  int* priority_ptr) {
3688   if (!UseThreadPriorities) {
3689     *priority_ptr = java_to_os_priority[NormPriority];
3690     return OS_OK;
3691   }
3692   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3693   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3694     assert(false, "GetThreadPriority failed");
3695     return OS_ERR;
3696   }
3697   *priority_ptr = os_prio;
3698   return OS_OK;
3699 }
3700 
3701 
3702 // Hint to the underlying OS that a task switch would not be good.
3703 // Void return because it's a hint and can fail.
3704 void os::hint_no_preempt() {}
3705 
3706 void os::interrupt(Thread* thread) {
3707   assert(!thread->is_Java_thread() || Thread::current() == thread ||
3708          Threads_lock->owned_by_self(),
3709          "possibility of dangling Thread pointer");
3710 
3711   OSThread* osthread = thread->osthread();
3712   osthread->set_interrupted(true);
3713   // More than one thread can get here with the same value of osthread,
3714   // resulting in multiple notifications.  We do, however, want the store
3715   // to interrupted() to be visible to other threads before we post
3716   // the interrupt event.
3717   OrderAccess::release();
3718   SetEvent(osthread->interrupt_event());
3719   // For JSR166:  unpark after setting status
3720   if (thread->is_Java_thread()) {
3721     ((JavaThread*)thread)->parker()->unpark();
3722   }
3723 
3724   ParkEvent * ev = thread->_ParkEvent;
3725   if (ev != NULL) ev->unpark();
3726 }
3727 
3728 
3729 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3730   assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3731          "possibility of dangling Thread pointer");
3732 
3733   OSThread* osthread = thread->osthread();
3734   // There is no synchronization between the setting of the interrupt
3735   // and it being cleared here. It is critical - see 6535709 - that
3736   // we only clear the interrupt state, and reset the interrupt event,
3737   // if we are going to report that we were indeed interrupted - else
3738   // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3739   // depending on the timing. We check the thread's interrupt event to see
3740   // whether the thread really was interrupted, which prevents spurious wakeups.
3741   bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3742   if (interrupted && clear_interrupted) {
3743     osthread->set_interrupted(false);
3744     ResetEvent(osthread->interrupt_event());
3745   } // Otherwise leave the interrupted state alone
3746 
3747   return interrupted;
3748 }
3749 
3750 // Gets a pc (hint) for a running thread. Currently used only for profiling.
3751 ExtendedPC os::get_thread_pc(Thread* thread) {
3752   CONTEXT context;
3753   context.ContextFlags = CONTEXT_CONTROL;
3754   HANDLE handle = thread->osthread()->thread_handle();
3755 #ifdef _M_IA64
3756   assert(0, "Fix get_thread_pc");
3757   return ExtendedPC(NULL);
3758 #else
3759   if (GetThreadContext(handle, &context)) {
3760 #ifdef _M_AMD64
3761     return ExtendedPC((address) context.Rip);
3762 #else
3763     return ExtendedPC((address) context.Eip);
3764 #endif
3765   } else {
3766     return ExtendedPC(NULL);
3767   }
3768 #endif
3769 }
3770 
3771 // GetCurrentThreadId() returns DWORD
3772 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3773 
3774 static int _initial_pid = 0;
3775 
3776 int os::current_process_id() {
3777   return (_initial_pid ? _initial_pid : _getpid());
3778 }
3779 
3780 int    os::win32::_vm_page_size              = 0;
3781 int    os::win32::_vm_allocation_granularity = 0;
3782 int    os::win32::_processor_type            = 0;
3783 // Processor level is not available on non-NT systems, use vm_version instead
3784 int    os::win32::_processor_level           = 0;
3785 julong os::win32::_physical_memory           = 0;
3786 size_t os::win32::_default_stack_size        = 0;
3787 
3788 intx          os::win32::_os_thread_limit    = 0;
3789 volatile intx os::win32::_os_thread_count    = 0;
3790 
3791 bool   os::win32::_is_windows_server         = false;
3792 
3793 // 6573254
3794 // Currently, the bug is observed across all the supported Windows releases,
3795 // including the latest one (as of this writing - Windows Server 2012 R2)
3796 bool   os::win32::_has_exit_bug              = true;
3797 
3798 void os::win32::initialize_system_info() {
3799   SYSTEM_INFO si;
3800   GetSystemInfo(&si);
3801   _vm_page_size    = si.dwPageSize;
3802   _vm_allocation_granularity = si.dwAllocationGranularity;
3803   _processor_type  = si.dwProcessorType;
3804   _processor_level = si.wProcessorLevel;
3805   set_processor_count(si.dwNumberOfProcessors);
3806 
3807   MEMORYSTATUSEX ms;
3808   ms.dwLength = sizeof(ms);
3809 
3810   // also returns ullAvailPhys (free physical memory bytes), ullTotalVirtual, ullAvailVirtual,
3811   // and dwMemoryLoad (% of memory in use)
3812   GlobalMemoryStatusEx(&ms);
3813   _physical_memory = ms.ullTotalPhys;
3814 
3815   if (FLAG_IS_DEFAULT(MaxRAM)) {
3816     // Adjust MaxRAM according to the maximum virtual address space available.
3817     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3818   }
3819 
3820   OSVERSIONINFOEX oi;
3821   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3822   GetVersionEx((OSVERSIONINFO*)&oi);
3823   switch (oi.dwPlatformId) {
3824   case VER_PLATFORM_WIN32_NT:
3825     {
3826       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3827       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3828           oi.wProductType == VER_NT_SERVER) {
3829         _is_windows_server = true;
3830       }
3831     }
3832     break;
3833   default: fatal("Unknown platform");
3834   }
3835 
3836   _default_stack_size = os::current_stack_size();
3837   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3838   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3839          "stack size not a multiple of page size");
3840 
3841   initialize_performance_counter();
3842 }
3843 
3844 
3845 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3846                                       int ebuflen) {
3847   char path[MAX_PATH];
3848   DWORD size;
3849   DWORD pathLen = (DWORD)sizeof(path);
3850   HINSTANCE result = NULL;
3851 
3852   // only allow library name without path component
3853   assert(strchr(name, '\\') == NULL, "path not allowed");
3854   assert(strchr(name, ':') == NULL, "path not allowed");
3855   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3856     jio_snprintf(ebuf, ebuflen,
3857                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3858     return NULL;
3859   }
3860 
3861   // search system directory
3862   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3863     if (size >= pathLen) {
3864       return NULL; // truncated
3865     }
3866     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3867       return NULL; // truncated
3868     }
3869     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3870       return result;
3871     }
3872   }
3873 
3874   // try Windows directory
3875   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3876     if (size >= pathLen) {
3877       return NULL; // truncated
3878     }
3879     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3880       return NULL; // truncated
3881     }
3882     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3883       return result;
3884     }
3885   }
3886 
3887   jio_snprintf(ebuf, ebuflen,
3888                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3889   return NULL;
3890 }
3891 
3892 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3893 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3894 
3895 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3896   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3897   return TRUE;
3898 }
3899 
3900 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3901   // Basic approach:
3902   //  - Each exiting thread registers its intent to exit and then does so.
3903   //  - A thread trying to terminate the process must wait for all
3904   //    threads currently exiting to complete their exit.
3905 
3906   if (os::win32::has_exit_bug()) {
3907     // The array holds handles of the threads that have started exiting by calling
3908     // _endthreadex().
3909     // Should be large enough to avoid blocking the exiting thread due to lack of
3910     // a free slot.
3911     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3912     static int handle_count = 0;
3913 
3914     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3915     static CRITICAL_SECTION crit_sect;
3916     static volatile jint process_exiting = 0;
3917     int i, j;
3918     DWORD res;
3919     HANDLE hproc, hthr;
3920 
3921     // We only attempt to register threads until a process exiting
3922     // thread manages to set the process_exiting flag. Any threads
3923     // that come through here after the process_exiting flag is set
3924     // are unregistered and will be caught in the SuspendThread()
3925     // infinite loop below.
3926     bool registered = false;
3927 
3928     // The first thread that reaches this point initializes the critical section.
3929     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3930       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3931     } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3932       if (what != EPT_THREAD) {
3933         // Atomically set process_exiting before the critical section
3934         // to increase the visibility between racing threads.
3935         Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
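             // Storing the thread id (rather than just a flag) lets the check
             // near the end tell whether the exiting thread is ourselves.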
3936       }
3937       EnterCriticalSection(&crit_sect);
3938 
3939       if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3940         // Remove from the array those handles of the threads that have completed exiting.
3941         for (i = 0, j = 0; i < handle_count; ++i) {
3942           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3943           if (res == WAIT_TIMEOUT) {
3944             handles[j++] = handles[i];
3945           } else {
3946             if (res == WAIT_FAILED) {
3947               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3948                       GetLastError(), __FILE__, __LINE__);
3949             }
3950             // Don't keep the handle, if we failed waiting for it.
3951             CloseHandle(handles[i]);
3952           }
3953         }
3954 
3955         // If there's no free slot in the array of the kept handles, we'll have to
3956         // wait until at least one thread completes exiting.
3957         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3958           // Raise the priority of the oldest exiting thread to increase its chances
3959           // to complete sooner.
3960           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3961           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3962           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3963             i = (res - WAIT_OBJECT_0);
3964             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3965             for (; i < handle_count; ++i) {
3966               handles[i] = handles[i + 1];
3967             }
3968           } else {
3969             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3970                     (res == WAIT_FAILED ? "failed" : "timed out"),
3971                     GetLastError(), __FILE__, __LINE__);
3972             // Don't keep handles, if we failed waiting for them.
3973             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3974               CloseHandle(handles[i]);
3975             }
3976             handle_count = 0;
3977           }
3978         }
3979 
3980         // Store a duplicate of the current thread handle in the array of handles.
3981         hproc = GetCurrentProcess();
3982         hthr = GetCurrentThread();
3983         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3984                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3985           warning("DuplicateHandle failed (%u) in %s: %d\n",
3986                   GetLastError(), __FILE__, __LINE__);
3987 
3988           // We can't register this thread (no more handles) so this thread
3989           // may be racing with a thread that is calling exit(). If the thread
3990           // that is calling exit() has managed to set the process_exiting
3991           // flag, then this thread will be caught in the SuspendThread()
3992           // infinite loop below which closes that race. A small timing
3993           // window remains before the process_exiting flag is set, but it
3994           // is only exposed when we are out of handles.
3995         } else {
3996           ++handle_count;
3997           registered = true;
3998 
3999           // The current exiting thread has stored its handle in the array, and now
4000           // should leave the critical section before calling _endthreadex().
4001         }
4002 
4003       } else if (what != EPT_THREAD && handle_count > 0) {
4004         jlong start_time, finish_time, timeout_left;
4005         // Before ending the process, make sure all the threads that had called
4006         // _endthreadex() completed.
4007 
4008         // Set the priority level of the current thread to the same value as
4009         // the priority level of exiting threads.
4010         // This is to ensure it will be given a fair chance to execute if
4011         // the timeout expires.
4012         hthr = GetCurrentThread();
4013         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
4014         start_time = os::javaTimeNanos();
4015         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
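             // EXIT_TIMEOUT is in milliseconds, javaTimeNanos() is in nanoseconds,
             // hence the conversion factor of 1000000.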
4016         for (i = 0; ; ) {
4017           int portion_count = handle_count - i;
4018           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
4019             portion_count = MAXIMUM_WAIT_OBJECTS;
4020           }
4021           for (j = 0; j < portion_count; ++j) {
4022             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
4023           }
4024           timeout_left = (finish_time - start_time) / 1000000L;
4025           if (timeout_left < 0) {
4026             timeout_left = 0;
4027           }
4028           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4029           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4030             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4031                     (res == WAIT_FAILED ? "failed" : "timed out"),
4032                     GetLastError(), __FILE__, __LINE__);
4033             // Reset portion_count so we close the remaining
4034             // handles due to this error.
4035             portion_count = handle_count - i;
4036           }
4037           for (j = 0; j < portion_count; ++j) {
4038             CloseHandle(handles[i + j]);
4039           }
4040           if ((i += portion_count) >= handle_count) {
4041             break;
4042           }
4043           start_time = os::javaTimeNanos();
4044         }
4045         handle_count = 0;
4046       }
4047 
4048       LeaveCriticalSection(&crit_sect);
4049     }
4050 
4051     if (!registered &&
4052         OrderAccess::load_acquire(&process_exiting) != 0 &&
4053         process_exiting != (jint)GetCurrentThreadId()) {
4054       // Some other thread is about to call exit(), so we don't let
4055       // the current unregistered thread proceed to exit() or _endthreadex()
4056       while (true) {
4057         SuspendThread(GetCurrentThread());
4058         // Avoid busy-wait loop, if SuspendThread() failed.
4059         Sleep(EXIT_TIMEOUT);
4060       }
4061     }
4062   }
4063 
4064   // We are here if either
4065   // - there's no 'race at exit' bug on this OS release;
4066   // - initialization of the critical section failed (unlikely);
4067   // - the current thread has registered itself and left the critical section;
4068   // - the process-exiting thread has raised the flag and left the critical section.
4069   if (what == EPT_THREAD) {
4070     _endthreadex((unsigned)exit_code);
4071   } else if (what == EPT_PROCESS) {
4072     ::exit(exit_code);
4073   } else {
4074     _exit(exit_code);
4075   }
4076 
4077   // Should not reach here
4078   return exit_code;
4079 }
4080 
4081 #undef EXIT_TIMEOUT
4082 
4083 void os::win32::setmode_streams() {
4084   _setmode(_fileno(stdin), _O_BINARY);
4085   _setmode(_fileno(stdout), _O_BINARY);
4086   _setmode(_fileno(stderr), _O_BINARY);
4087 }
4088 
4089 
4090 bool os::is_debugger_attached() {
4091   return IsDebuggerPresent() ? true : false;
4092 }
4093 
4094 
4095 void os::wait_for_keypress_at_exit(void) {
4096   if (PauseAtExit) {
4097     fprintf(stderr, "Press any key to continue...\n");
4098     fgetc(stdin);
4099   }
4100 }
4101 
4102 
4103 bool os::message_box(const char* title, const char* message) {
4104   int result = MessageBox(NULL, message, title,
4105                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4106   return result == IDYES;
4107 }
4108 
4109 #ifndef PRODUCT
4110 #ifndef _WIN64
4111 // Helpers to check whether NX protection is enabled
4112 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4113   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4114       pex->ExceptionRecord->NumberParameters > 0 &&
4115       pex->ExceptionRecord->ExceptionInformation[0] ==
4116       EXCEPTION_INFO_EXEC_VIOLATION) {
4117     return EXCEPTION_EXECUTE_HANDLER;
4118   }
4119   return EXCEPTION_CONTINUE_SEARCH;
4120 }
4121 
4122 void nx_check_protection() {
4123   // If NX is enabled we'll get an exception calling into code on the stack
4124   char code[] = { (char)0xC3 }; // ret
4125   void *code_ptr = (void *)code;
4126   __try {
4127     __asm call code_ptr
4128   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4129     tty->print_raw_cr("NX protection detected.");
4130   }
4131 }
4132 #endif // _WIN64
4133 #endif // PRODUCT
4134 
4135 // This is called _before_ the global arguments have been parsed
4136 void os::init(void) {
4137   _initial_pid = _getpid();
4138 
4139   init_random(1234567);
4140 
4141   win32::initialize_system_info();
4142   win32::setmode_streams();
4143   init_page_sizes((size_t) win32::vm_page_size());
4144 
4145   // This may be overridden later when argument processing is done.
4146   FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);
4147 
4148   // Initialize main_process and main_thread
4149   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4150   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4151                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4152     fatal("DuplicateHandle failed\n");
4153   }
4154   main_thread_id = (int) GetCurrentThreadId();
4155 
4156   // initialize fast thread access - only used for 32-bit
4157   win32::initialize_thread_ptr_offset();
4158 }
4159 
4160 // To install functions for atexit processing
4161 extern "C" {
4162   static void perfMemory_exit_helper() {
4163     perfMemory_exit();
4164   }
4165 }
4166 
4167 static jint initSock();
4168 
4169 // this is called _after_ the global arguments have been parsed
4170 jint os::init_2(void) {
4171   // Allocate a single page and mark it as readable for safepoint polling
4172   address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
4173   guarantee(polling_page != NULL, "Reserve Failed for polling page");
4174 
4175   address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
4176   guarantee(return_page != NULL, "Commit Failed for polling page");
4177 
4178   os::set_polling_page(polling_page);
4179   log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
4180 
4181   if (!UseMembar) {
4182     address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
4183     guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
4184 
4185     return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
4186     guarantee(return_page != NULL, "Commit Failed for memory serialize page");
4187 
4188     os::set_memory_serialize_page(mem_serialize_page);
4189     log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
4190   }
4191 
4192   // Setup Windows Exceptions
4193 
4194   // for debugging float code generation bugs
4195   if (ForceFloatExceptions) {
4196 #ifndef  _WIN64
4197     static long fp_control_word = 0;
4198     __asm { fstcw fp_control_word }
4199     // see Intel PPro Manual, Vol. 2, p 7-16
4200     const long precision = 0x20;
4201     const long underflow = 0x10;
4202     const long overflow  = 0x08;
4203     const long zero_div  = 0x04;
4204     const long denorm    = 0x02;
4205     const long invalid   = 0x01;
4206     fp_control_word |= invalid;
4207     __asm { fldcw fp_control_word }
4208 #endif
4209   }
4210 
4211   // If stack_commit_size is 0, Windows will reserve the default size,
4212   // but only commit a small portion of it.
4213   size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
4214   size_t default_reserve_size = os::win32::default_stack_size();
4215   size_t actual_reserve_size = stack_commit_size;
4216   if (stack_commit_size < default_reserve_size) {
4217     // If stack_commit_size == 0, we want this too
4218     actual_reserve_size = default_reserve_size;
4219   }
4220 
4221   // Check minimum allowable stack size for thread creation and to initialize
4222   // the java system classes, including StackOverflowError - depends on page
4223   // size.  Add two 4K pages for compiler2 recursion in main thread.
4224   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4225   // class initialization depending on 32 or 64 bit VM.
4226   size_t min_stack_allowed =
4227             (size_t)(JavaThread::stack_guard_zone_size() +
4228                      JavaThread::stack_shadow_zone_size() +
4229                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4230 
4231   min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
4232 
4233   if (actual_reserve_size < min_stack_allowed) {
4234     tty->print_cr("\nThe Java thread stack size specified is too small. "
4235                   "Specify at least %dk",
4236                   min_stack_allowed / K);
4237     return JNI_ERR;
4238   }
4239 
4240   JavaThread::set_stack_size_at_create(stack_commit_size);
4241 
4242   // Calculate the theoretical max. number of threads to guard against artificial
4243   // out-of-memory situations, where all available address-space has been
4244   // reserved by thread stacks.
4245   assert(actual_reserve_size != 0, "Must have a stack");
4246 
4247   // Calculate the thread limit when we should start doing Virtual Memory
4248   // banging. Currently this is when the threads have used all but 200MB of space.
4249   //
4250   // TODO: consider performing a similar calculation for commit size instead
4251   // as reserve size, since on a 64-bit platform we'll run into that more
4252   // often than running out of virtual memory space.  We can use the
4253   // lower value of the two calculations as the os_thread_limit.
4254   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4255   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
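       // e.g. on a 32-bit VM this is (2GB - 200MB) of address space; assuming a
       // typical 1MB per-thread reserve, that is a limit of roughly 1850 threads.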
4256 
4257   // at exit methods are called in the reverse order of their registration.
4258   // there is no limit to the number of functions registered. atexit does
4259   // not set errno.
4260 
4261   if (PerfAllowAtExitRegistration) {
4262     // only register atexit functions if PerfAllowAtExitRegistration is set.
4263     // atexit functions can be delayed until process exit time, which
4264     // can be problematic for embedded VM situations. Embedded VMs should
4265     // call DestroyJavaVM() to assure that VM resources are released.
4266 
4267     // note: perfMemory_exit_helper atexit function may be removed in
4268     // the future if the appropriate cleanup code can be added to the
4269     // VM_Exit VMOperation's doit method.
4270     if (atexit(perfMemory_exit_helper) != 0) {
4271       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4272     }
4273   }
4274 
4275 #ifndef _WIN64
4276   // Print something if NX is enabled (win32 on AMD64)
4277   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4278 #endif
4279 
4280   // initialize thread priority policy
4281   prio_init();
4282 
4283   if (UseNUMA && !ForceNUMA) {
4284     UseNUMA = false; // We don't fully support this yet
4285   }
4286 
4287   if (UseNUMAInterleaving) {
4288     // First check whether this Windows OS supports VirtualAllocExNuma; if not, ignore this flag
4289     bool success = numa_interleaving_init();
4290     if (!success) UseNUMAInterleaving = false;
4291   }
4292 
4293   if (initSock() != JNI_OK) {
4294     return JNI_ERR;
4295   }
4296 
4297   return JNI_OK;
4298 }
4299 
4300 // Mark the polling page as unreadable
4301 void os::make_polling_page_unreadable(void) {
4302   DWORD old_status;
4303   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4304                       PAGE_NOACCESS, &old_status)) {
4305     fatal("Could not disable polling page");
4306   }
4307 }
4308 
4309 // Mark the polling page as readable
4310 void os::make_polling_page_readable(void) {
4311   DWORD old_status;
4312   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4313                       PAGE_READONLY, &old_status)) {
4314     fatal("Could not enable polling page");
4315   }
4316 }
4317 
4318 
4319 int os::stat(const char *path, struct stat *sbuf) {
4320   char pathbuf[MAX_PATH];
4321   if (strlen(path) > MAX_PATH - 1) {
4322     errno = ENAMETOOLONG;
4323     return -1;
4324   }
4325   os::native_path(strcpy(pathbuf, path));
4326   int ret = ::stat(pathbuf, sbuf);
4327   if (sbuf != NULL && UseUTCFileTimestamp) {
4328     // Fix for 6539723.  st_mtime returned from stat() is dependent on
4329     // the system timezone and so can return different values for the
4330     // same file if/when daylight savings time changes.  This adjustment
4331     // makes sure the same timestamp is returned regardless of the TZ.
4332     //
4333     // See:
4334     // http://msdn.microsoft.com/library/
4335     //   default.asp?url=/library/en-us/sysinfo/base/
4336     //   time_zone_information_str.asp
4337     // and
4338     // http://msdn.microsoft.com/library/default.asp?url=
4339     //   /library/en-us/sysinfo/base/settimezoneinformation.asp
4340     //
    // NOTE: there is an insidious bug here:  If the timezone is changed
4342     // after the call to stat() but before 'GetTimeZoneInformation()', then
4343     // the adjustment we do here will be wrong and we'll return the wrong
4344     // value (which will likely end up creating an invalid class data
4345     // archive).  Absent a better API for this, or some time zone locking
4346     // mechanism, we'll have to live with this risk.
4347     TIME_ZONE_INFORMATION tz;
4348     DWORD tzid = GetTimeZoneInformation(&tz);
4349     int daylightBias =
4350       (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
4351     sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4352   }
4353   return ret;
4354 }
4355 
4356 
4357 #define FT2INT64(ft) \
4358   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
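// FT2INT64 combines the two 32-bit halves of a FILETIME into a single signed
// 64-bit count of 100-nanosecond intervals.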
4359 
4360 
4361 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4362 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4363 // of a thread.
4364 //
// current_thread_cpu_time() and thread_cpu_time(Thread*) return
// the fast estimate available on the platform.
4367 
4368 // current_thread_cpu_time() is not optimized for Windows yet
4369 jlong os::current_thread_cpu_time() {
4370   // return user + sys since the cost is the same
4371   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4372 }
4373 
4374 jlong os::thread_cpu_time(Thread* thread) {
4375   // consistent with what current_thread_cpu_time() returns.
4376   return os::thread_cpu_time(thread, true /* user+sys */);
4377 }
4378 
4379 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4380   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4381 }
4382 
4383 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4385   // If this function changes, os::is_thread_cpu_time_supported() should too
4386   FILETIME CreationTime;
4387   FILETIME ExitTime;
4388   FILETIME KernelTime;
4389   FILETIME UserTime;
4390 
4391   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4392                       &ExitTime, &KernelTime, &UserTime) == 0) {
4393     return -1;
4394   } else if (user_sys_cpu_time) {
4395     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4396   } else {
4397     return FT2INT64(UserTime) * 100;
4398   }
4399 }
4400 
4401 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4402   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4403   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4404   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4405   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4406 }
4407 
4408 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4409   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4410   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4411   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4412   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4413 }
4414 
4415 bool os::is_thread_cpu_time_supported() {
4416   // see os::thread_cpu_time
4417   FILETIME CreationTime;
4418   FILETIME ExitTime;
4419   FILETIME KernelTime;
4420   FILETIME UserTime;
4421 
4422   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4423                       &KernelTime, &UserTime) == 0) {
4424     return false;
4425   } else {
4426     return true;
4427   }
4428 }
4429 
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4431 // It does have primitives (PDH API) to get CPU usage and run queue length.
4432 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4433 // If we wanted to implement loadavg on Windows, we have a few options:
4434 //
4435 // a) Query CPU usage and run queue length and "fake" an answer by
4436 //    returning the CPU usage if it's under 100%, and the run queue
4437 //    length otherwise.  It turns out that querying is pretty slow
4438 //    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on Windows the CPU usage value is the % usage
4440 //    since the last time the API was called (and the first call
4441 //    returns 100%), so we'd have to deal with that as well.
4442 //
4443 // b) Sample the "fake" answer using a sampling thread and store
4444 //    the answer in a global variable.  The call to loadavg would
4445 //    just return the value of the global, avoiding the slow query.
4446 //
4447 // c) Sample a better answer using exponential decay to smooth the
4448 //    value.  This is basically the algorithm used by UNIX kernels.
4449 //
4450 // Note that sampling thread starvation could affect both (b) and (c).
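//
// A hypothetical sketch of option (a) using the PDH API (this is an
// assumption about how such a query could look, not code used by the VM;
// it would require <pdh.h> and pdh.lib, and error handling is omitted):
//
//   PDH_HQUERY query;
//   PDH_HCOUNTER cpu;
//   PdhOpenQuery(NULL, 0, &query);
//   PdhAddEnglishCounter(query, "\\Processor(_Total)\\% Processor Time", 0, &cpu);
//   PdhCollectQueryData(query);        // first sample establishes a baseline
//   Sleep(1000);                       // wait between the two samples
//   PdhCollectQueryData(query);
//   PDH_FMT_COUNTERVALUE value;
//   PdhGetFormattedCounterValue(cpu, PDH_FMT_DOUBLE, NULL, &value);
//   // value.doubleValue now holds the CPU usage (%) over the interval
//   PdhCloseQuery(query);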
4451 int os::loadavg(double loadavg[], int nelem) {
4452   return -1;
4453 }
4454 
4455 
4456 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4457 bool os::dont_yield() {
4458   return DontYieldALot;
4459 }
4460 
4461 // This method is a slightly reworked copy of JDK's sysOpen
4462 // from src/windows/hpi/src/sys_api_md.c
4463 
4464 int os::open(const char *path, int oflag, int mode) {
4465   char pathbuf[MAX_PATH];
4466 
4467   if (strlen(path) > MAX_PATH - 1) {
4468     errno = ENAMETOOLONG;
4469     return -1;
4470   }
4471   os::native_path(strcpy(pathbuf, path));
4472   return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4473 }
4474 
4475 FILE* os::open(int fd, const char* mode) {
4476   return ::_fdopen(fd, mode);
4477 }
4478 
4479 // Is a (classpath) directory empty?
4480 bool os::dir_is_empty(const char* path) {
4481   WIN32_FIND_DATA fd;
4482   HANDLE f = FindFirstFile(path, &fd);
4483   if (f == INVALID_HANDLE_VALUE) {
4484     return true;
4485   }
4486   FindClose(f);
4487   return false;
4488 }
4489 
4490 // create binary file, rewriting existing file if required
4491 int os::create_binary_file(const char* path, bool rewrite_existing) {
4492   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4493   if (!rewrite_existing) {
4494     oflags |= _O_EXCL;
4495   }
4496   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4497 }
4498 
4499 // return current position of file pointer
4500 jlong os::current_file_offset(int fd) {
4501   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4502 }
4503 
4504 // move file pointer to the specified offset
4505 jlong os::seek_to_file_offset(int fd, jlong offset) {
4506   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4507 }
4508 
4509 
4510 jlong os::lseek(int fd, jlong offset, int whence) {
4511   return (jlong) ::_lseeki64(fd, offset, whence);
4512 }
4513 
4514 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4515   OVERLAPPED ov;
4516   DWORD nread;
4517   BOOL result;
4518 
4519   ZeroMemory(&ov, sizeof(ov));
4520   ov.Offset = (DWORD)offset;
4521   ov.OffsetHigh = (DWORD)(offset >> 32);
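  // For example (illustrative), an offset of 0x123456789 splits into
  // ov.Offset = 0x23456789 and ov.OffsetHigh = 0x1.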
4522 
4523   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4524 
4525   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4526 
4527   return result ? nread : 0;
4528 }
4529 
4530 
4531 // This method is a slightly reworked copy of JDK's sysNativePath
4532 // from src/windows/hpi/src/path_md.c
4533 
4534 // Convert a pathname to native format.  On win32, this involves forcing all
4535 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4536 // sometimes rejects '/') and removing redundant separators.  The input path is
4537 // assumed to have been converted into the character encoding used by the local
4538 // system.  Because this might be a double-byte encoding, care is taken to
4539 // treat double-byte lead characters correctly.
4540 //
4541 // This procedure modifies the given path in place, as the result is never
4542 // longer than the original.  There is no error return; this operation always
4543 // succeeds.
4544 char * os::native_path(char *path) {
4545   char *src = path, *dst = path, *end = path;
4546   char *colon = NULL;  // If a drive specifier is found, this will
4547                        // point to the colon following the drive letter
4548 
4549   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4550   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4551           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4552 
4553   // Check for leading separators
4554 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4555   while (isfilesep(*src)) {
4556     src++;
4557   }
4558 
4559   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4560     // Remove leading separators if followed by drive specifier.  This
4561     // hack is necessary to support file URLs containing drive
4562     // specifiers (e.g., "file://c:/path").  As a side effect,
4563     // "/c:/path" can be used as an alternative to "c:/path".
4564     *dst++ = *src++;
4565     colon = dst;
4566     *dst++ = ':';
4567     src++;
4568   } else {
4569     src = path;
4570     if (isfilesep(src[0]) && isfilesep(src[1])) {
4571       // UNC pathname: Retain first separator; leave src pointed at
4572       // second separator so that further separators will be collapsed
4573       // into the second separator.  The result will be a pathname
4574       // beginning with "\\\\" followed (most likely) by a host name.
4575       src = dst = path + 1;
4576       path[0] = '\\';     // Force first separator to '\\'
4577     }
4578   }
4579 
4580   end = dst;
4581 
4582   // Remove redundant separators from remainder of path, forcing all
4583   // separators to be '\\' rather than '/'. Also, single byte space
4584   // characters are removed from the end of the path because those
4585   // are not legal ending characters on this operating system.
4586   //
4587   while (*src != '\0') {
4588     if (isfilesep(*src)) {
4589       *dst++ = '\\'; src++;
4590       while (isfilesep(*src)) src++;
4591       if (*src == '\0') {
4592         // Check for trailing separator
4593         end = dst;
4594         if (colon == dst - 2) break;  // "z:\\"
4595         if (dst == path + 1) break;   // "\\"
4596         if (dst == path + 2 && isfilesep(path[0])) {
4597           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4598           // beginning of a UNC pathname.  Even though it is not, by
4599           // itself, a valid UNC pathname, we leave it as is in order
4600           // to be consistent with the path canonicalizer as well
4601           // as the win32 APIs, which treat this case as an invalid
4602           // UNC pathname rather than as an alias for the root
4603           // directory of the current drive.
4604           break;
4605         }
4606         end = --dst;  // Path does not denote a root directory, so
4607                       // remove trailing separator
4608         break;
4609       }
4610       end = dst;
4611     } else {
4612       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4613         *dst++ = *src++;
4614         if (*src) *dst++ = *src++;
4615         end = dst;
4616       } else {  // Copy a single-byte character
4617         char c = *src++;
4618         *dst++ = c;
4619         // Space is not a legal ending character
4620         if (c != ' ') end = dst;
4621       }
4622     }
4623   }
4624 
4625   *end = '\0';
4626 
4627   // For "z:", add "." to work around a bug in the C runtime library
4628   if (colon == dst - 1) {
4629     path[2] = '.';
4630     path[3] = '\0';
4631   }
4632 
4633   return path;
4634 }
4635 
4636 // This code is a copy of JDK's sysSetLength
4637 // from src/windows/hpi/src/sys_api_md.c
4638 
4639 int os::ftruncate(int fd, jlong length) {
4640   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4641   long high = (long)(length >> 32);
4642   DWORD ret;
4643 
4644   if (h == (HANDLE)(-1)) {
4645     return -1;
4646   }
4647 
4648   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4649   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4650     return -1;
4651   }
4652 
4653   if (::SetEndOfFile(h) == FALSE) {
4654     return -1;
4655   }
4656 
4657   return 0;
4658 }
4659 
4660 int os::get_fileno(FILE* fp) {
4661   return _fileno(fp);
4662 }
4663 
4664 // This code is a copy of JDK's sysSync
4665 // from src/windows/hpi/src/sys_api_md.c
4666 // except for the legacy workaround for a bug in Win 98
4667 
4668 int os::fsync(int fd) {
4669   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4670 
4671   if ((!::FlushFileBuffers(handle)) &&
4672       (GetLastError() != ERROR_ACCESS_DENIED)) {
4673     // from winerror.h
4674     return -1;
4675   }
4676   return 0;
4677 }
4678 
4679 static int nonSeekAvailable(int, long *);
4680 static int stdinAvailable(int, long *);
4681 
4682 #define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
4683 #define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4684 
4685 // This code is a copy of JDK's sysAvailable
4686 // from src/windows/hpi/src/sys_api_md.c
4687 
4688 int os::available(int fd, jlong *bytes) {
4689   jlong cur, end;
4690   struct _stati64 stbuf64;
4691 
4692   if (::_fstati64(fd, &stbuf64) >= 0) {
4693     int mode = stbuf64.st_mode;
4694     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4695       int ret;
4696       long lpbytes;
4697       if (fd == 0) {
4698         ret = stdinAvailable(fd, &lpbytes);
4699       } else {
4700         ret = nonSeekAvailable(fd, &lpbytes);
4701       }
4702       (*bytes) = (jlong)(lpbytes);
4703       return ret;
4704     }
4705     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4706       return FALSE;
4707     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4708       return FALSE;
4709     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4710       return FALSE;
4711     }
4712     *bytes = end - cur;
4713     return TRUE;
4714   } else {
4715     return FALSE;
4716   }
4717 }
4718 
4719 void os::flockfile(FILE* fp) {
4720   _lock_file(fp);
4721 }
4722 
4723 void os::funlockfile(FILE* fp) {
4724   _unlock_file(fp);
4725 }
4726 
4727 // This code is a copy of JDK's nonSeekAvailable
4728 // from src/windows/hpi/src/sys_api_md.c
4729 
4730 static int nonSeekAvailable(int fd, long *pbytes) {
4731   // This is used for available on non-seekable devices
4732   // (like both named and anonymous pipes, such as pipes
4733   //  connected to an exec'd process).
4734   // Standard Input is a special case.
4735   HANDLE han;
4736 
4737   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4738     return FALSE;
4739   }
4740 
4741   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4742     // PeekNamedPipe fails when at EOF.  In that case we
4743     // simply make *pbytes = 0 which is consistent with the
4744     // behavior we get on Solaris when an fd is at EOF.
4745     // The only alternative is to raise an Exception,
4746     // which isn't really warranted.
4747     //
4748     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4749       return FALSE;
4750     }
4751     *pbytes = 0;
4752   }
4753   return TRUE;
4754 }
4755 
4756 #define MAX_INPUT_EVENTS 2000
4757 
4758 // This code is a copy of JDK's stdinAvailable
4759 // from src/windows/hpi/src/sys_api_md.c
4760 
4761 static int stdinAvailable(int fd, long *pbytes) {
4762   HANDLE han;
4763   DWORD numEventsRead = 0;  // Number of events read from buffer
4764   DWORD numEvents = 0;      // Number of events in buffer
4765   DWORD i = 0;              // Loop index
4766   DWORD curLength = 0;      // Position marker
4767   DWORD actualLength = 0;   // Number of bytes readable
4768   BOOL error = FALSE;       // Error holder
4769   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4770 
4771   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4772     return FALSE;
4773   }
4774 
4775   // Construct an array of input records in the console buffer
4776   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4777   if (error == 0) {
4778     return nonSeekAvailable(fd, pbytes);
4779   }
4780 
4781   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4782   if (numEvents > MAX_INPUT_EVENTS) {
4783     numEvents = MAX_INPUT_EVENTS;
4784   }
4785 
4786   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4787   if (lpBuffer == NULL) {
4788     return FALSE;
4789   }
4790 
4791   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4792   if (error == 0) {
4793     os::free(lpBuffer);
4794     return FALSE;
4795   }
4796 
4797   // Examine input records for the number of bytes available
4798   for (i=0; i<numEvents; i++) {
4799     if (lpBuffer[i].EventType == KEY_EVENT) {
4800 
4801       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4802                                       &(lpBuffer[i].Event);
4803       if (keyRecord->bKeyDown == TRUE) {
4804         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4805         curLength++;
4806         if (*keyPressed == '\r') {
4807           actualLength = curLength;
4808         }
4809       }
4810     }
4811   }
4812 
4813   if (lpBuffer != NULL) {
4814     os::free(lpBuffer);
4815   }
4816 
4817   *pbytes = (long) actualLength;
4818   return TRUE;
4819 }
4820 
4821 // Map a block of memory.
4822 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4823                         char *addr, size_t bytes, bool read_only,
4824                         bool allow_exec) {
4825   HANDLE hFile;
4826   char* base;
4827 
4828   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4829                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  if (hFile == INVALID_HANDLE_VALUE) {
4831     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4832     return NULL;
4833   }
4834 
4835   if (allow_exec) {
4836     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4837     // unless it comes from a PE image (which the shared archive is not.)
4838     // Even VirtualProtect refuses to give execute access to mapped memory
4839     // that was not previously executable.
4840     //
4841     // Instead, stick the executable region in anonymous memory.  Yuck.
4842     // Penalty is that ~4 pages will not be shareable - in the future
4843     // we might consider DLLizing the shared archive with a proper PE
4844     // header so that mapping executable + sharing is possible.
4845 
4846     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4847                                 PAGE_READWRITE);
4848     if (base == NULL) {
4849       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4850       CloseHandle(hFile);
4851       return NULL;
4852     }
4853 
4854     DWORD bytes_read;
4855     OVERLAPPED overlapped;
4856     overlapped.Offset = (DWORD)file_offset;
4857     overlapped.OffsetHigh = 0;
4858     overlapped.hEvent = NULL;
4859     // ReadFile guarantees that if the return value is true, the requested
4860     // number of bytes were read before returning.
4861     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4862     if (!res) {
4863       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4864       release_memory(base, bytes);
4865       CloseHandle(hFile);
4866       return NULL;
4867     }
4868   } else {
4869     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4870                                     NULL /* file_name */);
4871     if (hMap == NULL) {
4872       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4873       CloseHandle(hFile);
4874       return NULL;
4875     }
4876 
4877     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4878     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4879                                   (DWORD)bytes, addr);
4880     if (base == NULL) {
4881       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4882       CloseHandle(hMap);
4883       CloseHandle(hFile);
4884       return NULL;
4885     }
4886 
4887     if (CloseHandle(hMap) == 0) {
4888       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4889       CloseHandle(hFile);
4890       return base;
4891     }
4892   }
4893 
4894   if (allow_exec) {
4895     DWORD old_protect;
4896     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4897     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4898 
4899     if (!res) {
4900       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4901       // Don't consider this a hard error, on IA32 even if the
4902       // VirtualProtect fails, we should still be able to execute
4903       CloseHandle(hFile);
4904       return base;
4905     }
4906   }
4907 
4908   if (CloseHandle(hFile) == 0) {
4909     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4910     return base;
4911   }
4912 
4913   return base;
4914 }
4915 
4916 
4917 // Remap a block of memory.
4918 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4919                           char *addr, size_t bytes, bool read_only,
4920                           bool allow_exec) {
4921   // This OS does not allow existing memory maps to be remapped so we
4922   // have to unmap the memory before we remap it.
4923   if (!os::unmap_memory(addr, bytes)) {
4924     return NULL;
4925   }
4926 
4927   // There is a very small theoretical window between the unmap_memory()
4928   // call above and the map_memory() call below where a thread in native
4929   // code may be able to access an address that is no longer mapped.
4930 
4931   return os::map_memory(fd, file_name, file_offset, addr, bytes,
4932                         read_only, allow_exec);
4933 }
4934 
4935 
4936 // Unmap a block of memory.
4937 // Returns true=success, otherwise false.
4938 
4939 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4940   MEMORY_BASIC_INFORMATION mem_info;
4941   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4942     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
4943     return false;
4944   }
4945 
4946   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4947   // Instead, executable region was allocated using VirtualAlloc(). See
4948   // pd_map_memory() above.
4949   //
  // The following flags should match the 'exec_access' flags used for
4951   // VirtualProtect() in pd_map_memory().
4952   if (mem_info.Protect == PAGE_EXECUTE_READ ||
4953       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4954     return pd_release_memory(addr, bytes);
4955   }
4956 
4957   BOOL result = UnmapViewOfFile(addr);
4958   if (result == 0) {
4959     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
4960     return false;
4961   }
4962   return true;
4963 }
4964 
4965 void os::pause() {
4966   char filename[MAX_PATH];
4967   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4969   } else {
4970     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4971   }
4972 
4973   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4974   if (fd != -1) {
4975     struct stat buf;
4976     ::close(fd);
4977     while (::stat(filename, &buf) == 0) {
4978       Sleep(100);
4979     }
4980   } else {
4981     jio_fprintf(stderr,
4982                 "Could not open pause file '%s', continuing immediately.\n", filename);
4983   }
4984 }
4985 
4986 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
4987   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
4988 }
4989 
4990 // See the caveats for this class in os_windows.hpp
// Protects the callback call so that a raised OS EXCEPTION causes a jump back
// into this method, which then returns false. If no OS EXCEPTION was raised,
// returns true.
4994 // The callback is supposed to provide the method that should be protected.
4995 //
4996 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
4997   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
4998   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
4999          "crash_protection already set?");
5000 
5001   bool success = true;
5002   __try {
5003     WatcherThread::watcher_thread()->set_crash_protection(this);
5004     cb.call();
5005   } __except(EXCEPTION_EXECUTE_HANDLER) {
5006     // only for protection, nothing to do
5007     success = false;
5008   }
5009   WatcherThread::watcher_thread()->set_crash_protection(NULL);
5010   return success;
5011 }
5012 
5013 // An Event wraps a win32 "CreateEvent" kernel handle.
5014 //
5015 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5016 //
5017 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5018 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5019 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5020 //     In addition, an unpark() operation might fetch the handle field, but the
5021 //     event could recycle between the fetch and the SetEvent() operation.
5022 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5023 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on a stale but recycled handle would be harmless, but in practice this might
5025 //     confuse other non-Sun code, so it's not a viable approach.
5026 //
5027 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5028 //     with the Event.  The event handle is never closed.  This could be construed
5029 //     as handle leakage, but only up to the maximum # of threads that have been extant
//     at any one time.  This shouldn't be an issue, as Windows platforms typically
5031 //     permit a process to have hundreds of thousands of open handles.
5032 //
5033 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5034 //     and release unused handles.
5035 //
5036 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5037 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5038 //
5039 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5040 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5041 //
5042 // We use (2).
5043 //
5044 // TODO-FIXME:
5045 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5046 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5047 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5048 // 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
5049 //     into a single win32 CreateEvent() handle.
5050 //
5051 // Assumption:
5052 //    Only one parker can exist on an event, which is why we allocate
5053 //    them per-thread. Multiple unparkers can coexist.
5054 //
5055 // _Event transitions in park()
5056 //   -1 => -1 : illegal
5057 //    1 =>  0 : pass - return immediately
5058 //    0 => -1 : block; then set _Event to 0 before returning
5059 //
5060 // _Event transitions in unpark()
5061 //    0 => 1 : just return
5062 //    1 => 1 : just return
5063 //   -1 => either 0 or 1; must signal target thread
5064 //         That is, we can safely transition _Event from -1 to either
5065 //         0 or 1.
5066 //
5067 // _Event serves as a restricted-range semaphore.
5068 //   -1 : thread is blocked, i.e. there is a waiter
5069 //    0 : neutral: thread is running or ready,
5070 //        could have been signaled after a wait started
5071 //    1 : signaled - thread is running or ready
5072 //
5073 // Another possible encoding of _Event would be with
5074 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5075 //
5076 
5077 int os::PlatformEvent::park(jlong Millis) {
5078   // Transitions for _Event:
5079   //   -1 => -1 : illegal
5080   //    1 =>  0 : pass - return immediately
5081   //    0 => -1 : block; then set _Event to 0 before returning
5082 
5083   guarantee(_ParkHandle != NULL , "Invariant");
5084   guarantee(Millis > 0          , "Invariant");
5085 
5086   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5087   // the initial park() operation.
5088   // Consider: use atomic decrement instead of CAS-loop
5089 
5090   int v;
5091   for (;;) {
5092     v = _Event;
5093     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5094   }
5095   guarantee((v == 0) || (v == 1), "invariant");
5096   if (v != 0) return OS_OK;
5097 
5098   // Do this the hard way by blocking ...
5099   // TODO: consider a brief spin here, gated on the success of recent
5100   // spin attempts by this thread.
5101   //
5102   // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
5104   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5105   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5106   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5107   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5108   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5109   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5110   // for the already waited time.  This policy does not admit any new outcomes.
5111   // In the future, however, we might want to track the accumulated wait time and
5112   // adjust Millis accordingly if we encounter a spurious wakeup.
5113 
5114   const int MAXTIMEOUT = 0x10000000;
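  // MAXTIMEOUT is 0x10000000 ms = 268,435,456 ms, roughly 74.5 hours, so a
  // longer Millis value is simply split into successive waits of at most
  // that length.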
5115   DWORD rv = WAIT_TIMEOUT;
5116   while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5118     if (Millis > MAXTIMEOUT) {
5119       prd = MAXTIMEOUT;
5120     }
5121     rv = ::WaitForSingleObject(_ParkHandle, prd);
5122     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5123     if (rv == WAIT_TIMEOUT) {
5124       Millis -= prd;
5125     }
5126   }
5127   v = _Event;
5128   _Event = 0;
5129   // see comment at end of os::PlatformEvent::park() below:
5130   OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
5132   // we return OS_OK indicating we awoke via unpark().
5133   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5134   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5135 }
5136 
5137 void os::PlatformEvent::park() {
5138   // Transitions for _Event:
5139   //   -1 => -1 : illegal
5140   //    1 =>  0 : pass - return immediately
5141   //    0 => -1 : block; then set _Event to 0 before returning
5142 
5143   guarantee(_ParkHandle != NULL, "Invariant");
5144   // Invariant: Only the thread associated with the Event/PlatformEvent
5145   // may call park().
5146   // Consider: use atomic decrement instead of CAS-loop
5147   int v;
5148   for (;;) {
5149     v = _Event;
5150     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5151   }
5152   guarantee((v == 0) || (v == 1), "invariant");
5153   if (v != 0) return;
5154 
5155   // Do this the hard way by blocking ...
5156   // TODO: consider a brief spin here, gated on the success of recent
5157   // spin attempts by this thread.
5158   while (_Event < 0) {
5159     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5160     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5161   }
5162 
  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case multiple
  // unpark() operations drove _Event up to 1.
5166   _Event = 0;
5167   OrderAccess::fence();
5168   guarantee(_Event >= 0, "invariant");
5169 }
5170 
5171 void os::PlatformEvent::unpark() {
5172   guarantee(_ParkHandle != NULL, "Invariant");
5173 
5174   // Transitions for _Event:
5175   //    0 => 1 : just return
5176   //    1 => 1 : just return
5177   //   -1 => either 0 or 1; must signal target thread
5178   //         That is, we can safely transition _Event from -1 to either
5179   //         0 or 1.
5180   // See also: "Semaphores in Plan 9" by Mullender & Cox
5181   //
5182   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5183   // that it will take two back-to-back park() calls for the owning
5184   // thread to block. This has the benefit of forcing a spurious return
5185   // from the first park() call after an unpark() call which will help
5186   // shake out uses of park() and unpark() without condition variables.
5187 
5188   if (Atomic::xchg(1, &_Event) >= 0) return;
5189 
5190   ::SetEvent(_ParkHandle);
5191 }
5192 
5193 
5194 // JSR166
5195 // -------------------------------------------------------
5196 
5197 // The Windows implementation of Park is very straightforward: Basic
5198 // operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically reuse the event inherited
5200 // from Monitor.
5201 
5202 void Parker::park(bool isAbsolute, jlong time) {
5203   guarantee(_ParkEvent != NULL, "invariant");
5204   // First, demultiplex/decode time arguments
5205   if (time < 0) { // don't wait
5206     return;
5207   } else if (time == 0 && !isAbsolute) {
5208     time = INFINITE;
5209   } else if (isAbsolute) {
5210     time -= os::javaTimeMillis(); // convert to relative time
5211     if (time <= 0) {  // already elapsed
5212       return;
5213     }
5214   } else { // relative
5215     time /= 1000000;  // Must coarsen from nanos to millis
5216     if (time == 0) {  // Wait for the minimal time unit if zero
5217       time = 1;
5218     }
5219   }
5220 
5221   JavaThread* thread = JavaThread::current();
5222 
5223   // Don't wait if interrupted or already triggered
5224   if (Thread::is_interrupted(thread, false) ||
5225       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5226     ResetEvent(_ParkEvent);
5227     return;
5228   } else {
5229     ThreadBlockInVM tbivm(thread);
5230     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5231     thread->set_suspend_equivalent();
5232 
5233     WaitForSingleObject(_ParkEvent, time);
5234     ResetEvent(_ParkEvent);
5235 
5236     // If externally suspended while waiting, re-suspend
5237     if (thread->handle_special_suspend_equivalent_condition()) {
5238       thread->java_suspend_self();
5239     }
5240   }
5241 }
5242 
5243 void Parker::unpark() {
5244   guarantee(_ParkEvent != NULL, "invariant");
5245   SetEvent(_ParkEvent);
5246 }
5247 
5248 // Run the specified command in a separate process. Return its exit value,
5249 // or -1 on failure (e.g. can't create a new process).
5250 int os::fork_and_exec(char* cmd) {
5251   STARTUPINFO si;
5252   PROCESS_INFORMATION pi;
5253 
5254   memset(&si, 0, sizeof(si));
5255   si.cb = sizeof(si);
5256   memset(&pi, 0, sizeof(pi));
5257   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5258                             cmd,    // command line
5259                             NULL,   // process security attribute
5260                             NULL,   // thread security attribute
5261                             TRUE,   // inherits system handles
5262                             0,      // no creation flags
5263                             NULL,   // use parent's environment block
5264                             NULL,   // use parent's starting directory
5265                             &si,    // (in) startup information
5266                             &pi);   // (out) process information
5267 
5268   if (rslt) {
5269     // Wait until child process exits.
5270     WaitForSingleObject(pi.hProcess, INFINITE);
5271 
5272     DWORD exit_code;
5273     GetExitCodeProcess(pi.hProcess, &exit_code);
5274 
5275     // Close process and thread handles.
5276     CloseHandle(pi.hProcess);
5277     CloseHandle(pi.hThread);
5278 
5279     return (int)exit_code;
5280   } else {
5281     return -1;
5282   }
5283 }
5284 
5285 bool os::find(address addr, outputStream* st) {
5286   int offset = -1;
5287   bool result = false;
5288   char buf[256];
5289   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5290     st->print(PTR_FORMAT " ", addr);
5291     if (strlen(buf) < sizeof(buf) - 1) {
5292       char* p = strrchr(buf, '\\');
5293       if (p) {
5294         st->print("%s", p + 1);
5295       } else {
5296         st->print("%s", buf);
5297       }
5298     } else {
      // The library name is probably truncated. Let's omit the library name.
      // See also JDK-8147512.
5301     }
5302     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5303       st->print("::%s + 0x%x", buf, offset);
5304     }
5305     st->cr();
5306     result = true;
5307   }
5308   return result;
5309 }
5310 
5311 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5312   DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5313 
5314   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
5315     JavaThread* thread = JavaThread::current();
5316     PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5317     address addr = (address) exceptionRecord->ExceptionInformation[1];
5318 
5319     if (os::is_memory_serialize_page(thread, addr)) {
5320       return EXCEPTION_CONTINUE_EXECUTION;
5321     }
5322   }
5323 
5324   return EXCEPTION_CONTINUE_SEARCH;
5325 }
5326 
5327 // We don't build a headless jre for Windows
5328 bool os::is_headless_jre() { return false; }
5329 
5330 static jint initSock() {
5331   WSADATA wsadata;
5332 
5333   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5334     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5335                 ::GetLastError());
5336     return JNI_ERR;
5337   }
5338   return JNI_OK;
5339 }
5340 
5341 struct hostent* os::get_host_by_name(char* name) {
5342   return (struct hostent*)gethostbyname(name);
5343 }
5344 
5345 int os::socket_close(int fd) {
5346   return ::closesocket(fd);
5347 }
5348 
5349 int os::socket(int domain, int type, int protocol) {
5350   return ::socket(domain, type, protocol);
5351 }
5352 
5353 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5354   return ::connect(fd, him, len);
5355 }
5356 
5357 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5358   return ::recv(fd, buf, (int)nBytes, flags);
5359 }
5360 
5361 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5362   return ::send(fd, buf, (int)nBytes, flags);
5363 }
5364 
5365 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5366   return ::send(fd, buf, (int)nBytes, flags);
5367 }
5368 
5369 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5370 #if defined(IA32)
5371   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5372 #elif defined (AMD64)
5373   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5374 #endif
5375 
5376 // returns true if thread could be suspended,
5377 // false otherwise
5378 static bool do_suspend(HANDLE* h) {
5379   if (h != NULL) {
5380     if (SuspendThread(*h) != ~0) {
5381       return true;
5382     }
5383   }
5384   return false;
5385 }
5386 
5387 // resume the thread
5388 // calling resume on an active thread is a no-op
5389 static void do_resume(HANDLE* h) {
5390   if (h != NULL) {
5391     ResumeThread(*h);
5392   }
5393 }
5394 
5395 // retrieve a suspend/resume context capable handle
5396 // from the tid. Caller validates handle return value.
5397 void get_thread_handle_for_extended_context(HANDLE* h,
5398                                             OSThread::thread_id_t tid) {
5399   if (h != NULL) {
5400     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5401   }
5402 }
5403 
5404 // Thread sampling implementation
5405 //
5406 void os::SuspendedThreadTask::internal_do_task() {
5407   CONTEXT    ctxt;
5408   HANDLE     h = NULL;
5409 
5410   // get context capable handle for thread
5411   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5412 
5413   // sanity
5414   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5415     return;
5416   }
5417 
5418   // suspend the thread
5419   if (do_suspend(&h)) {
5420     ctxt.ContextFlags = sampling_context_flags;
5421     // get thread context
5422     GetThreadContext(h, &ctxt);
5423     SuspendedThreadTaskContext context(_thread, &ctxt);
5424     // pass context to Thread Sampling impl
5425     do_task(context);
5426     // resume thread
5427     do_resume(&h);
5428   }
5429 
5430   // close handle
5431   CloseHandle(h);
5432 }
5433 
5434 bool os::start_debugging(char *buf, int buflen) {
5435   int len = (int)strlen(buf);
5436   char *p = &buf[len];
5437 
5438   jio_snprintf(p, buflen-len,
5439              "\n\n"
5440              "Do you want to debug the problem?\n\n"
5441              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5442              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5443              "Otherwise, select 'No' to abort...",
5444              os::current_process_id(), os::current_thread_id());
5445 
5446   bool yes = os::message_box("Unexpected Error", buf);
5447 
5448   if (yes) {
5449     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5450     // exception. If VM is running inside a debugger, the debugger will
5451     // catch the exception. Otherwise, the breakpoint exception will reach
5452     // the default windows exception handler, which can spawn a debugger and
5453     // automatically attach to the dying VM.
5454     os::breakpoint();
5455     yes = false;
5456   }
5457   return yes;
5458 }
5459 
5460 void* os::get_default_process_handle() {
5461   return (void*)GetModuleHandle(NULL);
5462 }
5463 
5464 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5465 // which is used to find statically linked in agents.
// Additionally, for Windows, it takes into account __stdcall names.
5467 // Parameters:
5468 //            sym_name: Symbol in library we are looking for
5469 //            lib_name: Name of library to look in, NULL for shared libs.
5470 //            is_absolute_path == true if lib_name is absolute path to agent
5471 //                                     such as "C:/a/b/L.dll"
5472 //            == false if only the base name of the library is passed in
5473 //               such as "L"
5474 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5475                                     bool is_absolute_path) {
5476   char *agent_entry_name;
5477   size_t len;
5478   size_t name_len;
5479   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5480   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5481   const char *start;
5482 
5483   if (lib_name != NULL) {
5484     len = name_len = strlen(lib_name);
5485     if (is_absolute_path) {
5486       // Need to strip path, prefix and suffix
5487       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5488         lib_name = ++start;
5489       } else {
5490         // Need to check for drive prefix
5491         if ((start = strchr(lib_name, ':')) != NULL) {
5492           lib_name = ++start;
5493         }
5494       }
5495       if (len <= (prefix_len + suffix_len)) {
5496         return NULL;
5497       }
5498       lib_name += prefix_len;
5499       name_len = strlen(lib_name) - suffix_len;
5500     }
5501   }
5502   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5503   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5504   if (agent_entry_name == NULL) {
5505     return NULL;
5506   }
5507   if (lib_name != NULL) {
5508     const char *p = strrchr(sym_name, '@');
5509     if (p != NULL && p != sym_name) {
5510       // sym_name == _Agent_OnLoad@XX
5511       strncpy(agent_entry_name, sym_name, (p - sym_name));
5512       agent_entry_name[(p-sym_name)] = '\0';
5513       // agent_entry_name == _Agent_OnLoad
5514       strcat(agent_entry_name, "_");
5515       strncat(agent_entry_name, lib_name, name_len);
5516       strcat(agent_entry_name, p);
5517       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5518     } else {
5519       strcpy(agent_entry_name, sym_name);
5520       strcat(agent_entry_name, "_");
5521       strncat(agent_entry_name, lib_name, name_len);
5522     }
5523   } else {
5524     strcpy(agent_entry_name, sym_name);
5525   }
5526   return agent_entry_name;
5527 }
5528 
5529 #ifndef PRODUCT
5530 
// Test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it, to avoid possible by-chance use of that location).
// This is of course only a rough assumption; there is no guarantee that the vicinity of
// the previously allocated memory is actually available for allocation. The only failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one. A NULL return value at this point is not considered an error but may
// be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5542 void TestReserveMemorySpecial_test() {
5543   if (!UseLargePages) {
5544     if (VerboseInternalVMTests) {
5545       tty->print("Skipping test because large pages are disabled");
5546     }
5547     return;
5548   }
5549   // save current value of globals
5550   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5551   bool old_use_numa_interleaving = UseNUMAInterleaving;
5552 
5553   // set globals to make sure we hit the correct code path
5554   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5555 
5556   // do an allocation at an address selected by the OS to get a good one.
5557   const size_t large_allocation_size = os::large_page_size() * 4;
5558   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5559   if (result == NULL) {
5560     if (VerboseInternalVMTests) {
5561       tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5562                           large_allocation_size);
5563     }
5564   } else {
5565     os::release_memory_special(result, large_allocation_size);
5566 
5567     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5568     // we managed to get it once.
5569     const size_t expected_allocation_size = os::large_page_size();
5570     char* expected_location = result + os::large_page_size();
5571     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5572     if (actual_location == NULL) {
5573       if (VerboseInternalVMTests) {
5574         tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5575                             expected_location, large_allocation_size);
5576       }
5577     } else {
5578       // release memory
5579       os::release_memory_special(actual_location, expected_allocation_size);
5580       // only now check, after releasing any memory to avoid any leaks.
5581       assert(actual_location == expected_location,
5582              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5583              expected_location, expected_allocation_size, actual_location);
5584     }
5585   }
5586 
5587   // restore globals
5588   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5589   UseNUMAInterleaving = old_use_numa_interleaving;
5590 }
5591 #endif // PRODUCT
5592 
5593 /*
5594   All the defined signal names for Windows.
5595 
5596   NOTE that not all of these names are accepted by FindSignal!
5597 
5598   For various reasons some of these may be rejected at runtime.
5599 
5600   Here are the names currently accepted by a user of sun.misc.Signal with
5601   1.4.1 (ignoring potential interaction with use of chaining, etc):
5602 
5603      (LIST TBD)
5604 
5605 */
5606 int os::get_signal_number(const char* name) {
5607   static const struct {
5608     char* name;
5609     int   number;
5610   } siglabels [] =
5611     // derived from version 6.0 VC98/include/signal.h
5612   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5613   "FPE",        SIGFPE,         // floating point exception
5614   "SEGV",       SIGSEGV,        // segment violation
5615   "INT",        SIGINT,         // interrupt
5616   "TERM",       SIGTERM,        // software term signal from kill
5617   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5618   "ILL",        SIGILL};        // illegal instruction
5619   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5620     if (strcmp(name, siglabels[i].name) == 0) {
5621       return siglabels[i].number;
5622     }
5623   }
5624   return -1;
5625 }
5626 
5627 // Fast current thread access
5628 
5629 int os::win32::_thread_ptr_offset = 0;
5630 
5631 static void call_wrapper_dummy() {}
5632 
5633 // We need to call the os_exception_wrapper once so that it sets
5634 // up the offset from FS of the thread pointer.
5635 void os::win32::initialize_thread_ptr_offset() {
5636   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5637                            NULL, NULL, NULL, NULL);
5638 }