1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "classfile/classLoader.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/disassembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "jvm_windows.h"
  38 #include "logging/log.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/filemap.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "os_share_windows.hpp"
  43 #include "os_windows.inline.hpp"
  44 #include "prims/jniFastGetField.hpp"
  45 #include "prims/jvm.h"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.inline.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "semaphore_windows.hpp"
  67 #include "services/attachListener.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/growableArray.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 
  77 #ifdef _DEBUG
  78 #include <crtdbg.h>
  79 #endif
  80 
  81 
  82 #include <windows.h>
  83 #include <sys/types.h>
  84 #include <sys/stat.h>
  85 #include <sys/timeb.h>
  86 #include <objidl.h>
  87 #include <shlobj.h>
  88 
  89 #include <malloc.h>
  90 #include <signal.h>
  91 #include <direct.h>
  92 #include <errno.h>
  93 #include <fcntl.h>
  94 #include <io.h>
  95 #include <process.h>              // For _beginthreadex(), _endthreadex()
  96 #include <imagehlp.h>             // For os::dll_address_to_function_name
  97 // for enumerating dll libraries
  98 #include <vdmdbg.h>
  99 
 100 // for timer info max values which include all bits
 101 #define ALL_64_BITS CONST64(-1)
 102 
 103 // For DLL loading/load error detection
 104 // Values of PE COFF
 105 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 106 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 107 
 108 static HANDLE main_process;
 109 static HANDLE main_thread;
 110 static int    main_thread_id;
 111 
 112 static FILETIME process_creation_time;
 113 static FILETIME process_exit_time;
 114 static FILETIME process_user_time;
 115 static FILETIME process_kernel_time;
 116 
 117 #ifdef _M_IA64
 118   #define __CPU__ ia64
 119 #else
 120   #ifdef _M_AMD64
 121     #define __CPU__ amd64
 122   #else
 123     #define __CPU__ i486
 124   #endif
 125 #endif
 126 
 127 // save DLL module handle, used by GetModuleFileName
 128 
 129 HINSTANCE vm_lib_handle;
 130 
 131 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 132   switch (reason) {
 133   case DLL_PROCESS_ATTACH:
 134     vm_lib_handle = hinst;
 135     if (ForceTimeHighResolution) {
 136       timeBeginPeriod(1L);
 137     }
 138     break;
 139   case DLL_PROCESS_DETACH:
 140     if (ForceTimeHighResolution) {
 141       timeEndPeriod(1L);
 142     }
 143     break;
 144   default:
 145     break;
 146   }
 147   return true;
 148 }
 149 
 150 static inline double fileTimeAsDouble(FILETIME* time) {
 151   const double high  = (double) ((unsigned int) ~0);
 152   const double split = 10000000.0;
 153   double result = (time->dwLowDateTime / split) +
 154                    time->dwHighDateTime * (high/split);
 155   return result;
 156 }
 157 
 158 // Implementation of os
 159 
 160 bool os::unsetenv(const char* name) {
 161   assert(name != NULL, "Null pointer");
 162   return (SetEnvironmentVariable(name, NULL) == TRUE);
 163 }
 164 
 165 // No setuid programs under Windows.
 166 bool os::have_special_privileges() {
 167   return false;
 168 }
 169 
 170 
// This method is called as a periodic task to check for misbehaving JNI
// applications under CheckJNI; any periodic checks can be added here.
// On Windows it currently does nothing.
 174 void os::run_periodic_checks() {
 175   return;
 176 }
 177 
 178 // previous UnhandledExceptionFilter, if there is one
 179 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 180 
 181 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 182 
 183 void os::init_system_properties_values() {
 184   // sysclasspath, java_home, dll_dir
 185   {
 186     char *home_path;
 187     char *dll_path;
 188     char *pslash;
 189     char *bin = "\\bin";
 190     char home_dir[MAX_PATH + 1];
 191     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 192 
 193     if (alt_home_dir != NULL)  {
 194       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 195       home_dir[MAX_PATH] = '\0';
 196     } else {
 197       os::jvm_path(home_dir, sizeof(home_dir));
 198       // Found the full path to jvm.dll.
 199       // Now cut the path to <java_home>/jre if we can.
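      // For example (illustrative path, not taken from a real install):
      //   jvm.dll found at   D:\jdk\jre\bin\server\jvm.dll
      //   after the cuts below, home_dir becomes   D:\jdk\jre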
 200       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 201       pslash = strrchr(home_dir, '\\');
 202       if (pslash != NULL) {
 203         *pslash = '\0';                   // get rid of \{client|server}
 204         pslash = strrchr(home_dir, '\\');
 205         if (pslash != NULL) {
 206           *pslash = '\0';                 // get rid of \bin
 207         }
 208       }
 209     }
 210 
 211     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 212     if (home_path == NULL) {
 213       return;
 214     }
 215     strcpy(home_path, home_dir);
 216     Arguments::set_java_home(home_path);
 217     FREE_C_HEAP_ARRAY(char, home_path);
 218 
 219     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 220                                 mtInternal);
 221     if (dll_path == NULL) {
 222       return;
 223     }
 224     strcpy(dll_path, home_dir);
 225     strcat(dll_path, bin);
 226     Arguments::set_dll_dir(dll_path);
 227     FREE_C_HEAP_ARRAY(char, dll_path);
 228 
 229     if (!set_boot_path('\\', ';')) {
 230       return;
 231     }
 232   }
 233 
 234 // library_path
 235 #define EXT_DIR "\\lib\\ext"
 236 #define BIN_DIR "\\bin"
 237 #define PACKAGE_DIR "\\Sun\\Java"
 238   {
 239     // Win32 library search order (See the documentation for LoadLibrary):
 240     //
 241     // 1. The directory from which application is loaded.
 242     // 2. The system wide Java Extensions directory (Java only)
 243     // 3. System directory (GetSystemDirectory)
 244     // 4. Windows directory (GetWindowsDirectory)
 245     // 5. The PATH environment variable
 246     // 6. The current directory
 247 
 248     char *library_path;
 249     char tmp[MAX_PATH];
 250     char *path_str = ::getenv("PATH");
 251 
 252     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 253                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 254 
 255     library_path[0] = '\0';
 256 
 257     GetModuleFileName(NULL, tmp, sizeof(tmp));
 258     *(strrchr(tmp, '\\')) = '\0';
 259     strcat(library_path, tmp);
 260 
 261     GetWindowsDirectory(tmp, sizeof(tmp));
 262     strcat(library_path, ";");
 263     strcat(library_path, tmp);
 264     strcat(library_path, PACKAGE_DIR BIN_DIR);
 265 
 266     GetSystemDirectory(tmp, sizeof(tmp));
 267     strcat(library_path, ";");
 268     strcat(library_path, tmp);
 269 
 270     GetWindowsDirectory(tmp, sizeof(tmp));
 271     strcat(library_path, ";");
 272     strcat(library_path, tmp);
 273 
 274     if (path_str) {
 275       strcat(library_path, ";");
 276       strcat(library_path, path_str);
 277     }
 278 
 279     strcat(library_path, ";.");
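    // At this point library_path looks roughly like (illustrative Windows paths):
    //   <dir of java.exe>;C:\Windows\Sun\Java\bin;C:\Windows\system32;C:\Windows;%PATH%;.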
 280 
 281     Arguments::set_library_path(library_path);
 282     FREE_C_HEAP_ARRAY(char, library_path);
 283   }
 284 
 285   // Default extensions directory
 286   {
 287     char path[MAX_PATH];
 288     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 289     GetWindowsDirectory(path, MAX_PATH);
 290     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 291             path, PACKAGE_DIR, EXT_DIR);
 292     Arguments::set_ext_dirs(buf);
 293   }
 294   #undef EXT_DIR
 295   #undef BIN_DIR
 296   #undef PACKAGE_DIR
 297 
 298 #ifndef _WIN64
 299   // set our UnhandledExceptionFilter and save any previous one
 300   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 301 #endif
 302 
 303   // Done
 304   return;
 305 }
 306 
 307 void os::breakpoint() {
 308   DebugBreak();
 309 }
 310 
 311 // Invoked from the BREAKPOINT Macro
 312 extern "C" void breakpoint() {
 313   os::breakpoint();
 314 }
 315 
// The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 317 // So far, this method is only used by Native Memory Tracking, which is
 318 // only supported on Windows XP or later.
 319 //
 320 int os::get_native_stack(address* stack, int frames, int toSkip) {
 321   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 322   for (int index = captured; index < frames; index ++) {
 323     stack[index] = NULL;
 324   }
 325   return captured;
 326 }
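
// Example use (illustrative only): capture up to 8 return addresses without
// skipping any additional frames beyond this function itself:
//
//   address frames[8];
//   int n = os::get_native_stack(frames, 8, 0 /* toSkip */);
//   // frames[0..n-1] hold return addresses; frames[n..7] are NULL.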
 327 
 328 
 329 // os::current_stack_base()
 330 //
 331 //   Returns the base of the stack, which is the stack's
 332 //   starting address.  This function must be called
 333 //   while running on the stack of the thread being queried.
 334 
 335 address os::current_stack_base() {
 336   MEMORY_BASIC_INFORMATION minfo;
 337   address stack_bottom;
 338   size_t stack_size;
 339 
 340   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 341   stack_bottom =  (address)minfo.AllocationBase;
 342   stack_size = minfo.RegionSize;
 343 
 344   // Add up the sizes of all the regions with the same
 345   // AllocationBase.
 346   while (1) {
 347     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 348     if (stack_bottom == (address)minfo.AllocationBase) {
 349       stack_size += minfo.RegionSize;
 350     } else {
 351       break;
 352     }
 353   }
 354 
 355 #ifdef _M_IA64
 356   // IA64 has memory and register stacks
 357   //
 358   // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
 359   // at thread creation (1MB backing store growing upwards, 1MB memory stack
 360   // growing downwards, 2MB summed up)
 361   //
 362   // ...
 363   // ------- top of stack (high address) -----
 364   // |
 365   // |      1MB
 366   // |      Backing Store (Register Stack)
 367   // |
 368   // |         / \
 369   // |          |
 370   // |          |
 371   // |          |
 372   // ------------------------ stack base -----
 373   // |      1MB
 374   // |      Memory Stack
 375   // |
 376   // |          |
 377   // |          |
 378   // |          |
 379   // |         \ /
 380   // |
 381   // ----- bottom of stack (low address) -----
 382   // ...
 383 
 384   stack_size = stack_size / 2;
 385 #endif
 386   return stack_bottom + stack_size;
 387 }
 388 
 389 size_t os::current_stack_size() {
 390   size_t sz;
 391   MEMORY_BASIC_INFORMATION minfo;
 392   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 393   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 394   return sz;
 395 }
 396 
 397 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 398   const struct tm* time_struct_ptr = localtime(clock);
 399   if (time_struct_ptr != NULL) {
 400     *res = *time_struct_ptr;
 401     return res;
 402   }
 403   return NULL;
 404 }
 405 
 406 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 407   const struct tm* time_struct_ptr = gmtime(clock);
 408   if (time_struct_ptr != NULL) {
 409     *res = *time_struct_ptr;
 410     return res;
 411   }
 412   return NULL;
 413 }
 414 
 415 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 416 
 417 // Thread start routine for all newly created threads
 418 static unsigned __stdcall thread_native_entry(Thread* thread) {
 419   // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially noticeable for
  // processors with hyperthreading technology.
 424   static int counter = 0;
 425   int pid = os::current_process_id();
 426   _alloca(((pid ^ counter++) & 7) * 128);
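  // (The expression above shifts the frame by (0..7) * 128 bytes, i.e. by up
  //  to 896 bytes or 14 cache lines of 64 bytes; an illustrative calculation
  //  based only on the constants in the line above.)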
 427 
 428   thread->initialize_thread_current();
 429 
 430   OSThread* osthr = thread->osthread();
 431   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 432 
 433   if (UseNUMA) {
 434     int lgrp_id = os::numa_get_group_id();
 435     if (lgrp_id != -1) {
 436       thread->set_lgrp_id(lgrp_id);
 437     }
 438   }
 439 
 440   // Diagnostic code to investigate JDK-6573254
 441   int res = 30115;  // non-java thread
 442   if (thread->is_Java_thread()) {
 443     res = 20115;    // java thread
 444   }
 445 
 446   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 447 
  // Install a win32 structured exception handler around every thread created
  // by the VM, so the VM can generate an error dump when an exception occurs
  // in a non-Java thread (e.g. the VM thread).
 451   __try {
 452     thread->run();
 453   } __except(topLevelExceptionFilter(
 454                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 455     // Nothing to do.
 456   }
 457 
 458   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 459 
 460   // One less thread is executing
 461   // When the VMThread gets here, the main thread may have already exited
 462   // which frees the CodeHeap containing the Atomic::add code
 463   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 464     Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
 465   }
 466 
 467   // If a thread has not deleted itself ("delete this") as part of its
 468   // termination sequence, we have to ensure thread-local-storage is
 469   // cleared before we actually terminate. No threads should ever be
 470   // deleted asynchronously with respect to their termination.
 471   if (Thread::current_or_null_safe() != NULL) {
 472     assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
 473     thread->clear_thread_current();
 474   }
 475 
 476   // Thread must not return from exit_process_or_thread(), but if it does,
 477   // let it proceed to exit normally
 478   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 479 }
 480 
 481 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 482                                   int thread_id) {
 483   // Allocate the OSThread object
 484   OSThread* osthread = new OSThread(NULL, NULL);
 485   if (osthread == NULL) return NULL;
 486 
 487   // Initialize support for Java interrupts
 488   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 489   if (interrupt_event == NULL) {
 490     delete osthread;
 491     return NULL;
 492   }
 493   osthread->set_interrupt_event(interrupt_event);
 494 
 495   // Store info on the Win32 thread into the OSThread
 496   osthread->set_thread_handle(thread_handle);
 497   osthread->set_thread_id(thread_id);
 498 
 499   if (UseNUMA) {
 500     int lgrp_id = os::numa_get_group_id();
 501     if (lgrp_id != -1) {
 502       thread->set_lgrp_id(lgrp_id);
 503     }
 504   }
 505 
 506   // Initial thread state is INITIALIZED, not SUSPENDED
 507   osthread->set_state(INITIALIZED);
 508 
 509   return osthread;
 510 }
 511 
 512 
 513 bool os::create_attached_thread(JavaThread* thread) {
 514 #ifdef ASSERT
 515   thread->verify_not_published();
 516 #endif
 517   HANDLE thread_h;
 518   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 519                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 520     fatal("DuplicateHandle failed\n");
 521   }
 522   OSThread* osthread = create_os_thread(thread, thread_h,
 523                                         (int)current_thread_id());
 524   if (osthread == NULL) {
 525     return false;
 526   }
 527 
 528   // Initial thread state is RUNNABLE
 529   osthread->set_state(RUNNABLE);
 530 
 531   thread->set_osthread(osthread);
 532 
 533   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 534     os::current_thread_id());
 535 
 536   return true;
 537 }
 538 
 539 bool os::create_main_thread(JavaThread* thread) {
 540 #ifdef ASSERT
 541   thread->verify_not_published();
 542 #endif
 543   if (_starting_thread == NULL) {
 544     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 545     if (_starting_thread == NULL) {
 546       return false;
 547     }
 548   }
 549 
  // The primordial thread is runnable from the start
 551   _starting_thread->set_state(RUNNABLE);
 552 
 553   thread->set_osthread(_starting_thread);
 554   return true;
 555 }
 556 
 557 // Helper function to trace _beginthreadex attributes,
 558 //  similar to os::Posix::describe_pthread_attr()
 559 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 560                                                size_t stacksize, unsigned initflag) {
 561   stringStream ss(buf, buflen);
 562   if (stacksize == 0) {
 563     ss.print("stacksize: default, ");
 564   } else {
 565     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 566   }
 567   ss.print("flags: ");
 568   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 569   #define ALL(X) \
 570     X(CREATE_SUSPENDED) \
 571     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 572   ALL(PRINT_FLAG)
 573   #undef ALL
 574   #undef PRINT_FLAG
 575   return buf;
 576 }
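
// For a 1M stack created with both flags used below, the resulting trace
// string would look like this (illustrative output):
//   "stacksize: 1024k, flags: CREATE_SUSPENDED STACK_SIZE_PARAM_IS_A_RESERVATION "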
 577 
 578 // Allocate and initialize a new OSThread
 579 bool os::create_thread(Thread* thread, ThreadType thr_type,
 580                        size_t stack_size) {
 581   unsigned thread_id;
 582 
 583   // Allocate the OSThread object
 584   OSThread* osthread = new OSThread(NULL, NULL);
 585   if (osthread == NULL) {
 586     return false;
 587   }
 588 
 589   // Initialize support for Java interrupts
 590   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 591   if (interrupt_event == NULL) {
 592     delete osthread;
    return false;
 594   }
 595   osthread->set_interrupt_event(interrupt_event);
 596   osthread->set_interrupted(false);
 597 
 598   thread->set_osthread(osthread);
 599 
 600   if (stack_size == 0) {
 601     switch (thr_type) {
 602     case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
 604       if (JavaThread::stack_size_at_create() > 0) {
 605         stack_size = JavaThread::stack_size_at_create();
 606       }
 607       break;
 608     case os::compiler_thread:
 609       if (CompilerThreadStackSize > 0) {
 610         stack_size = (size_t)(CompilerThreadStackSize * K);
 611         break;
 612       } // else fall through:
 613         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 614     case os::vm_thread:
 615     case os::pgc_thread:
 616     case os::cgc_thread:
 617     case os::watcher_thread:
 618       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 619       break;
 620     }
 621   }
 622 
 623   // Create the Win32 thread
 624   //
  // Contrary to what the MSDN documentation says, "stack_size" in
  // _beginthreadex() does not specify the stack size. Instead, it specifies
  // the size of the initially committed space. The stack size is determined
  // by the PE header in the executable. If the committed "stack_size" is
  // larger than the default value in the PE header, the stack is rounded up
  // to the nearest multiple of 1MB. For example, if the launcher has a
  // default stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all; it only affects the initial
  // commitment. On the other hand, specifying a 'stack_size' larger than the
  // default value may cause a significant increase in memory usage, because
  // not only is the stack space rounded up to a multiple of 1MB, but the
  // entire space is also committed upfront.
  //
  // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that treats 'stack_size' as the stack size. However,
  // we are not supposed to call CreateThread() directly according to the MSDN
  // documentation, because the JVM uses the C runtime library. The good news
  // is that the flag appears to work with _beginthreadex() as well.
 643 
 644   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 645   HANDLE thread_handle =
 646     (HANDLE)_beginthreadex(NULL,
 647                            (unsigned)stack_size,
 648                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 649                            thread,
 650                            initflag,
 651                            &thread_id);
 652 
 653   char buf[64];
 654   if (thread_handle != NULL) {
 655     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 656       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 657   } else {
 658     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 659       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 660   }
 661 
 662   if (thread_handle == NULL) {
 663     // Need to clean up stuff we've allocated so far
 664     CloseHandle(osthread->interrupt_event());
 665     thread->set_osthread(NULL);
 666     delete osthread;
    return false;
 668   }
 669 
 670   Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
 671 
 672   // Store info on the Win32 thread into the OSThread
 673   osthread->set_thread_handle(thread_handle);
 674   osthread->set_thread_id(thread_id);
 675 
 676   // Initial thread state is INITIALIZED, not SUSPENDED
 677   osthread->set_state(INITIALIZED);
 678 
 679   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 680   return true;
 681 }
 682 
 683 
 684 // Free Win32 resources related to the OSThread
 685 void os::free_thread(OSThread* osthread) {
 686   assert(osthread != NULL, "osthread not set");
 687 
 688   // We are told to free resources of the argument thread,
 689   // but we can only really operate on the current thread.
 690   assert(Thread::current()->osthread() == osthread,
 691          "os::free_thread but not current thread");
 692 
 693   CloseHandle(osthread->thread_handle());
 694   CloseHandle(osthread->interrupt_event());
 695   delete osthread;
 696 }
 697 
 698 static jlong first_filetime;
 699 static jlong initial_performance_count;
 700 static jlong performance_frequency;
 701 
 702 
 703 jlong as_long(LARGE_INTEGER x) {
 704   jlong result = 0; // initialization to avoid warning
 705   set_high(&result, x.HighPart);
 706   set_low(&result, x.LowPart);
 707   return result;
 708 }
 709 
 710 
 711 jlong os::elapsed_counter() {
 712   LARGE_INTEGER count;
 713   QueryPerformanceCounter(&count);
 714   return as_long(count) - initial_performance_count;
 715 }
 716 
 717 
 718 jlong os::elapsed_frequency() {
 719   return performance_frequency;
 720 }
 721 
 722 
 723 julong os::available_memory() {
 724   return win32::available_memory();
 725 }
 726 
 727 julong os::win32::available_memory() {
 728   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
 729   // value if total memory is larger than 4GB
 730   MEMORYSTATUSEX ms;
 731   ms.dwLength = sizeof(ms);
 732   GlobalMemoryStatusEx(&ms);
 733 
 734   return (julong)ms.ullAvailPhys;
 735 }
 736 
 737 julong os::physical_memory() {
 738   return win32::physical_memory();
 739 }
 740 
 741 bool os::has_allocatable_memory_limit(julong* limit) {
 742   MEMORYSTATUSEX ms;
 743   ms.dwLength = sizeof(ms);
 744   GlobalMemoryStatusEx(&ms);
 745 #ifdef _LP64
 746   *limit = (julong)ms.ullAvailVirtual;
 747   return true;
 748 #else
 749   // Limit to 1400m because of the 2gb address space wall
 750   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 751   return true;
 752 #endif
 753 }
 754 
 755 int os::active_processor_count() {
 756   DWORD_PTR lpProcessAffinityMask = 0;
 757   DWORD_PTR lpSystemAffinityMask = 0;
 758   int proc_count = processor_count();
 759   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 760       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // The number of active processors is the number of set bits in the process affinity mask
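    // (The loop below clears the lowest set bit on each iteration via
    //  mask &= mask - 1; e.g. a mask of 0b1101 takes three iterations and
    //  reports 3 active processors. Illustrative note only.)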
 762     int bitcount = 0;
 763     while (lpProcessAffinityMask != 0) {
 764       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 765       bitcount++;
 766     }
 767     return bitcount;
 768   } else {
 769     return proc_count;
 770   }
 771 }
 772 
 773 void os::set_native_thread_name(const char *name) {
 774 
 775   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 776   //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; the debugger must observe
  // the exception below to show the correct name.
 780 
 781   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 782   struct {
 783     DWORD dwType;     // must be 0x1000
 784     LPCSTR szName;    // pointer to name (in user addr space)
 785     DWORD dwThreadID; // thread ID (-1=caller thread)
 786     DWORD dwFlags;    // reserved for future use, must be zero
 787   } info;
 788 
 789   info.dwType = 0x1000;
 790   info.szName = name;
 791   info.dwThreadID = -1;
 792   info.dwFlags = 0;
 793 
 794   __try {
 795     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 796   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 797 }
 798 
 799 bool os::distribute_processes(uint length, uint* distribution) {
 800   // Not yet implemented.
 801   return false;
 802 }
 803 
 804 bool os::bind_to_processor(uint processor_id) {
 805   // Not yet implemented.
 806   return false;
 807 }
 808 
 809 void os::win32::initialize_performance_counter() {
 810   LARGE_INTEGER count;
 811   QueryPerformanceFrequency(&count);
 812   performance_frequency = as_long(count);
 813   QueryPerformanceCounter(&count);
 814   initial_performance_count = as_long(count);
 815 }
 816 
 817 
 818 double os::elapsedTime() {
 819   return (double) elapsed_counter() / (double) elapsed_frequency();
 820 }
 821 
 822 
 823 // Windows format:
 824 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 825 // Java format:
 826 //   Java standards require the number of milliseconds since 1/1/1970
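//
// As a sanity check on the constant below: 1601-01-01 to 1970-01-01 spans
// 369 years containing 89 leap days, i.e. 134774 days = 11,644,473,600
// seconds, which is 116444736000000000 ticks of 100 ns.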
 827 
 828 // Constant offset - calculated using offset()
 829 static jlong  _offset   = 116444736000000000;
 830 // Fake time counter for reproducible results when debugging
 831 static jlong  fake_time = 0;
 832 
 833 #ifdef ASSERT
 834 // Just to be safe, recalculate the offset in debug mode
 835 static jlong _calculated_offset = 0;
 836 static int   _has_calculated_offset = 0;
 837 
 838 jlong offset() {
 839   if (_has_calculated_offset) return _calculated_offset;
 840   SYSTEMTIME java_origin;
 841   java_origin.wYear          = 1970;
 842   java_origin.wMonth         = 1;
 843   java_origin.wDayOfWeek     = 0; // ignored
 844   java_origin.wDay           = 1;
 845   java_origin.wHour          = 0;
 846   java_origin.wMinute        = 0;
 847   java_origin.wSecond        = 0;
 848   java_origin.wMilliseconds  = 0;
 849   FILETIME jot;
 850   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 851     fatal("Error = %d\nWindows error", GetLastError());
 852   }
 853   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 854   _has_calculated_offset = 1;
 855   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 856   return _calculated_offset;
 857 }
 858 #else
 859 jlong offset() {
 860   return _offset;
 861 }
 862 #endif
 863 
 864 jlong windows_to_java_time(FILETIME wt) {
 865   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 866   return (a - offset()) / 10000;
 867 }
 868 
// Returns time ticks in tenths of microseconds (i.e. 100 ns units)
 870 jlong windows_to_time_ticks(FILETIME wt) {
 871   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 872   return (a - offset());
 873 }
 874 
 875 FILETIME java_to_windows_time(jlong l) {
 876   jlong a = (l * 10000) + offset();
 877   FILETIME result;
 878   result.dwHighDateTime = high(a);
 879   result.dwLowDateTime  = low(a);
 880   return result;
 881 }
 882 
 883 bool os::supports_vtime() { return true; }
 884 bool os::enable_vtime() { return false; }
 885 bool os::vtime_enabled() { return false; }
 886 
 887 double os::elapsedVTime() {
 888   FILETIME created;
 889   FILETIME exited;
 890   FILETIME kernel;
 891   FILETIME user;
 892   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 893     // the resolution of windows_to_java_time() should be sufficient (ms)
 894     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 895   } else {
 896     return elapsedTime();
 897   }
 898 }
 899 
 900 jlong os::javaTimeMillis() {
 901   if (UseFakeTimers) {
 902     return fake_time++;
 903   } else {
 904     FILETIME wt;
 905     GetSystemTimeAsFileTime(&wt);
 906     return windows_to_java_time(wt);
 907   }
 908 }
 909 
 910 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 911   FILETIME wt;
 912   GetSystemTimeAsFileTime(&wt);
 913   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
  jlong secs = jlong(ticks / 10000000); // 10,000,000 100ns ticks per second
 915   seconds = secs;
 916   nanos = jlong(ticks - (secs*10000000)) * 100;
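  // For example (illustrative numbers): ticks = 12345678901 gives
  // secs = 1234 and nanos = 567890100; nanos is always below 1e9.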
 917 }
 918 
 919 jlong os::javaTimeNanos() {
  LARGE_INTEGER current_count;
  QueryPerformanceCounter(&current_count);
  double current = as_long(current_count);
  double freq = performance_frequency;
  jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
  return time;
 926 }
 927 
 928 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 929   jlong freq = performance_frequency;
 930   if (freq < NANOSECS_PER_SEC) {
 931     // the performance counter is 64 bits and we will
 932     // be multiplying it -- so no wrap in 64 bits
 933     info_ptr->max_value = ALL_64_BITS;
 934   } else if (freq > NANOSECS_PER_SEC) {
 935     // use the max value the counter can reach to
 936     // determine the max value which could be returned
 937     julong max_counter = (julong)ALL_64_BITS;
 938     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 939   } else {
 940     // the performance counter is 64 bits and we will
 941     // be using it directly -- so no wrap in 64 bits
 942     info_ptr->max_value = ALL_64_BITS;
 943   }
 944 
 945   // using a counter, so no skipping
 946   info_ptr->may_skip_backward = false;
 947   info_ptr->may_skip_forward = false;
 948 
 949   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 950 }
 951 
 952 char* os::local_time_string(char *buf, size_t buflen) {
 953   SYSTEMTIME st;
 954   GetLocalTime(&st);
 955   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 956                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 957   return buf;
 958 }
 959 
 960 bool os::getTimesSecs(double* process_real_time,
 961                       double* process_user_time,
 962                       double* process_system_time) {
 963   HANDLE h_process = GetCurrentProcess();
 964   FILETIME create_time, exit_time, kernel_time, user_time;
 965   BOOL result = GetProcessTimes(h_process,
 966                                 &create_time,
 967                                 &exit_time,
 968                                 &kernel_time,
 969                                 &user_time);
 970   if (result != 0) {
 971     FILETIME wt;
 972     GetSystemTimeAsFileTime(&wt);
 973     jlong rtc_millis = windows_to_java_time(wt);
 974     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 975     *process_user_time =
 976       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 977     *process_system_time =
 978       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
 979     return true;
 980   } else {
 981     return false;
 982   }
 983 }
 984 
 985 void os::shutdown() {
 986   // allow PerfMemory to attempt cleanup of any persistent resources
 987   perfMemory_exit();
 988 
 989   // flush buffered output, finish log files
 990   ostream_abort();
 991 
 992   // Check for abort hook
 993   abort_hook_t abort_hook = Arguments::abort_hook();
 994   if (abort_hook != NULL) {
 995     abort_hook();
 996   }
 997 }
 998 
 999 
1000 static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1001                                          PMINIDUMP_EXCEPTION_INFORMATION,
1002                                          PMINIDUMP_USER_STREAM_INFORMATION,
1003                                          PMINIDUMP_CALLBACK_INFORMATION);
1004 
1005 static HANDLE dumpFile = NULL;
1006 
1007 // Check if dump file can be created.
1008 void os::check_dump_limit(char* buffer, size_t buffsz) {
1009   bool status = true;
1010   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1011     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1012     status = false;
1013   }
1014 
1015 #ifndef ASSERT
1016   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1017     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1018     status = false;
1019   }
1020 #endif
1021 
1022   if (status) {
1023     const char* cwd = get_current_directory(NULL, 0);
1024     int pid = current_process_id();
1025     if (cwd != NULL) {
1026       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1027     } else {
1028       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1029     }
1030 
1031     if (dumpFile == NULL &&
1032        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1033                  == INVALID_HANDLE_VALUE) {
1034       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1035       status = false;
1036     }
1037   }
1038   VMError::record_coredump_status(buffer, status);
1039 }
1040 
1041 void os::abort(bool dump_core, void* siginfo, const void* context) {
1042   HINSTANCE dbghelp;
1043   EXCEPTION_POINTERS ep;
1044   MINIDUMP_EXCEPTION_INFORMATION mei;
1045   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1046 
1047   HANDLE hProcess = GetCurrentProcess();
1048   DWORD processId = GetCurrentProcessId();
1049   MINIDUMP_TYPE dumpType;
1050 
1051   shutdown();
1052   if (!dump_core || dumpFile == NULL) {
1053     if (dumpFile != NULL) {
1054       CloseHandle(dumpFile);
1055     }
1056     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1057   }
1058 
1059   dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
1060 
1061   if (dbghelp == NULL) {
1062     jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
1063     CloseHandle(dumpFile);
1064     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1065   }
1066 
1067   _MiniDumpWriteDump =
1068       CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1069                                     PMINIDUMP_EXCEPTION_INFORMATION,
1070                                     PMINIDUMP_USER_STREAM_INFORMATION,
1071                                     PMINIDUMP_CALLBACK_INFORMATION),
1072                                     GetProcAddress(dbghelp,
1073                                     "MiniDumpWriteDump"));
1074 
1075   if (_MiniDumpWriteDump == NULL) {
1076     jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
1077     CloseHandle(dumpFile);
1078     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1079   }
1080 
1081   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1082     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1083 
1084   if (siginfo != NULL && context != NULL) {
1085     ep.ContextRecord = (PCONTEXT) context;
1086     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1087 
1088     mei.ThreadId = GetCurrentThreadId();
1089     mei.ExceptionPointers = &ep;
1090     pmei = &mei;
1091   } else {
1092     pmei = NULL;
1093   }
1094 
  // Older versions of dbghelp.dll (the one shipped with Win2003, for example) may not support all
  // the dump types we really want. If the first call fails, fall back to using just MiniDumpWithFullMemory.
1097   if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
1098       _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
1099     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1100   }
1101   CloseHandle(dumpFile);
1102   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1103 }
1104 
1105 // Die immediately, no exit hook, no abort hook, no cleanup.
1106 void os::die() {
1107   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1108 }
1109 
1110 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1111 //  * dirent_md.c       1.15 00/02/02
1112 //
1113 // The declarations for DIR and struct dirent are in jvm_win32.h.
1114 
1115 // Caller must have already run dirname through JVM_NativePath, which removes
1116 // duplicate slashes and converts all instances of '/' into '\\'.
1117 
1118 DIR * os::opendir(const char *dirname) {
1119   assert(dirname != NULL, "just checking");   // hotspot change
1120   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1121   DWORD fattr;                                // hotspot change
1122   char alt_dirname[4] = { 0, 0, 0, 0 };
1123 
1124   if (dirp == 0) {
1125     errno = ENOMEM;
1126     return 0;
1127   }
1128 
1129   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1130   // as a directory in FindFirstFile().  We detect this case here and
1131   // prepend the current drive name.
1132   //
1133   if (dirname[1] == '\0' && dirname[0] == '\\') {
1134     alt_dirname[0] = _getdrive() + 'A' - 1;
1135     alt_dirname[1] = ':';
1136     alt_dirname[2] = '\\';
1137     alt_dirname[3] = '\0';
1138     dirname = alt_dirname;
1139   }
1140 
1141   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1142   if (dirp->path == 0) {
1143     free(dirp);
1144     errno = ENOMEM;
1145     return 0;
1146   }
1147   strcpy(dirp->path, dirname);
1148 
1149   fattr = GetFileAttributes(dirp->path);
1150   if (fattr == 0xffffffff) {
1151     free(dirp->path);
1152     free(dirp);
1153     errno = ENOENT;
1154     return 0;
1155   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1156     free(dirp->path);
1157     free(dirp);
1158     errno = ENOTDIR;
1159     return 0;
1160   }
1161 
1162   // Append "*.*", or possibly "\\*.*", to path
1163   if (dirp->path[1] == ':' &&
1164       (dirp->path[2] == '\0' ||
1165       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1166     // No '\\' needed for cases like "Z:" or "Z:\"
1167     strcat(dirp->path, "*.*");
1168   } else {
1169     strcat(dirp->path, "\\*.*");
1170   }
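  //   e.g. "Z:"      becomes "Z:*.*"
  //        "C:\tmp"  becomes "C:\tmp\*.*"    (illustrative paths)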
1171 
1172   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1173   if (dirp->handle == INVALID_HANDLE_VALUE) {
1174     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1175       free(dirp->path);
1176       free(dirp);
1177       errno = EACCES;
1178       return 0;
1179     }
1180   }
1181   return dirp;
1182 }
1183 
1184 // parameter dbuf unused on Windows
1185 struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
1186   assert(dirp != NULL, "just checking");      // hotspot change
1187   if (dirp->handle == INVALID_HANDLE_VALUE) {
1188     return 0;
1189   }
1190 
1191   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1192 
1193   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1194     if (GetLastError() == ERROR_INVALID_HANDLE) {
1195       errno = EBADF;
1196       return 0;
1197     }
1198     FindClose(dirp->handle);
1199     dirp->handle = INVALID_HANDLE_VALUE;
1200   }
1201 
1202   return &dirp->dirent;
1203 }
1204 
1205 int os::closedir(DIR *dirp) {
1206   assert(dirp != NULL, "just checking");      // hotspot change
1207   if (dirp->handle != INVALID_HANDLE_VALUE) {
1208     if (!FindClose(dirp->handle)) {
1209       errno = EBADF;
1210       return -1;
1211     }
1212     dirp->handle = INVALID_HANDLE_VALUE;
1213   }
1214   free(dirp->path);
1215   free(dirp);
1216   return 0;
1217 }
1218 
// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory (java.io.tmpdir).
1221 const char* os::get_temp_directory() {
1222   static char path_buf[MAX_PATH];
1223   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1224     return path_buf;
1225   } else {
1226     path_buf[0] = '\0';
1227     return path_buf;
1228   }
1229 }
1230 
1231 static bool file_exists(const char* filename) {
1232   if (filename == NULL || strlen(filename) == 0) {
1233     return false;
1234   }
1235   return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1236 }
1237 
1238 bool os::dll_build_name(char *buffer, size_t buflen,
1239                         const char* pname, const char* fname) {
1240   bool retval = false;
1241   const size_t pnamelen = pname ? strlen(pname) : 0;
1242   const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1243 
1244   // Return error on buffer overflow.
1245   if (pnamelen + strlen(fname) + 10 > buflen) {
1246     return retval;
1247   }
1248 
1249   if (pnamelen == 0) {
1250     jio_snprintf(buffer, buflen, "%s.dll", fname);
1251     retval = true;
1252   } else if (c == ':' || c == '\\') {
1253     jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1254     retval = true;
1255   } else if (strchr(pname, *os::path_separator()) != NULL) {
1256     int n;
1257     char** pelements = split_path(pname, &n);
1258     if (pelements == NULL) {
1259       return false;
1260     }
1261     for (int i = 0; i < n; i++) {
1262       char* path = pelements[i];
1263       // Really shouldn't be NULL, but check can't hurt
1264       size_t plen = (path == NULL) ? 0 : strlen(path);
1265       if (plen == 0) {
1266         continue; // skip the empty path values
1267       }
1268       const char lastchar = path[plen - 1];
1269       if (lastchar == ':' || lastchar == '\\') {
1270         jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1271       } else {
1272         jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1273       }
1274       if (file_exists(buffer)) {
1275         retval = true;
1276         break;
1277       }
1278     }
1279     // release the storage
1280     for (int i = 0; i < n; i++) {
1281       if (pelements[i] != NULL) {
1282         FREE_C_HEAP_ARRAY(char, pelements[i]);
1283       }
1284     }
1285     if (pelements != NULL) {
1286       FREE_C_HEAP_ARRAY(char*, pelements);
1287     }
1288   } else {
1289     jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1290     retval = true;
1291   }
1292   return retval;
1293 }
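
// Example (illustrative arguments only):
//   char buf[MAX_PATH];
//   os::dll_build_name(buf, sizeof(buf), "C:\\mydir", "net");
//   // buf now contains "C:\mydir\net.dll"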
1294 
// Needs to be in an os-specific directory because Windows requires another
// header file, <direct.h>
1297 const char* os::get_current_directory(char *buf, size_t buflen) {
1298   int n = static_cast<int>(buflen);
1299   if (buflen > INT_MAX)  n = INT_MAX;
1300   return _getcwd(buf, n);
1301 }
1302 
1303 //-----------------------------------------------------------
1304 // Helper functions for fatal error handler
1305 #ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
1308 //
1309 static bool _addr_in_ntdll(address addr) {
1310   HMODULE hmod;
1311   MODULEINFO minfo;
1312 
1313   hmod = GetModuleHandle("NTDLL.DLL");
1314   if (hmod == NULL) return false;
1315   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1316                                           &minfo, sizeof(MODULEINFO))) {
1317     return false;
1318   }
1319 
1320   if ((addr >= minfo.lpBaseOfDll) &&
1321       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1322     return true;
1323   } else {
1324     return false;
1325   }
1326 }
1327 #endif
1328 
1329 struct _modinfo {
1330   address addr;
1331   char*   full_path;   // point to a char buffer
1332   int     buflen;      // size of the buffer
1333   address base_addr;
1334 };
1335 
1336 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1337                                   address top_address, void * param) {
1338   struct _modinfo *pmod = (struct _modinfo *)param;
1339   if (!pmod) return -1;
1340 
1341   if (base_addr   <= pmod->addr &&
1342       top_address > pmod->addr) {
1343     // if a buffer is provided, copy path name to the buffer
1344     if (pmod->full_path) {
1345       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1346     }
1347     pmod->base_addr = base_addr;
1348     return 1;
1349   }
1350   return 0;
1351 }
1352 
1353 bool os::dll_address_to_library_name(address addr, char* buf,
1354                                      int buflen, int* offset) {
1355   // buf is not optional, but offset is optional
1356   assert(buf != NULL, "sanity check");
1357 
// NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
//       return the full path to the DLL file; sometimes it returns the path
//       to the corresponding PDB file (debug info), and sometimes it only
//       returns a partial path, which makes life painful.
1362 
1363   struct _modinfo mi;
1364   mi.addr      = addr;
1365   mi.full_path = buf;
1366   mi.buflen    = buflen;
1367   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1368     // buf already contains path name
1369     if (offset) *offset = addr - mi.base_addr;
1370     return true;
1371   }
1372 
1373   buf[0] = '\0';
1374   if (offset) *offset = -1;
1375   return false;
1376 }
1377 
1378 bool os::dll_address_to_function_name(address addr, char *buf,
1379                                       int buflen, int *offset,
1380                                       bool demangle) {
1381   // buf is not optional, but offset is optional
1382   assert(buf != NULL, "sanity check");
1383 
1384   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1385     return true;
1386   }
1387   if (offset != NULL)  *offset  = -1;
1388   buf[0] = '\0';
1389   return false;
1390 }
1391 
1392 // save the start and end address of jvm.dll into param[0] and param[1]
1393 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1394                            address top_address, void * param) {
1395   if (!param) return -1;
1396 
1397   if (base_addr   <= (address)_locate_jvm_dll &&
1398       top_address > (address)_locate_jvm_dll) {
1399     ((address*)param)[0] = base_addr;
1400     ((address*)param)[1] = top_address;
1401     return 1;
1402   }
1403   return 0;
1404 }
1405 
1406 address vm_lib_location[2];    // start and end address of jvm.dll
1407 
1408 // check if addr is inside jvm.dll
1409 bool os::address_is_in_vm(address addr) {
1410   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1411     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1412       assert(false, "Can't find jvm module.");
1413       return false;
1414     }
1415   }
1416 
1417   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1418 }
1419 
1420 // print module info; param is outputStream*
1421 static int _print_module(const char* fname, address base_address,
1422                          address top_address, void* param) {
1423   if (!param) return -1;
1424 
1425   outputStream* st = (outputStream*)param;
1426 
1427   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1428   return 0;
1429 }
1430 
// Loads a .dll/.so and, in case of error, checks whether the .dll/.so was
// built for the same architecture that Hotspot is running on.
1434 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1435   void * result = LoadLibrary(name);
1436   if (result != NULL) {
1437     return result;
1438   }
1439 
1440   DWORD errcode = GetLastError();
1441   if (errcode == ERROR_MOD_NOT_FOUND) {
1442     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1443     ebuf[ebuflen - 1] = '\0';
1444     return NULL;
1445   }
1446 
  // Parse the dll below.
  // If we can read the dll info and find that the dll was built
  // for an architecture other than the one Hotspot is running on,
  // then report "DLL was built for a different architecture" in the buffer;
  // otherwise call os::lasterror to obtain the system error message.
1452 
1453   // Read system error message into ebuf
1454   // It may or may not be overwritten below (in the for loop and just above)
1455   lasterror(ebuf, (size_t) ebuflen);
1456   ebuf[ebuflen - 1] = '\0';
1457   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1458   if (fd < 0) {
1459     return NULL;
1460   }
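
  // The PE/COFF fields read below (standard layout, noted here for reference):
  //   file offset 0x3c       : 4-byte offset of the "PE\0\0" signature
  //   signature + 4, 2 bytes : COFF Machine field, e.g. 0x014c for
  //                            IMAGE_FILE_MACHINE_I386 or 0x8664 for
  //                            IMAGE_FILE_MACHINE_AMD64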
1461 
1462   uint32_t signature_offset;
1463   uint16_t lib_arch = 0;
1464   bool failed_to_get_lib_arch =
1465     ( // Go to position 3c in the dll
1466      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1467      ||
1468      // Read location of signature
1469      (sizeof(signature_offset) !=
1470      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1471      ||
1472      // Go to COFF File Header in dll
1473      // that is located after "signature" (4 bytes long)
1474      (os::seek_to_file_offset(fd,
1475      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1476      ||
1477      // Read field that contains code of architecture
1478      // that dll was built for
1479      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1480     );
1481 
1482   ::close(fd);
1483   if (failed_to_get_lib_arch) {
1484     // file i/o error - report os::lasterror(...) msg
1485     return NULL;
1486   }
1487 
1488   typedef struct {
1489     uint16_t arch_code;
1490     char* arch_name;
1491   } arch_t;
1492 
1493   static const arch_t arch_array[] = {
1494     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1495     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1496     {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
1497   };
1498 #if   (defined _M_IA64)
1499   static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
1500 #elif (defined _M_AMD64)
1501   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1502 #elif (defined _M_IX86)
1503   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1504 #else
1505   #error Method os::dll_load requires that one of following \
1506          is defined :_M_IA64,_M_AMD64 or _M_IX86
1507 #endif
1508 
1509 
  // Obtain strings for the printf operation:
  // lib_arch_str names the platform this .dll was built for,
  // running_arch_str names the platform Hotspot was built for.
1513   char *running_arch_str = NULL, *lib_arch_str = NULL;
1514   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1515     if (lib_arch == arch_array[i].arch_code) {
1516       lib_arch_str = arch_array[i].arch_name;
1517     }
1518     if (running_arch == arch_array[i].arch_code) {
1519       running_arch_str = arch_array[i].arch_name;
1520     }
1521   }
1522 
1523   assert(running_arch_str,
1524          "Didn't find running architecture code in arch_array");
1525 
1526   // If the architecture is right
1527   // but some other error took place - report os::lasterror(...) msg
1528   if (lib_arch == running_arch) {
1529     return NULL;
1530   }
1531 
1532   if (lib_arch_str != NULL) {
1533     ::_snprintf(ebuf, ebuflen - 1,
1534                 "Can't load %s-bit .dll on a %s-bit platform",
1535                 lib_arch_str, running_arch_str);
1536   } else {
    // don't know what architecture this dll was built for
1538     ::_snprintf(ebuf, ebuflen - 1,
1539                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1540                 lib_arch, running_arch_str);
1541   }
1542 
1543   return NULL;
1544 }
1545 
1546 void os::print_dll_info(outputStream *st) {
1547   st->print_cr("Dynamic libraries:");
1548   get_loaded_modules_info(_print_module, (void *)st);
1549 }
1550 
1551 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1552   HANDLE   hProcess;
1553 
1554 # define MAX_NUM_MODULES 128
1555   HMODULE     modules[MAX_NUM_MODULES];
1556   static char filename[MAX_PATH];
1557   int         result = 0;
1558 
1559   int pid = os::current_process_id();
1560   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1561                          FALSE, pid);
1562   if (hProcess == NULL) return 0;
1563 
1564   DWORD size_needed;
1565   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1566     CloseHandle(hProcess);
1567     return 0;
1568   }
1569 
1570   // number of modules that are currently loaded
1571   int num_modules = size_needed / sizeof(HMODULE);
1572 
1573   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1574     // Get Full pathname:
1575     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1576       filename[0] = '\0';
1577     }
1578 
1579     MODULEINFO modinfo;
1580     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1581       modinfo.lpBaseOfDll = NULL;
1582       modinfo.SizeOfImage = 0;
1583     }
1584 
1585     // Invoke callback function
1586     result = callback(filename, (address)modinfo.lpBaseOfDll,
1587                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1588     if (result) break;
1589   }
1590 
1591   CloseHandle(hProcess);
1592   return result;
1593 }
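
// A minimal illustrative callback (not used by the VM) that could be passed
// to get_loaded_modules_info() to count the loaded modules:
//
//   static int count_module(const char* name, address base, address top, void* param) {
//     ++*(int*)param;
//     return 0;   // 0 means: keep enumerating
//   }
//   int n = 0;
//   os::get_loaded_modules_info(count_module, &n);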
1594 
1595 bool os::get_host_name(char* buf, size_t buflen) {
1596   DWORD size = (DWORD)buflen;
1597   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1598 }
1599 
1600 void os::get_summary_os_info(char* buf, size_t buflen) {
1601   stringStream sst(buf, buflen);
1602   os::win32::print_windows_version(&sst);
1603   // chop off newline character
1604   char* nl = strchr(buf, '\n');
1605   if (nl != NULL) *nl = '\0';
1606 }
1607 
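// vsnprintf wrapper used by unified logging. The MS CRT vsnprintf returns a
// negative value when the buffer is too small, so in that case return the
// required length as computed by _vscprintf instead.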
1608 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1609   int ret = vsnprintf(buf, len, fmt, args);
1610   // Get the correct buffer size if buf is too small
1611   if (ret < 0) {
1612     return _vscprintf(fmt, args);
1613   }
1614   return ret;
1615 }
1616 
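// Return the last-modification time of 'filename'; asserts that stat() succeeds.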
1617 static inline time_t get_mtime(const char* filename) {
1618   struct stat st;
1619   int ret = os::stat(filename, &st);
1620   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
1621   return st.st_mtime;
1622 }
1623 
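// Return a value <0, ==0 or >0 if file1 is older than, the same age as,
// or newer than file2 (based on modification time).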
1624 int os::compare_file_modified_times(const char* file1, const char* file2) {
1625   time_t t1 = get_mtime(file1);
1626   time_t t2 = get_mtime(file2);
1627   return t1 - t2;
1628 }
1629 
1630 void os::print_os_info_brief(outputStream* st) {
1631   os::print_os_info(st);
1632 }
1633 
1634 void os::print_os_info(outputStream* st) {
1635 #ifdef ASSERT
1636   char buffer[1024];
1637   st->print("HostName: ");
1638   if (get_host_name(buffer, sizeof(buffer))) {
1639     st->print("%s ", buffer);
1640   } else {
1641     st->print("N/A ");
1642   }
1643 #endif
1644   st->print("OS:");
1645   os::win32::print_windows_version(st);
1646 }
1647 
1648 void os::win32::print_windows_version(outputStream* st) {
1649   OSVERSIONINFOEX osvi;
1650   VS_FIXEDFILEINFO *file_info;
1651   TCHAR kernel32_path[MAX_PATH];
1652   UINT len, ret;
1653 
1654   // Use the GetVersionEx information to see if we're on a server or
1655   // workstation edition of Windows. Starting with Windows 8.1 we can't
1656   // trust the OS version information returned by this API.
1657   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1658   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1659   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1660     st->print_cr("Call to GetVersionEx failed");
1661     return;
1662   }
1663   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1664 
1665   // Get the full path to \Windows\System32\kernel32.dll and use that for
1666   // determining what version of Windows we're running on.
1667   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1668   ret = GetSystemDirectory(kernel32_path, len);
1669   if (ret == 0 || ret > len) {
1670     st->print_cr("Call to GetSystemDirectory failed");
1671     return;
1672   }
1673   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1674 
1675   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1676   if (version_size == 0) {
1677     st->print_cr("Call to GetFileVersionInfoSize failed");
1678     return;
1679   }
1680 
1681   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1682   if (version_info == NULL) {
1683     st->print_cr("Failed to allocate version_info");
1684     return;
1685   }
1686 
1687   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1688     os::free(version_info);
1689     st->print_cr("Call to GetFileVersionInfo failed");
1690     return;
1691   }
1692 
1693   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1694     os::free(version_info);
1695     st->print_cr("Call to VerQueryValue failed");
1696     return;
1697   }
1698 
1699   int major_version = HIWORD(file_info->dwProductVersionMS);
1700   int minor_version = LOWORD(file_info->dwProductVersionMS);
1701   int build_number = HIWORD(file_info->dwProductVersionLS);
1702   int build_minor = LOWORD(file_info->dwProductVersionLS);
1703   int os_vers = major_version * 1000 + minor_version;
1704   os::free(version_info);
1705 
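  // Map the kernel32.dll product version (major * 1000 + minor) to the
  // corresponding Windows marketing name below.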
1706   st->print(" Windows ");
1707   switch (os_vers) {
1708 
1709   case 6000:
1710     if (is_workstation) {
1711       st->print("Vista");
1712     } else {
1713       st->print("Server 2008");
1714     }
1715     break;
1716 
1717   case 6001:
1718     if (is_workstation) {
1719       st->print("7");
1720     } else {
1721       st->print("Server 2008 R2");
1722     }
1723     break;
1724 
1725   case 6002:
1726     if (is_workstation) {
1727       st->print("8");
1728     } else {
1729       st->print("Server 2012");
1730     }
1731     break;
1732 
1733   case 6003:
1734     if (is_workstation) {
1735       st->print("8.1");
1736     } else {
1737       st->print("Server 2012 R2");
1738     }
1739     break;
1740 
1741   case 10000:
1742     if (is_workstation) {
1743       st->print("10");
1744     } else {
1745       st->print("Server 2016");
1746     }
1747     break;
1748 
1749   default:
1750     // Unrecognized Windows version, print out its major and minor versions
1751     st->print("%d.%d", major_version, minor_version);
1752     break;
1753   }
1754 
1755   // Retrieve SYSTEM_INFO from a GetNativeSystemInfo call so that we can
1756   // find out whether we are running on a 64-bit processor or not
1757   SYSTEM_INFO si;
1758   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1759   GetNativeSystemInfo(&si);
1760   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1761     st->print(" , 64 bit");
1762   }
1763 
1764   st->print(" Build %d", build_number);
1765   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1766   st->cr();
1767 }
1768 
1769 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1770   // Nothing to do for now.
1771 }
1772 
1773 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1774   HKEY key;
1775   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1776                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1777   if (status == ERROR_SUCCESS) {
1778     DWORD size = (DWORD)buflen;
1779     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1780     if (status != ERROR_SUCCESS) {
1781         strncpy(buf, "## __CPU__", buflen);
1782     }
1783     RegCloseKey(key);
1784   } else {
1785     // Fall back to generic CPU info
1786     strncpy(buf, "## __CPU__", buflen);
1787   }
1788 }
1789 
1790 void os::print_memory_info(outputStream* st) {
1791   st->print("Memory:");
1792   st->print(" %dk page", os::vm_page_size()>>10);
1793 
1794   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1795   // value if total memory is larger than 4GB
1796   MEMORYSTATUSEX ms;
1797   ms.dwLength = sizeof(ms);
1798   GlobalMemoryStatusEx(&ms);
1799 
1800   st->print(", physical " UINT64_FORMAT "k", os::physical_memory() >> 10);
1801   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
1802 
1803   st->print(", swap " UINT64_FORMAT "k", ms.ullTotalPageFile >> 10);
1804   st->print("(" UINT64_FORMAT "k free)", ms.ullAvailPageFile >> 10);
1805   st->cr();
1806 }
1807 
1808 void os::print_siginfo(outputStream *st, const void* siginfo) {
1809   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1810   st->print("siginfo:");
1811 
1812   char tmp[64];
1813   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1814     strcpy(tmp, "EXCEPTION_??");
1815   }
1816   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1817 
1818   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1819        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1820        er->NumberParameters >= 2) {
1821     switch (er->ExceptionInformation[0]) {
1822     case 0: st->print(", reading address"); break;
1823     case 1: st->print(", writing address"); break;
1824     case 8: st->print(", data execution prevention violation at address"); break;
1825     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1826                        er->ExceptionInformation[0]);
1827     }
1828     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1829   } else {
1830     int num = er->NumberParameters;
1831     if (num > 0) {
1832       st->print(", ExceptionInformation=");
1833       for (int i = 0; i < num; i++) {
1834         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1835       }
1836     }
1837   }
1838   st->cr();
1839 }
1840 
1841 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1842   // do nothing
1843 }
1844 
1845 static char saved_jvm_path[MAX_PATH] = {0};
1846 
1847 // Find the full path to the current module, jvm.dll
1848 void os::jvm_path(char *buf, jint buflen) {
1849   // Error checking.
1850   if (buflen < MAX_PATH) {
1851     assert(false, "must use a large-enough buffer");
1852     buf[0] = '\0';
1853     return;
1854   }
1855   // Lazy resolve the path to current module.
1856   if (saved_jvm_path[0] != 0) {
1857     strcpy(buf, saved_jvm_path);
1858     return;
1859   }
1860 
1861   buf[0] = '\0';
1862   if (Arguments::sun_java_launcher_is_altjvm()) {
1863     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1864     // for a JAVA_HOME environment variable and fix up the path so it
1865     // looks like jvm.dll is installed there (append a fake suffix
1866     // hotspot/jvm.dll).
1867     char* java_home_var = ::getenv("JAVA_HOME");
1868     if (java_home_var != NULL && java_home_var[0] != 0 &&
1869         strlen(java_home_var) < (size_t)buflen) {
1870       strncpy(buf, java_home_var, buflen);
1871 
1872       // determine if this is a legacy image or a modules image;
1873       // a modules image doesn't have a "jre" subdirectory
1874       size_t len = strlen(buf);
1875       char* jrebin_p = buf + len;
1876       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1877       if (0 != _access(buf, 0)) {
1878         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1879       }
1880       len = strlen(buf);
1881       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1882     }
1883   }
1884 
1885   if (buf[0] == '\0') {
1886     GetModuleFileName(vm_lib_handle, buf, buflen);
1887   }
1888   strncpy(saved_jvm_path, buf, MAX_PATH);
1889   saved_jvm_path[MAX_PATH - 1] = '\0';
1890 }
1891 
1892 
1893 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1894 #ifndef _WIN64
1895   st->print("_");
1896 #endif
1897 }
1898 
1899 
1900 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1901 #ifndef _WIN64
1902   st->print("@%d", args_size  * sizeof(int));
1903 #endif
1904 }
1905 
1906 // This method is a copy of JDK's sysGetLastErrorString
1907 // from src/windows/hpi/src/system_md.c
1908 
1909 size_t os::lasterror(char* buf, size_t len) {
1910   DWORD errval;
1911 
1912   if ((errval = GetLastError()) != 0) {
1913     // DOS error
1914     size_t n = (size_t)FormatMessage(
1915                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1916                                      NULL,
1917                                      errval,
1918                                      0,
1919                                      buf,
1920                                      (DWORD)len,
1921                                      NULL);
1922     if (n > 3) {
1923       // Drop final '.', CR, LF
1924       if (buf[n - 1] == '\n') n--;
1925       if (buf[n - 1] == '\r') n--;
1926       if (buf[n - 1] == '.') n--;
1927       buf[n] = '\0';
1928     }
1929     return n;
1930   }
1931 
1932   if (errno != 0) {
1933     // C runtime error that has no corresponding DOS error code
1934     const char* s = os::strerror(errno);
1935     size_t n = strlen(s);
1936     if (n >= len) n = len - 1;
1937     strncpy(buf, s, n);
1938     buf[n] = '\0';
1939     return n;
1940   }
1941 
1942   return 0;
1943 }
1944 
1945 int os::get_last_error() {
1946   DWORD error = GetLastError();
1947   if (error == 0) {
1948     error = errno;
1949   }
1950   return (int)error;
1951 }
1952 
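// WindowsSemaphore wraps a Win32 semaphore handle. A minimal usage sketch
// (hypothetical, for illustration only):
//   WindowsSemaphore sem(0);
//   sem.signal(1);   // producer: raise the count by one
//   sem.wait();      // consumer: block until the count is positive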
1953 WindowsSemaphore::WindowsSemaphore(uint value) {
1954   _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);
1955 
1956   guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
1957 }
1958 
1959 WindowsSemaphore::~WindowsSemaphore() {
1960   ::CloseHandle(_semaphore);
1961 }
1962 
1963 void WindowsSemaphore::signal(uint count) {
1964   if (count > 0) {
1965     BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
1966 
1967     assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
1968   }
1969 }
1970 
1971 void WindowsSemaphore::wait() {
1972   DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
1973   assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
1974   assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
1975 }
1976 
1977 // sun.misc.Signal
1978 // NOTE that this is a workaround for an apparent kernel bug where if
1979 // a signal handler for SIGBREAK is installed then that signal handler
1980 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1981 // See bug 4416763.
1982 static void (*sigbreakHandler)(int) = NULL;
1983 
1984 static void UserHandler(int sig, void *siginfo, void *context) {
1985   os::signal_notify(sig);
1986   // We need to reinstate the signal handler each time...
1987   os::signal(sig, (void*)UserHandler);
1988 }
1989 
1990 void* os::user_handler() {
1991   return (void*) UserHandler;
1992 }
1993 
1994 void* os::signal(int signal_number, void* handler) {
1995   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1996     void (*oldHandler)(int) = sigbreakHandler;
1997     sigbreakHandler = (void (*)(int)) handler;
1998     return (void*) oldHandler;
1999   } else {
2000     return (void*)::signal(signal_number, (void (*)(int))handler);
2001   }
2002 }
2003 
2004 void os::signal_raise(int signal_number) {
2005   raise(signal_number);
2006 }
2007 
2008 // The Win32 C runtime library maps all console control events other than ^C
2009 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2010 // logoff, and shutdown events.  We therefore install our own console handler
2011 // that raises SIGTERM for the latter cases.
2012 //
2013 static BOOL WINAPI consoleHandler(DWORD event) {
2014   switch (event) {
2015   case CTRL_C_EVENT:
2016     if (is_error_reported()) {
2017       // Ctrl-C is pressed during error reporting, likely because the error
2018       // handler fails to abort. Let VM die immediately.
2019       os::die();
2020     }
2021 
2022     os::signal_raise(SIGINT);
2023     return TRUE;
2024     break;
2025   case CTRL_BREAK_EVENT:
2026     if (sigbreakHandler != NULL) {
2027       (*sigbreakHandler)(SIGBREAK);
2028     }
2029     return TRUE;
2030     break;
2031   case CTRL_LOGOFF_EVENT: {
2032     // Don't terminate JVM if it is running in a non-interactive session,
2033     // such as a service process.
2034     USEROBJECTFLAGS flags;
2035     HANDLE handle = GetProcessWindowStation();
2036     if (handle != NULL &&
2037         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2038         sizeof(USEROBJECTFLAGS), NULL)) {
2039       // If it is a non-interactive session, let the next handler
2040       // deal with it.
2041       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2042         return FALSE;
2043       }
2044     }
2045   }
2046   case CTRL_CLOSE_EVENT:
2047   case CTRL_SHUTDOWN_EVENT:
2048     os::signal_raise(SIGTERM);
2049     return TRUE;
2050     break;
2051   default:
2052     break;
2053   }
2054   return FALSE;
2055 }
2056 
2057 // The following code was moved from os.cpp to make it platform
2058 // specific, which it is by its very nature.
2059 
2060 // Return maximum OS signal used + 1 for internal use only
2061 // Used as exit signal for signal_thread
2062 int os::sigexitnum_pd() {
2063   return NSIG;
2064 }
2065 
2066 // a counter for each possible signal value, including signal_thread exit signal
2067 static volatile jint pending_signals[NSIG+1] = { 0 };
2068 static HANDLE sig_sem = NULL;
2069 
2070 void os::signal_init_pd() {
2071   // Initialize signal structures
2072   memset((void*)pending_signals, 0, sizeof(pending_signals));
2073 
2074   sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
2075 
2076   // Programs embedding the VM do not want it to attempt to receive
2077   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2078   // shutdown hooks mechanism introduced in 1.3.  For example, when
2079   // the VM is run as part of a Windows NT service (i.e., a servlet
2080   // engine in a web server), the correct behavior is for any console
2081   // control handler to return FALSE, not TRUE, because the OS's
2082   // "final" handler for such events allows the process to continue if
2083   // it is a service (while terminating it if it is not a service).
2084   // To make this behavior uniform and the mechanism simpler, we
2085   // completely disable the VM's usage of these console events if -Xrs
2086   // (=ReduceSignalUsage) is specified.  This means, for example, that
2087   // the CTRL-BREAK thread dump mechanism is also disabled in this
2088   // case.  See bugs 4323062, 4345157, and related bugs.
2089 
2090   if (!ReduceSignalUsage) {
2091     // Add a CTRL-C handler
2092     SetConsoleCtrlHandler(consoleHandler, TRUE);
2093   }
2094 }
2095 
2096 void os::signal_notify(int signal_number) {
2097   BOOL ret;
2098   if (sig_sem != NULL) {
2099     Atomic::inc(&pending_signals[signal_number]);
2100     ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2101     assert(ret != 0, "ReleaseSemaphore() failed");
2102   }
2103 }
2104 
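// Look for a pending signal in pending_signals[]. If 'wait_for_signal' is
// true, block on sig_sem until one is raised (handling external suspension
// while waiting); otherwise return -1 immediately when none is pending.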
2105 static int check_pending_signals(bool wait_for_signal) {
2106   DWORD ret;
2107   while (true) {
2108     for (int i = 0; i < NSIG + 1; i++) {
2109       jint n = pending_signals[i];
2110       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2111         return i;
2112       }
2113     }
2114     if (!wait_for_signal) {
2115       return -1;
2116     }
2117 
2118     JavaThread *thread = JavaThread::current();
2119 
2120     ThreadBlockInVM tbivm(thread);
2121 
2122     bool threadIsSuspended;
2123     do {
2124       thread->set_suspend_equivalent();
2125       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2126       ret = ::WaitForSingleObject(sig_sem, INFINITE);
2127       assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
2128 
2129       // were we externally suspended while we were waiting?
2130       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2131       if (threadIsSuspended) {
2132         // The semaphore has been incremented, but while we were waiting
2133         // another thread suspended us. We don't want to continue running
2134         // while suspended because that would surprise the thread that
2135         // suspended us.
2136         ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2137         assert(ret != 0, "ReleaseSemaphore() failed");
2138 
2139         thread->java_suspend_self();
2140       }
2141     } while (threadIsSuspended);
2142   }
2143 }
2144 
2145 int os::signal_lookup() {
2146   return check_pending_signals(false);
2147 }
2148 
2149 int os::signal_wait() {
2150   return check_pending_signals(true);
2151 }
2152 
2153 // Implicit OS exception handling
2154 
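// Resume execution at 'handler' by saving the faulting pc in the current
// thread (if available) and rewriting the instruction pointer in the
// exception context record.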
2155 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2156                       address handler) {
2157   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2158   // Save pc in thread
2159 #ifdef _M_IA64
2160   // Do not blow up if no thread info available.
2161   if (thread) {
2162     // Saving PRECISE pc (with slot information) in thread.
2163     uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
2164     // Convert precise PC into "Unix" format
2165     precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
2166     thread->set_saved_exception_pc((address)precise_pc);
2167   }
2168   // Set pc to handler
2169   exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
2170   // Clear out psr.ri (= Restart Instruction) in order to continue
2171   // at the beginning of the target bundle.
2172   exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
2173   assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
2174 #else
2175   #ifdef _M_AMD64
2176   // Do not blow up if no thread info available.
2177   if (thread) {
2178     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2179   }
2180   // Set pc to handler
2181   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2182   #else
2183   // Do not blow up if no thread info available.
2184   if (thread) {
2185     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2186   }
2187   // Set pc to handler
2188   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2189   #endif
2190 #endif
2191 
2192   // Continue the execution
2193   return EXCEPTION_CONTINUE_EXECUTION;
2194 }
2195 
2196 
2197 // Used for PostMortemDump
2198 extern "C" void safepoints();
2199 extern "C" void find(int x);
2200 extern "C" void events();
2201 
2202 // According to Windows API documentation, an illegal instruction sequence should generate
2203 // the 0xC000001C exception code. However, real world experience shows that occasionally
2204 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2205 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2206 
2207 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2208 
2209 // From "Execution Protection in the Windows Operating System" draft 0.35
2210 // Once a system header becomes available, the "real" define should be
2211 // included or copied here.
2212 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2213 
2214 // Handle NAT Bit consumption on IA64.
2215 #ifdef _M_IA64
2216   #define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
2217 #endif
2218 
2219 // Windows Vista/2008 heap corruption check
2220 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2221 
2222 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2223 // C++ compiler contain this error code. Because this is a compiler-generated
2224 // error, the code is not listed in the Win32 API header files.
2225 // The code is actually a cryptic mnemonic device, with the initial "E"
2226 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2227 // ASCII values of "msc".
2228 
2229 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2230 
2231 #define def_excpt(val) { #val, (val) }
2232 
2233 static const struct { char* name; uint number; } exceptlabels[] = {
2234     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2235     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2236     def_excpt(EXCEPTION_BREAKPOINT),
2237     def_excpt(EXCEPTION_SINGLE_STEP),
2238     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2239     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2240     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2241     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2242     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2243     def_excpt(EXCEPTION_FLT_OVERFLOW),
2244     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2245     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2246     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2247     def_excpt(EXCEPTION_INT_OVERFLOW),
2248     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2249     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2250     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2251     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2252     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2253     def_excpt(EXCEPTION_STACK_OVERFLOW),
2254     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2255     def_excpt(EXCEPTION_GUARD_PAGE),
2256     def_excpt(EXCEPTION_INVALID_HANDLE),
2257     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2258     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2259 #ifdef _M_IA64
2260     , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION)
2261 #endif
2262 };
2263 
2264 #undef def_excpt
2265 
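// Translate a Win32 exception code into its symbolic name using the table
// above; returns NULL if the code is not recognized.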
2266 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2267   uint code = static_cast<uint>(exception_code);
2268   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2269     if (exceptlabels[i].number == code) {
2270       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2271       return buf;
2272     }
2273   }
2274 
2275   return NULL;
2276 }
2277 
2278 //-----------------------------------------------------------------------------
2279 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2280   // handle exception caused by idiv; should only happen for -MinInt/-1
2281   // (division by zero is handled explicitly)
2282 #ifdef _M_IA64
2283   assert(0, "Fix Handle_IDiv_Exception");
2284 #else
2285   #ifdef  _M_AMD64
2286   PCONTEXT ctx = exceptionInfo->ContextRecord;
2287   address pc = (address)ctx->Rip;
2288   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2289   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2290   if (pc[0] == 0xF7) {
2291     // set correct result values and continue after idiv instruction
2292     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2293   } else {
2294     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2295   }
2296   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation).
2297   // This is the case because the exception only happens for -MinValue/-1, and -MinValue is always in rax because of
2298   // the idiv opcode (0xF7).
2299   ctx->Rdx = (DWORD)0;             // remainder
2300   // Continue the execution
2301   #else
2302   PCONTEXT ctx = exceptionInfo->ContextRecord;
2303   address pc = (address)ctx->Eip;
2304   assert(pc[0] == 0xF7, "not an idiv opcode");
2305   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2306   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2307   // set correct result values and continue after idiv instruction
2308   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2309   ctx->Eax = (DWORD)min_jint;      // result
2310   ctx->Edx = (DWORD)0;             // remainder
2311   // Continue the execution
2312   #endif
2313 #endif
2314   return EXCEPTION_CONTINUE_EXECUTION;
2315 }
2316 
2317 //-----------------------------------------------------------------------------
2318 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2319   PCONTEXT ctx = exceptionInfo->ContextRecord;
2320 #ifndef  _WIN64
2321   // handle exception caused by native method modifying control word
2322   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2323 
2324   switch (exception_code) {
2325   case EXCEPTION_FLT_DENORMAL_OPERAND:
2326   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2327   case EXCEPTION_FLT_INEXACT_RESULT:
2328   case EXCEPTION_FLT_INVALID_OPERATION:
2329   case EXCEPTION_FLT_OVERFLOW:
2330   case EXCEPTION_FLT_STACK_CHECK:
2331   case EXCEPTION_FLT_UNDERFLOW:
2332     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2333     if (fp_control_word != ctx->FloatSave.ControlWord) {
2334       // Restore FPCW and mask out FLT exceptions
2335       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2336       // Mask out pending FLT exceptions
2337       ctx->FloatSave.StatusWord &=  0xffffff00;
2338       return EXCEPTION_CONTINUE_EXECUTION;
2339     }
2340   }
2341 
2342   if (prev_uef_handler != NULL) {
2343     // We didn't handle this exception so pass it to the previous
2344     // UnhandledExceptionFilter.
2345     return (prev_uef_handler)(exceptionInfo);
2346   }
2347 #else // !_WIN64
2348   // On Windows, the mxcsr control bits are non-volatile across calls
2349   // See also CR 6192333
2350   //
2351   jint MxCsr = INITIAL_MXCSR;
2352   // we can't use StubRoutines::addr_mxcsr_std()
2353   // because in Win64 mxcsr is not saved there
2354   if (MxCsr != ctx->MxCsr) {
2355     ctx->MxCsr = MxCsr;
2356     return EXCEPTION_CONTINUE_EXECUTION;
2357   }
2358 #endif // !_WIN64
2359 
2360   return EXCEPTION_CONTINUE_SEARCH;
2361 }
2362 
2363 static inline void report_error(Thread* t, DWORD exception_code,
2364                                 address addr, void* siginfo, void* context) {
2365   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2366 
2367   // If UseOsErrorReporting, this will return here and save the error file
2368   // somewhere where we can find it in the minidump.
2369 }
2370 
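// Reconstruct the Java frame that was active when the stack bang faulted, so
// that the caller can check for a @ReservedStackAccess annotated method.
// Returns false if the pc cannot be mapped to a well-defined Java frame.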
2371 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2372         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2373   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2374   address addr = (address) exceptionRecord->ExceptionInformation[1];
2375   if (Interpreter::contains(pc)) {
2376     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2377     if (!fr->is_first_java_frame()) {
2378       // get_frame_at_stack_banging_point() is only called when we
2379       // have well defined stacks so java_sender() calls do not need
2380       // to assert safe_for_sender() first.
2381       *fr = fr->java_sender();
2382     }
2383   } else {
2384     // more complex code with compiled code
2385     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2386     CodeBlob* cb = CodeCache::find_blob(pc);
2387     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2388       // Not sure where the pc points to, fallback to default
2389       // stack overflow handling
2390       return false;
2391     } else {
2392       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2393       // in compiled code, the stack banging is performed just after the return pc
2394       // has been pushed on the stack
2395       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2396       if (!fr->is_java_frame()) {
2397         // See java_sender() comment above.
2398         *fr = fr->java_sender();
2399       }
2400     }
2401   }
2402   assert(fr->is_java_frame(), "Safety check");
2403   return true;
2404 }
2405 
2406 //-----------------------------------------------------------------------------
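// Top-level Structured Exception Handling filter for the VM. Dispatches
// stack overflows, implicit null checks, safepoint polls, FP-control-word
// fixups and integer-division traps to the appropriate handlers; anything
// unrecognized is reported and passed on with EXCEPTION_CONTINUE_SEARCH.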
2407 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2408   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2409   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2410 #ifdef _M_IA64
2411   // On Itanium, we need the "precise pc", which has the slot number coded
2412   // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
2413   address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
2414   // Convert the pc to "Unix format", which has the slot number coded
2415   // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
2416   // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
2417   // information is saved in the Unix format.
2418   address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
2419 #else
2420   #ifdef _M_AMD64
2421   address pc = (address) exceptionInfo->ContextRecord->Rip;
2422   #else
2423   address pc = (address) exceptionInfo->ContextRecord->Eip;
2424   #endif
2425 #endif
2426   Thread* t = Thread::current_or_null_safe();
2427 
2428   // Handle SafeFetch32 and SafeFetchN exceptions.
2429   if (StubRoutines::is_safefetch_fault(pc)) {
2430     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2431   }
2432 
2433 #ifndef _WIN64
2434   // Execution protection violation - win32 running on AMD64 only
2435   // Handled first to avoid misdiagnosis as a "normal" access violation;
2436   // This is safe to do because we have a new/unique ExceptionInformation
2437   // code for this condition.
2438   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2439     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2440     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2441     address addr = (address) exceptionRecord->ExceptionInformation[1];
2442 
2443     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2444       int page_size = os::vm_page_size();
2445 
2446       // Make sure the pc and the faulting address are sane.
2447       //
2448       // If an instruction spans a page boundary, and the page containing
2449       // the beginning of the instruction is executable but the following
2450       // page is not, the pc and the faulting address might be slightly
2451       // different - we still want to unguard the 2nd page in this case.
2452       //
2453       // 15 bytes seems to be a (very) safe value for max instruction size.
2454       bool pc_is_near_addr =
2455         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2456       bool instr_spans_page_boundary =
2457         (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2458                          (intptr_t) page_size) > 0);
2459 
2460       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2461         static volatile address last_addr =
2462           (address) os::non_memory_address_word();
2463 
2464         // In conservative mode, don't unguard unless the address is in the VM
2465         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2466             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2467 
2468           // Set memory to RWX and retry
2469           address page_start =
2470             (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2471           bool res = os::protect_memory((char*) page_start, page_size,
2472                                         os::MEM_PROT_RWX);
2473 
2474           log_debug(os)("Execution protection violation "
2475                         "at " INTPTR_FORMAT
2476                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2477                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2478 
2479           // Set last_addr so if we fault again at the same address, we don't
2480           // end up in an endless loop.
2481           //
2482           // There are two potential complications here.  Two threads trapping
2483           // at the same address at the same time could cause one of the
2484           // threads to think it already unguarded, and abort the VM.  Likely
2485           // very rare.
2486           //
2487           // The other race involves two threads alternately trapping at
2488           // different addresses and failing to unguard the page, resulting in
2489           // an endless loop.  This condition is probably even more unlikely
2490           // than the first.
2491           //
2492           // Although both cases could be avoided by using locks or thread
2493           // local last_addr, these solutions are unnecessary complication:
2494           // this handler is a best-effort safety net, not a complete solution.
2495           // It is disabled by default and should only be used as a workaround
2496           // in case we missed any no-execute-unsafe VM code.
2497 
2498           last_addr = addr;
2499 
2500           return EXCEPTION_CONTINUE_EXECUTION;
2501         }
2502       }
2503 
2504       // Last unguard failed or not unguarding
2505       tty->print_raw_cr("Execution protection violation");
2506       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2507                    exceptionInfo->ContextRecord);
2508       return EXCEPTION_CONTINUE_SEARCH;
2509     }
2510   }
2511 #endif // _WIN64
2512 
2513   // Check to see if we caught the safepoint code in the
2514   // process of write protecting the memory serialization page.
2515   // It write enables the page immediately after protecting it
2516   // so just return.
2517   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2518     if (t != NULL && t->is_Java_thread()) {
2519       JavaThread* thread = (JavaThread*) t;
2520       PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2521       address addr = (address) exceptionRecord->ExceptionInformation[1];
2522       if (os::is_memory_serialize_page(thread, addr)) {
2523         // Block current thread until the memory serialize page permission restored.
2524         os::block_on_serialize_page_trap();
2525         return EXCEPTION_CONTINUE_EXECUTION;
2526       }
2527     }
2528   }
2529 
2530   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2531       VM_Version::is_cpuinfo_segv_addr(pc)) {
2532     // Verify that the OS saves/restores AVX registers.
2533     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2534   }
2535 
2536   if (t != NULL && t->is_Java_thread()) {
2537     JavaThread* thread = (JavaThread*) t;
2538     bool in_java = thread->thread_state() == _thread_in_Java;
2539 
2540     // Handle potential stack overflows up front.
2541     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2542 #ifdef _M_IA64
2543       // Use guard page for register stack.
2544       PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2545       address addr = (address) exceptionRecord->ExceptionInformation[1];
2546       // Check for a register stack overflow on Itanium
2547       if (thread->addr_inside_register_stack_red_zone(addr)) {
2548         // Fatal red zone violation happens if the Java program
2549         // catches a StackOverflow error and does so much processing
2550         // that it runs beyond the unprotected yellow guard zone. As
2551         // a result, we are out of here.
2552         fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
2553       } else if(thread->addr_inside_register_stack(addr)) {
2554         // Disable the yellow zone which sets the state that
2555         // we've got a stack overflow problem.
2556         if (thread->stack_yellow_reserved_zone_enabled()) {
2557           thread->disable_stack_yellow_reserved_zone();
2558         }
2559         // Give us some room to process the exception.
2560         thread->disable_register_stack_guard();
2561         // Tracing with +Verbose.
2562         if (Verbose) {
2563           tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
2564           tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
2565           tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
2566           tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
2567                         thread->register_stack_base(),
2568                         thread->register_stack_base() + thread->stack_size());
2569         }
2570 
2571         // Reguard the permanent register stack red zone just to be sure.
2572         // We saw Windows silently disabling this without telling us.
2573         thread->enable_register_stack_red_zone();
2574 
2575         return Handle_Exception(exceptionInfo,
2576                                 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2577       }
2578 #endif
2579       if (thread->stack_guards_enabled()) {
2580         if (in_java) {
2581           frame fr;
2582           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2583           address addr = (address) exceptionRecord->ExceptionInformation[1];
2584           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2585             assert(fr.is_java_frame(), "Must be a Java frame");
2586             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2587           }
2588         }
2589         // Yellow zone violation.  The o/s has unprotected the first yellow
2590         // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
2591         // update the enabled status, even if the zone contains only one page.
2592         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2593         thread->disable_stack_yellow_reserved_zone();
2594         // If not in java code, return and hope for the best.
2595         return in_java
2596             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2597             :  EXCEPTION_CONTINUE_EXECUTION;
2598       } else {
2599         // Fatal red zone violation.
2600         thread->disable_stack_red_zone();
2601         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2602         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2603                       exceptionInfo->ContextRecord);
2604         return EXCEPTION_CONTINUE_SEARCH;
2605       }
2606     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2607       // Either stack overflow or null pointer exception.
2608       if (in_java) {
2609         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2610         address addr = (address) exceptionRecord->ExceptionInformation[1];
2611         address stack_end = thread->stack_end();
2612         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2613           // Stack overflow.
2614           assert(!os::uses_stack_guard_pages(),
2615                  "should be caught by red zone code above.");
2616           return Handle_Exception(exceptionInfo,
2617                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2618         }
2619         // Check for safepoint polling and implicit null checks.
2620         // We only expect null pointers in the stubs (vtable);
2621         // the rest are checked explicitly now.
2622         CodeBlob* cb = CodeCache::find_blob(pc);
2623         if (cb != NULL) {
2624           if (os::is_poll_address(addr)) {
2625             address stub = SharedRuntime::get_poll_stub(pc);
2626             return Handle_Exception(exceptionInfo, stub);
2627           }
2628         }
2629         {
2630 #ifdef _WIN64
2631           // If it's a legal stack address, map the entire region in
2632           //
2633           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2634           address addr = (address) exceptionRecord->ExceptionInformation[1];
2635           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2636             addr = (address)((uintptr_t)addr &
2637                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2638             os::commit_memory((char *)addr, thread->stack_base() - addr,
2639                               !ExecMem);
2640             return EXCEPTION_CONTINUE_EXECUTION;
2641           } else
2642 #endif
2643           {
2644             // Null pointer exception.
2645 #ifdef _M_IA64
2646             // Process implicit null checks in compiled code. Note: Implicit null checks
2647             // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
2648             if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
2649               CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
2650               // Handle implicit null check in UEP method entry
2651               if (cb && (cb->is_frame_complete_at(pc) ||
2652                          (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
2653                 if (Verbose) {
2654                   intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
2655                   tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
2656                   tty->print_cr("      to addr " INTPTR_FORMAT, addr);
2657                   tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
2658                                 *(bundle_start + 1), *bundle_start);
2659                 }
2660                 return Handle_Exception(exceptionInfo,
2661                                         SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
2662               }
2663             }
2664 
2665             // Implicit null checks were processed above.  Hence, we should not reach
2666             // here in the usual case => die!
2667             if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
2668             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2669                          exceptionInfo->ContextRecord);
2670             return EXCEPTION_CONTINUE_SEARCH;
2671 
2672 #else // !IA64
2673 
2674             if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
2675               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2676               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2677             }
2678             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2679                          exceptionInfo->ContextRecord);
2680             return EXCEPTION_CONTINUE_SEARCH;
2681 #endif
2682           }
2683         }
2684       }
2685 
2686 #ifdef _WIN64
2687       // Special care for fast JNI field accessors.
2688       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2689       // in and the heap gets shrunk before the field access.
2690       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2691         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2692         if (addr != (address)-1) {
2693           return Handle_Exception(exceptionInfo, addr);
2694         }
2695       }
2696 #endif
2697 
2698       // Stack overflow or null pointer exception in native code.
2699       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2700                    exceptionInfo->ContextRecord);
2701       return EXCEPTION_CONTINUE_SEARCH;
2702     } // /EXCEPTION_ACCESS_VIOLATION
2703     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2704 #if defined _M_IA64
2705     else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2706               exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2707       M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
2708 
2709       // Compiled method patched to be non entrant? Following conditions must apply:
2710       // 1. must be first instruction in bundle
2711       // 2. must be a break instruction with appropriate code
2712       if ((((uint64_t) pc & 0x0F) == 0) &&
2713           (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
2714         return Handle_Exception(exceptionInfo,
2715                                 (address)SharedRuntime::get_handle_wrong_method_stub());
2716       }
2717     } // /EXCEPTION_ILLEGAL_INSTRUCTION
2718 #endif
2719 
2720 
2721     if (in_java) {
2722       switch (exception_code) {
2723       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2724         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2725 
2726       case EXCEPTION_INT_OVERFLOW:
2727         return Handle_IDiv_Exception(exceptionInfo);
2728 
2729       } // switch
2730     }
2731     if (((thread->thread_state() == _thread_in_Java) ||
2732          (thread->thread_state() == _thread_in_native)) &&
2733          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2734       LONG result=Handle_FLT_Exception(exceptionInfo);
2735       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2736     }
2737   }
2738 
2739   if (exception_code != EXCEPTION_BREAKPOINT) {
2740     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2741                  exceptionInfo->ContextRecord);
2742   }
2743   return EXCEPTION_CONTINUE_SEARCH;
2744 }
2745 
2746 #ifndef _WIN64
2747 // Special care for fast JNI accessors.
2748 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2749 // the heap gets shrunk before the field access.
2750 // Need to install our own structured exception handler since native code may
2751 // install its own.
2752 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2753   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2754   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2755     address pc = (address) exceptionInfo->ContextRecord->Eip;
2756     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2757     if (addr != (address)-1) {
2758       return Handle_Exception(exceptionInfo, addr);
2759     }
2760   }
2761   return EXCEPTION_CONTINUE_SEARCH;
2762 }
2763 
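// Wrap each jni_fast_Get<Primitive>Field entry point in a Win32 __try/__except
// block so that faults taken inside the fast accessor are routed to
// fastJNIAccessorExceptionFilter above.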
2764 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2765   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2766                                                      jobject obj,           \
2767                                                      jfieldID fieldID) {    \
2768     __try {                                                                 \
2769       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2770                                                                  obj,       \
2771                                                                  fieldID);  \
2772     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2773                                               _exception_info())) {         \
2774     }                                                                       \
2775     return 0;                                                               \
2776   }
2777 
2778 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2779 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2780 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2781 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2782 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2783 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2784 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2785 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2786 
2787 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2788   switch (type) {
2789   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2790   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2791   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2792   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2793   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2794   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2795   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2796   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2797   default:        ShouldNotReachHere();
2798   }
2799   return (address)-1;
2800 }
2801 #endif
2802 
2803 // Virtual Memory
2804 
2805 int os::vm_page_size() { return os::win32::vm_page_size(); }
2806 int os::vm_allocation_granularity() {
2807   return os::win32::vm_allocation_granularity();
2808 }
2809 
2810 // Windows large page support is available on Windows 2003. In order to use
2811 // large page memory, the administrator must first assign additional privilege
2812 // to the user:
2813 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2814 //   + select Local Policies -> User Rights Assignment
2815 //   + double click "Lock pages in memory", add users and/or groups
2816 //   + reboot
2817 // Note the above steps are needed for administrator as well, as administrators
2818 // by default do not have the privilege to lock pages in memory.
2819 //
2820 // Note about Windows 2003: although the API supports committing large page
2821 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2822 // scenario, I found through experiment it only uses large pages if the entire
2823 // memory region is reserved and committed in a single VirtualAlloc() call.
2824 // This makes Windows large page support more or less like Solaris ISM, in
2825 // that the entire heap must be committed upfront. This probably will change
2826 // in the future, if so the code below needs to be revisited.
2827 
2828 #ifndef MEM_LARGE_PAGES
2829   #define MEM_LARGE_PAGES 0x20000000
2830 #endif
2831 
2832 static HANDLE    _hProcess;
2833 static HANDLE    _hToken;
2834 
2835 // Container for NUMA node list info
2836 class NUMANodeListHolder {
2837  private:
2838   int *_numa_used_node_list;  // allocated below
2839   int _numa_used_node_count;
2840 
2841   void free_node_list() {
2842     if (_numa_used_node_list != NULL) {
2843       FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2844     }
2845   }
2846 
2847  public:
2848   NUMANodeListHolder() {
2849     _numa_used_node_count = 0;
2850     _numa_used_node_list = NULL;
2851     // do rest of initialization in build routine (after function pointers are set up)
2852   }
2853 
2854   ~NUMANodeListHolder() {
2855     free_node_list();
2856   }
2857 
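  // Collect the NUMA nodes whose processors intersect this process's
  // affinity mask. Returns true only if more than one node is in use,
  // i.e. if interleaving can actually spread memory across nodes.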
2858   bool build() {
2859     DWORD_PTR proc_aff_mask;
2860     DWORD_PTR sys_aff_mask;
2861     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2862     ULONG highest_node_number;
2863     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2864     free_node_list();
2865     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2866     for (unsigned int i = 0; i <= highest_node_number; i++) {
2867       ULONGLONG proc_mask_numa_node;
2868       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2869       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2870         _numa_used_node_list[_numa_used_node_count++] = i;
2871       }
2872     }
2873     return (_numa_used_node_count > 1);
2874   }
2875 
2876   int get_count() { return _numa_used_node_count; }
2877   int get_node_list_entry(int n) {
2878     // for indexes out of range, returns -1
2879     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2880   }
2881 
2882 } numa_node_list_holder;
2883 
2884 
2885 
2886 static size_t _large_page_size = 0;
2887 
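// Try to enable the SeLockMemoryPrivilege for the current process; this
// privilege is required before VirtualAlloc() will grant MEM_LARGE_PAGES.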
2888 static bool request_lock_memory_privilege() {
2889   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2890                           os::current_process_id());
2891 
2892   LUID luid;
2893   if (_hProcess != NULL &&
2894       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2895       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2896 
2897     TOKEN_PRIVILEGES tp;
2898     tp.PrivilegeCount = 1;
2899     tp.Privileges[0].Luid = luid;
2900     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2901 
2902     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2903     // privilege. Check GetLastError() too. See MSDN document.
2904     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2905         (GetLastError() == ERROR_SUCCESS)) {
2906       return true;
2907     }
2908   }
2909 
2910   return false;
2911 }
2912 
2913 static void cleanup_after_large_page_init() {
2914   if (_hProcess) CloseHandle(_hProcess);
2915   _hProcess = NULL;
2916   if (_hToken) CloseHandle(_hToken);
2917   _hToken = NULL;
2918 }
2919 
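// Decide whether NUMA interleaving can be used: build the node list and warn
// (only if UseNUMAInterleaving was requested explicitly) when the process does
// not span multiple NUMA nodes.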
2920 static bool numa_interleaving_init() {
2921   bool success = false;
2922   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2923 
2924   // print a warning if UseNUMAInterleaving flag is specified on command line
2925   bool warn_on_failure = use_numa_interleaving_specified;
2926 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2927 
2928   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2929   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2930   NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2931 
2932   if (numa_node_list_holder.build()) {
2933     if (log_is_enabled(Debug, os, cpu)) {
2934       Log(os, cpu) log;
2935       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2936       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2937         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2938       }
2939     }
2940     success = true;
2941   } else {
2942     WARN("Process does not cover multiple NUMA nodes.");
2943   }
2944   if (!success) {
2945     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2946   }
2947   return success;
2948 #undef WARN
2949 }
2950 
2951 // This routine is used whenever we need to reserve a contiguous VA range
2952 // but must make separate VirtualAlloc calls for each piece of the range.
2953 // Reasons for doing this:
2954 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it can be set otherwise)
2955 //  * UseNUMAInterleaving requires a separate node for each piece
2956 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2957                                          DWORD prot,
2958                                          bool should_inject_error = false) {
2959   char * p_buf;
2960   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2961   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2962   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2963 
2964   // First reserve enough address space in advance, since we want to be
2965   // able to break a single contiguous virtual address range into multiple
2966   // large page commits, but WS2003 does not allow reserving large page space.
2967   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2968   // address space. Then we deallocate that reservation and re-allocate
2969   // using large pages.
2970   const size_t size_of_reserve = bytes + chunk_size;
2971   if (bytes > size_of_reserve) {
2972     // Overflowed.
2973     return NULL;
2974   }
2975   p_buf = (char *) VirtualAlloc(addr,
2976                                 size_of_reserve,  // size of Reserve
2977                                 MEM_RESERVE,
2978                                 PAGE_READWRITE);
2979   // If reservation failed, return NULL
2980   if (p_buf == NULL) return NULL;
2981   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2982   os::release_memory(p_buf, bytes + chunk_size);
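       // The reservation above was only needed to find a legal contiguous
       // range; it is released immediately, and the chunk-by-chunk
       // allocations below re-reserve pieces inside that (now merely
       // advisory) range.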
2983 
2984   // We still need to round up to a page boundary (in case we are using large pages),
2985   // but not to a chunk boundary (in case NUMAInterleaveGranularity doesn't align with the page size);
2986   // instead we handle this in the bytes_to_rq computation below.
2987   p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2988 
2989   // now go through and allocate one chunk at a time until all bytes are
2990   // allocated
2991   size_t  bytes_remaining = bytes;
2992   // An overflow of align_size_up() would have been caught above
2993   // in the calculation of size_of_reserve.
2994   char * next_alloc_addr = p_buf;
2995   HANDLE hProc = GetCurrentProcess();
2996 
2997 #ifdef ASSERT
2998   // Variable for the failure injection
2999   long ran_num = os::random();
3000   size_t fail_after = ran_num % bytes;
3001 #endif
3002 
3003   int count=0;
3004   while (bytes_remaining) {
3005     // select bytes_to_rq to get to the next chunk_size boundary
3006 
3007     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
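         // E.g. (hypothetical numbers): with a 2M chunk_size and
         // next_alloc_addr 512K past a chunk boundary, at most 1536K is
         // requested so this piece does not straddle two interleaving chunks.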
3008     // Note allocate and commit
3009     char * p_new;
3010 
3011 #ifdef ASSERT
3012     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
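         // Fail once bytes_remaining drops to the randomly chosen threshold,
         // so the cleanup path below gets exercised.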
3013 #else
3014     const bool inject_error_now = false;
3015 #endif
3016 
3017     if (inject_error_now) {
3018       p_new = NULL;
3019     } else {
3020       if (!UseNUMAInterleaving) {
3021         p_new = (char *) VirtualAlloc(next_alloc_addr,
3022                                       bytes_to_rq,
3023                                       flags,
3024                                       prot);
3025       } else {
3026         // get the next node to use from the used_node_list
3027         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3028         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
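             // VirtualAllocExNuma treats 'node' as the preferred node for
             // physical backing; it is a hint, not a hard binding.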
3029         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3030       }
3031     }
3032 
3033     if (p_new == NULL) {
3034       // Free any allocated pages
3035       if (next_alloc_addr > p_buf) {
3036         // Some memory was committed so release it.
3037         size_t bytes_to_release = bytes - bytes_remaining;
3038         // NMT has yet to record any individual blocks, so it
3039         // needs to create a dummy 'reserve' record to match
3040         // the release.
3041         MemTracker::record_virtual_memory_reserve((address)p_buf,
3042                                                   bytes_to_release, CALLER_PC);
3043         os::release_memory(p_buf, bytes_to_release);
3044       }
3045 #ifdef ASSERT
3046       if (should_inject_error) {
3047         log_develop_debug(pagesize)("Reserving pages individually failed.");
3048       }
3049 #endif
3050       return NULL;
3051     }
3052 
3053     bytes_remaining -= bytes_to_rq;
3054     next_alloc_addr += bytes_to_rq;
3055     count++;
3056   }
3057   // Although the memory is allocated individually, it is returned as one.
3058   // NMT records it as one block.
3059   if ((flags & MEM_COMMIT) != 0) {
3060     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3061   } else {
3062     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3063   }
3064 
3065   // made it this far, success
3066   return p_buf;
3067 }
3068 
3069 
3070 
3071 void os::large_page_init() {
3072   if (!UseLargePages) return;
3073 
3074   // print a warning if any large page related flag is specified on command line
3075   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3076                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3077   bool success = false;
3078 
3079 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3080   if (request_lock_memory_privilege()) {
3081     size_t s = GetLargePageMinimum();
3082     if (s) {
3083 #if defined(IA32) || defined(AMD64)
3084       if (s > 4*M || LargePageSizeInBytes > 4*M) {
3085         WARN("JVM cannot use large pages bigger than 4mb.");
3086       } else {
3087 #endif
3088         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
3089           _large_page_size = LargePageSizeInBytes;
3090         } else {
3091           _large_page_size = s;
3092         }
3093         success = true;
3094 #if defined(IA32) || defined(AMD64)
3095       }
3096 #endif
3097     } else {
3098       WARN("Large page is not supported by the processor.");
3099     }
3100   } else {
3101     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3102   }
3103 #undef WARN
3104 
3105   const size_t default_page_size = (size_t) vm_page_size();
3106   if (success && _large_page_size > default_page_size) {
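         // Expose the supported page sizes in decreasing order, terminated by 0.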
3107     _page_sizes[0] = _large_page_size;
3108     _page_sizes[1] = default_page_size;
3109     _page_sizes[2] = 0;
3110   }
3111 
3112   cleanup_after_large_page_init();
3113   UseLargePages = success;
3114 }
3115 
3116 // Helper function to create a temp file in the given directory
3117 int os::create_file_for_heap(const char* dir, size_t size) {
3118 
3119   const char name_template[] = "/jvmheap.XXXXXX";
3120 
3121   char *fullname = (char*)_alloca(strlen(dir) + sizeof(name_template));
3122   (void)strcpy(fullname, dir);
3123   (void)strcat(fullname, name_template);
3124   os::native_path(fullname);
3125 
3126   char *path = _mktemp(fullname);
3127   if (path == NULL) {
3128     return -1;
3129   }
3130 
3131   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3132 
3133   if (fd < 0) {
3134     warning("Failure to create file %s for heap", path);
3135     return -1;
3136   }
3137 
3138   return fd;
3139 }
3140 
3141 // If 'base' is not NULL, this function will return NULL if it cannot map memory at 'base'.
3142 //
3143 char* os::map_memory_to_dax_file(char* base, size_t size, int fd) {
3144   assert(fd != -1, "File descriptor is not valid");
3145 
3146   HANDLE fh = (HANDLE)_get_osfhandle(fd);
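       // CreateFileMapping takes the 64-bit mapping size split into its
       // high and low DWORD halves.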
3147   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3148                                          (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3149   if (fileMapping == NULL) {
3150     if (GetLastError() == ERROR_DISK_FULL) {
3151       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for heap"));
3152     } else {
3153       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3154     }
3155 
3156     return NULL;
3157   }
3158 
3159   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3160 
3161   CloseHandle(fileMapping);
3162 
3163   return (char*)addr;
3164 }
3165 
3166 char* os::replace_existing_mapping_with_dax_file_mapping(char* base, size_t size, int fd) {
3167   assert(fd != -1, "File descriptor is not valid");
3168   assert(base != NULL, "base cannot be NULL");
3169 
3170   release_memory(base, size);
3171   return map_memory_to_dax_file(base, size, fd);
3172 
3173 }
3174 
3175 // On win32, one cannot release just a part of reserved memory, it's an
3176 // all or nothing deal.  When we split a reservation, we must break the
3177 // reservation into two reservations.
3178 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3179                                   bool realloc) {
3180   if (size > 0) {
3181     release_memory(base, size);
3182     if (realloc) {
3183       reserve_memory(split, base);
3184     }
3185     if (size != split) {
3186       reserve_memory(size - split, base + split);
3187     }
3188   }
3189 }
3190 
3191 char* os::attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc) {
3192   char* result = NULL;
3193   if (file_desc != -1) {
3194     result = map_memory_to_dax_file(addr, bytes, file_desc);
3195     if (result != NULL) {
3196       MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
3197     }
3198   } else {
3199     result = pd_attempt_reserve_memory_at(bytes, addr);
3200     if (result != NULL) {
3201       MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
3202     }
3203   }
3204   return result;
3205 }
3206 
3207 // Multiple threads can race in this code, and unlike on POSIX-like OSes it's not possible to
3208 // unmap small sections of virtual space to get the requested alignment.
3209 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3210 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3211   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3212          "Alignment must be a multiple of allocation granularity (page size)");
3213   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3214 
3215   size_t extra_size = size + alignment;
3216   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3217 
3218   char* aligned_base = NULL;
3219 
3220   do {
3221     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3222     if (extra_base == NULL) {
3223       return NULL;
3224     }
3225     // Do manual alignment
3226     aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3227 
3228     if (file_desc != -1) {
3229       os::unmap_memory(extra_base, extra_size);
3230     }
3231     else {
3232       os::release_memory(extra_base, extra_size);
3233     }
3234 
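         // Re-reserve exactly [aligned_base, aligned_base + size). If another
         // thread grabbed part of that range in the meantime, this returns
         // NULL and we retry.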
3235     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3236 
3237   } while (aligned_base == NULL);
3238 
3239   return aligned_base;
3240 }
3241 
3242 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3243   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3244          "reserve alignment");
3245   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3246   char* res;
3247   // note that if UseLargePages is on, all the areas that require interleaving
3248   // will go thru reserve_memory_special rather than thru here.
3249   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3250   if (!use_individual) {
3251     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3252   } else {
3253     elapsedTimer reserveTimer;
3254     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3255     // in numa interleaving, we have to allocate pages individually
3256     // (well really chunks of NUMAInterleaveGranularity size)
3257     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3258     if (res == NULL) {
3259       warning("NUMA page allocation failed");
3260     }
3261     if (Verbose && PrintMiscellaneous) {
3262       reserveTimer.stop();
3263       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3264                     reserveTimer.milliseconds(), reserveTimer.ticks());
3265     }
3266   }
3267   assert(res == NULL || addr == NULL || addr == res,
3268          "Unexpected address from reserve.");
3269 
3270   return res;
3271 }
3272 
3273 // Reserve memory at an arbitrary address, only if that area is
3274 // available (and not reserved for something else).
3275 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3276   // Windows os::reserve_memory() fails if the requested address range is
3277   // not available.
3278   return reserve_memory(bytes, requested_addr, 0);
3279 }
3280 
3281 size_t os::large_page_size() {
3282   return _large_page_size;
3283 }
3284 
3285 bool os::can_commit_large_page_memory() {
3286   // Windows only uses large page memory when the entire region is reserved
3287   // and committed in a single VirtualAlloc() call. This may change in the
3288   // future, but with Windows 2003 it's not possible to commit on demand.
3289   return false;
3290 }
3291 
3292 bool os::can_execute_large_page_memory() {
3293   return true;
3294 }
3295 
3296 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3297                                  bool exec) {
3298   assert(UseLargePages, "only for large pages");
3299 
3300   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3301     return NULL; // Fallback to small pages.
3302   }
3303 
3304   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3305   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3306 
3307   // with large pages, there are two cases where we need to use Individual Allocation
3308   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3309   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3310   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3311     log_debug(pagesize)("Reserving large pages individually.");
3312 
3313     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3314     if (p_buf == NULL) {
3315       // give an appropriate warning message
3316       if (UseNUMAInterleaving) {
3317         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3318       }
3319       if (UseLargePagesIndividualAllocation) {
3320         warning("Individually allocated large pages failed, "
3321                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3322       }
3323       return NULL;
3324     }
3325 
3326     return p_buf;
3327 
3328   } else {
3329     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3330 
3331     // normal policy: just allocate it all at once, using the flags
3332     // and protection computed above
3333     char * res = (char *)VirtualAlloc(addr, bytes, flags, prot);
3334     if (res != NULL) {
3335       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3336     }
3337 
3338     return res;
3339   }
3340 }
3341 
3342 bool os::release_memory_special(char* base, size_t bytes) {
3343   assert(base != NULL, "Sanity check");
3344   return release_memory(base, bytes);
3345 }
3346 
3347 void os::print_statistics() {
3348 }
3349 
3350 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3351   int err = os::get_last_error();
3352   char buf[256];
3353   size_t buf_len = os::lasterror(buf, sizeof(buf));
3354   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3355           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3356           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3357 }
3358 
3359 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3360   if (bytes == 0) {
3361     // Don't bother the OS with noops.
3362     return true;
3363   }
3364   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3365   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3366   // Don't attempt to print anything if the OS call fails. We're
3367   // probably low on resources, so the print itself may cause crashes.
3368 
3369   // Unless we have NUMAInterleaving enabled, the range of a commit
3370   // is always within a reserve covered by a single VirtualAlloc;
3371   // in that case we can just do a single commit for the requested size.
3372   if (!UseNUMAInterleaving) {
3373     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3374       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3375       return false;
3376     }
3377     if (exec) {
3378       DWORD oldprot;
3379       // Windows doc says to use VirtualProtect to get execute permissions
3380       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3381         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3382         return false;
3383       }
3384     }
3385     return true;
3386   } else {
3387 
3388     // when NUMAInterleaving is enabled, the commit might cover a range that
3389     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3390     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3391     // returns represents the number of bytes that can be committed in one step.
3392     size_t bytes_remaining = bytes;
3393     char * next_alloc_addr = addr;
3394     while (bytes_remaining > 0) {
3395       MEMORY_BASIC_INFORMATION alloc_info;
3396       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3397       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3398       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3399                        PAGE_READWRITE) == NULL) {
3400         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3401                                             exec);)
3402         return false;
3403       }
3404       if (exec) {
3405         DWORD oldprot;
3406         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3407                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3408           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3409                                               exec);)
3410           return false;
3411         }
3412       }
3413       bytes_remaining -= bytes_to_rq;
3414       next_alloc_addr += bytes_to_rq;
3415     }
3416   }
3417   // if we made it this far, return true
3418   return true;
3419 }
3420 
3421 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3422                           bool exec) {
3423   // alignment_hint is ignored on this OS
3424   return pd_commit_memory(addr, size, exec);
3425 }
3426 
3427 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3428                                   const char* mesg) {
3429   assert(mesg != NULL, "mesg must be specified");
3430   if (!pd_commit_memory(addr, size, exec)) {
3431     warn_fail_commit_memory(addr, size, exec);
3432     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3433   }
3434 }
3435 
3436 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3437                                   size_t alignment_hint, bool exec,
3438                                   const char* mesg) {
3439   // alignment_hint is ignored on this OS
3440   pd_commit_memory_or_exit(addr, size, exec, mesg);
3441 }
3442 
3443 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3444   if (bytes == 0) {
3445     // Don't bother the OS with noops.
3446     return true;
3447   }
3448   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3449   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3450   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3451 }
3452 
3453 bool os::pd_release_memory(char* addr, size_t bytes) {
3454   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3455 }
3456 
3457 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3458   return os::commit_memory(addr, size, !ExecMem);
3459 }
3460 
3461 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3462   return os::uncommit_memory(addr, size);
3463 }
3464 
3465 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3466   uint count = 0;
3467   bool ret = false;
3468   size_t bytes_remaining = bytes;
3469   char * next_protect_addr = addr;
3470 
3471   // Use VirtualQuery() to get the chunk size.
3472   while (bytes_remaining) {
3473     MEMORY_BASIC_INFORMATION alloc_info;
3474     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3475       return false;
3476     }
3477 
3478     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3479     // We used a different API in allocate_pages_individually() depending on UseNUMAInterleaving,
3480     // but we don't need to distinguish here, as both cases are protected by the same API.
3481     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3482     if (!ret) {
3483       warning("Failed protecting pages individually for chunk #%u", count);
3484       return false;
3485     }
3486 
3487     bytes_remaining -= bytes_to_protect;
3488     next_protect_addr += bytes_to_protect;
3489     count++;
3490   }
3491   return ret;
3492 }
3493 
3494 // Set protections specified
3495 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3496                         bool is_committed) {
3497   unsigned int p = 0;
3498   switch (prot) {
3499   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3500   case MEM_PROT_READ: p = PAGE_READONLY; break;
3501   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3502   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3503   default:
3504     ShouldNotReachHere();
3505   }
3506 
3507   DWORD old_status;
3508 
3509   // Strangely enough, on Win32 one can change protection only for committed
3510   // memory; not a big deal anyway, as 'bytes' here is less than or equal to 64K.
3511   if (!is_committed) {
3512     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3513                           "cannot commit protection page");
3514   }
3515   // One cannot use os::guard_memory() here, as on Win32 guard pages
3516   // have different (one-shot) semantics. From MSDN on PAGE_GUARD:
3517   //
3518   // Pages in the region become guard pages. Any attempt to access a guard page
3519   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3520   // the guard page status. Guard pages thus act as a one-time access alarm.
3521   bool ret;
3522   if (UseNUMAInterleaving) {
3523     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3524     // so we must protect the chunks individually.
3525     ret = protect_pages_individually(addr, bytes, p, &old_status);
3526   } else {
3527     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3528   }
3529 #ifdef ASSERT
3530   if (!ret) {
3531     int err = os::get_last_error();
3532     char buf[256];
3533     size_t buf_len = os::lasterror(buf, sizeof(buf));
3534     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3535           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3536           buf_len != 0 ? buf : "<no_error_string>", err);
3537   }
3538 #endif
3539   return ret;
3540 }
3541 
3542 bool os::guard_memory(char* addr, size_t bytes) {
3543   DWORD old_status;
3544   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3545 }
3546 
3547 bool os::unguard_memory(char* addr, size_t bytes) {
3548   DWORD old_status;
3549   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3550 }
3551 
3552 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3553 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3554 void os::numa_make_global(char *addr, size_t bytes)    { }
3555 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3556 bool os::numa_topology_changed()                       { return false; }
3557 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3558 int os::numa_get_group_id()                            { return 0; }
3559 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3560   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3561     // Provide an answer for UMA systems
3562     ids[0] = 0;
3563     return 1;
3564   } else {
3565     // check for size bigger than actual groups_num
3566     size = MIN2(size, numa_get_groups_num());
3567     for (int i = 0; i < (int)size; i++) {
3568       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3569     }
3570     return size;
3571   }
3572 }
3573 
3574 bool os::get_page_info(char *start, page_info* info) {
3575   return false;
3576 }
3577 
3578 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3579                      page_info* page_found) {
3580   return end;
3581 }
3582 
3583 char* os::non_memory_address_word() {
3584   // Must never look like an address returned by reserve_memory,
3585   // even in its subfields (as defined by the CPU immediate fields,
3586   // if the CPU splits constants across multiple instructions).
3587   return (char*)-1;
3588 }
3589 
3590 #define MAX_ERROR_COUNT 100
3591 #define SYS_THREAD_ERROR 0xffffffffUL
3592 
3593 void os::pd_start_thread(Thread* thread) {
3594   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3595   // Returns previous suspend state:
3596   // 0:  Thread was not suspended
3597   // 1:  Thread is running now
3598   // >1: Thread is still suspended.
3599   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3600 }
3601 
3602 class HighResolutionInterval : public CHeapObj<mtThread> {
3603   // The default timer resolution seems to be 10 milliseconds.
3604   // (Where is this written down?)
3605   // If someone wants to sleep for only a fraction of the default,
3606   // then we set the timer resolution down to 1 millisecond for
3607   // the duration of their interval.
3608   // We carefully set the resolution back, since otherwise we
3609   // seem to incur an overhead (3%?) that we don't need.
3610   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
3611   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3612   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3613   // timeBeginPeriod() if the relative error exceeded some threshold.
3614   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3615   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
3616   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3617   // resolution timers running.
3618  private:
3619   jlong resolution;
3620  public:
3621   HighResolutionInterval(jlong ms) {
3622     resolution = ms % 10L;
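         // Only lower the timer period when ms is not an exact multiple of
         // the assumed 10 ms default resolution.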
3623     if (resolution != 0) {
3624       MMRESULT result = timeBeginPeriod(1L);
3625     }
3626   }
3627   ~HighResolutionInterval() {
3628     if (resolution != 0) {
3629       MMRESULT result = timeEndPeriod(1L);
3630     }
3631     resolution = 0L;
3632   }
3633 };
3634 
3635 int os::sleep(Thread* thread, jlong ms, bool interruptable) {
3636   jlong limit = (jlong) MAXDWORD;
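       // The Windows sleep/wait calls below take a DWORD timeout, so very
       // long sleeps are performed in MAXDWORD-sized slices.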
3637 
3638   while (ms > limit) {
3639     int res;
3640     if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
3641       return res;
3642     }
3643     ms -= limit;
3644   }
3645 
3646   assert(thread == Thread::current(), "thread consistency check");
3647   OSThread* osthread = thread->osthread();
3648   OSThreadWaitState osts(osthread, false /* not Object.wait() */);
3649   int result;
3650   if (interruptable) {
3651     assert(thread->is_Java_thread(), "must be java thread");
3652     JavaThread *jt = (JavaThread *) thread;
3653     ThreadBlockInVM tbivm(jt);
3654 
3655     jt->set_suspend_equivalent();
3656     // cleared by handle_special_suspend_equivalent_condition() or
3657     // java_suspend_self() via check_and_wait_while_suspended()
3658 
3659     HANDLE events[1];
3660     events[0] = osthread->interrupt_event();
3661     HighResolutionInterval *phri=NULL;
3662     if (!ForceTimeHighResolution) {
3663       phri = new HighResolutionInterval(ms);
3664     }
3665     if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
3666       result = OS_TIMEOUT;
3667     } else {
3668       ResetEvent(osthread->interrupt_event());
3669       osthread->set_interrupted(false);
3670       result = OS_INTRPT;
3671     }
3672     delete phri; //if it is NULL, harmless
3673 
3674     // were we externally suspended while we were waiting?
3675     jt->check_and_wait_while_suspended();
3676   } else {
3677     assert(!thread->is_Java_thread(), "must not be java thread");
3678     Sleep((long) ms);
3679     result = OS_TIMEOUT;
3680   }
3681   return result;
3682 }
3683 
3684 // Short sleep, direct OS call.
3685 //
3686 // ms = 0, means allow others (if any) to run.
3687 //
3688 void os::naked_short_sleep(jlong ms) {
3689   assert(ms < 1000, "Un-interruptable sleep, short time use only");
3690   Sleep(ms);
3691 }
3692 
3693 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3694 void os::infinite_sleep() {
3695   while (true) {    // sleep forever ...
3696     Sleep(100000);  // ... 100 seconds at a time
3697   }
3698 }
3699 
3700 typedef BOOL (WINAPI * STTSignature)(void);
3701 
3702 void os::naked_yield() {
3703   // Consider passing back the return value from SwitchToThread().
3704   SwitchToThread();
3705 }
3706 
3707 // Win32 only gives you access to seven real priorities at a time,
3708 // so we compress Java's ten down to seven.  It would be better
3709 // if we dynamically adjusted relative priorities.
3710 
3711 int os::java_to_os_priority[CriticalPriority + 1] = {
3712   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3713   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3714   THREAD_PRIORITY_LOWEST,                       // 2
3715   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3716   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3717   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3718   THREAD_PRIORITY_NORMAL,                       // 6
3719   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3720   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3721   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3722   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3723   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3724 };
3725 
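     // Alternative mapping used when ThreadPriorityPolicy == 1 (see prio_init()):
     // spread Java priorities across the full Win32 range, up to
     // THREAD_PRIORITY_TIME_CRITICAL for MaxPriority and CriticalPriority.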
3726 int prio_policy1[CriticalPriority + 1] = {
3727   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3728   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3729   THREAD_PRIORITY_LOWEST,                       // 2
3730   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3731   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3732   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3733   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3734   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3735   THREAD_PRIORITY_HIGHEST,                      // 8
3736   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3737   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3738   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3739 };
3740 
3741 static int prio_init() {
3742   // If ThreadPriorityPolicy is 1, switch tables
3743   if (ThreadPriorityPolicy == 1) {
3744     int i;
3745     for (i = 0; i < CriticalPriority + 1; i++) {
3746       os::java_to_os_priority[i] = prio_policy1[i];
3747     }
3748   }
3749   if (UseCriticalJavaThreadPriority) {
3750     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3751   }
3752   return 0;
3753 }
3754 
3755 OSReturn os::set_native_priority(Thread* thread, int priority) {
3756   if (!UseThreadPriorities) return OS_OK;
3757   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3758   return ret ? OS_OK : OS_ERR;
3759 }
3760 
3761 OSReturn os::get_native_priority(const Thread* const thread,
3762                                  int* priority_ptr) {
3763   if (!UseThreadPriorities) {
3764     *priority_ptr = java_to_os_priority[NormPriority];
3765     return OS_OK;
3766   }
3767   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3768   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3769     assert(false, "GetThreadPriority failed");
3770     return OS_ERR;
3771   }
3772   *priority_ptr = os_prio;
3773   return OS_OK;
3774 }
3775 
3776 
3777 // Hint to the underlying OS that a task switch would not be good.
3778 // Void return because it's a hint and can fail.
3779 void os::hint_no_preempt() {}
3780 
3781 void os::interrupt(Thread* thread) {
3782   assert(!thread->is_Java_thread() || Thread::current() == thread ||
3783          Threads_lock->owned_by_self(),
3784          "possibility of dangling Thread pointer");
3785 
3786   OSThread* osthread = thread->osthread();
3787   osthread->set_interrupted(true);
3788   // More than one thread can get here with the same value of osthread,
3789   // resulting in multiple notifications.  We do, however, want the store
3790   // to interrupted() to be visible to other threads before we post
3791   // the interrupt event.
3792   OrderAccess::release();
3793   SetEvent(osthread->interrupt_event());
3794   // For JSR166:  unpark after setting status
3795   if (thread->is_Java_thread()) {
3796     ((JavaThread*)thread)->parker()->unpark();
3797   }
3798 
3799   ParkEvent * ev = thread->_ParkEvent;
3800   if (ev != NULL) ev->unpark();
3801 }
3802 
3803 
3804 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3805   assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3806          "possibility of dangling Thread pointer");
3807 
3808   OSThread* osthread = thread->osthread();
3809   // There is no synchronization between the setting of the interrupt
3810   // and it being cleared here. It is critical - see 6535709 - that
3811   // we only clear the interrupt state, and reset the interrupt event,
3812   // if we are going to report that we were indeed interrupted - else
3813   // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3814   // depending on the timing. We check the thread's interrupt event to see
3815   // whether the thread really was interrupted, which prevents spurious wakeups.
3816   bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3817   if (interrupted && clear_interrupted) {
3818     osthread->set_interrupted(false);
3819     ResetEvent(osthread->interrupt_event());
3820   } // Otherwise leave the interrupted state alone
3821 
3822   return interrupted;
3823 }
3824 
3825 // Gets a pc (hint) for a running thread. Currently used only for profiling.
3826 ExtendedPC os::get_thread_pc(Thread* thread) {
3827   CONTEXT context;
3828   context.ContextFlags = CONTEXT_CONTROL;
3829   HANDLE handle = thread->osthread()->thread_handle();
3830 #ifdef _M_IA64
3831   assert(0, "Fix get_thread_pc");
3832   return ExtendedPC(NULL);
3833 #else
3834   if (GetThreadContext(handle, &context)) {
3835 #ifdef _M_AMD64
3836     return ExtendedPC((address) context.Rip);
3837 #else
3838     return ExtendedPC((address) context.Eip);
3839 #endif
3840   } else {
3841     return ExtendedPC(NULL);
3842   }
3843 #endif
3844 }
3845 
3846 // GetCurrentThreadId() returns DWORD
3847 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3848 
3849 static int _initial_pid = 0;
3850 
3851 int os::current_process_id() {
3852   return (_initial_pid ? _initial_pid : _getpid());
3853 }
3854 
3855 int    os::win32::_vm_page_size              = 0;
3856 int    os::win32::_vm_allocation_granularity = 0;
3857 int    os::win32::_processor_type            = 0;
3858 // Processor level is not available on non-NT systems, use vm_version instead
3859 int    os::win32::_processor_level           = 0;
3860 julong os::win32::_physical_memory           = 0;
3861 size_t os::win32::_default_stack_size        = 0;
3862 
3863 intx          os::win32::_os_thread_limit    = 0;
3864 volatile intx os::win32::_os_thread_count    = 0;
3865 
3866 bool   os::win32::_is_windows_server         = false;
3867 
3868 // 6573254
3869 // Currently, the bug is observed across all the supported Windows releases,
3870 // including the latest one (as of this writing - Windows Server 2012 R2)
3871 bool   os::win32::_has_exit_bug              = true;
3872 
3873 void os::win32::initialize_system_info() {
3874   SYSTEM_INFO si;
3875   GetSystemInfo(&si);
3876   _vm_page_size    = si.dwPageSize;
3877   _vm_allocation_granularity = si.dwAllocationGranularity;
3878   _processor_type  = si.dwProcessorType;
3879   _processor_level = si.wProcessorLevel;
3880   set_processor_count(si.dwNumberOfProcessors);
3881 
3882   MEMORYSTATUSEX ms;
3883   ms.dwLength = sizeof(ms);
3884 
3885   // also returns ullAvailPhys (free physical memory bytes), ullTotalVirtual, ullAvailVirtual,
3886   // and dwMemoryLoad (% of memory in use)
3887   GlobalMemoryStatusEx(&ms);
3888   _physical_memory = ms.ullTotalPhys;
3889 
3890   if (FLAG_IS_DEFAULT(MaxRAM)) {
3891     // Adjust MaxRAM according to the maximum virtual address space available.
3892     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3893   }
3894 
3895   OSVERSIONINFOEX oi;
3896   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3897   GetVersionEx((OSVERSIONINFO*)&oi);
3898   switch (oi.dwPlatformId) {
3899   case VER_PLATFORM_WIN32_NT:
3900     {
3901       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3902       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3903           oi.wProductType == VER_NT_SERVER) {
3904         _is_windows_server = true;
3905       }
3906     }
3907     break;
3908   default: fatal("Unknown platform");
3909   }
3910 
3911   _default_stack_size = os::current_stack_size();
3912   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3913   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3914          "stack size not a multiple of page size");
3915 
3916   initialize_performance_counter();
3917 }
3918 
3919 
3920 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3921                                       int ebuflen) {
3922   char path[MAX_PATH];
3923   DWORD size;
3924   DWORD pathLen = (DWORD)sizeof(path);
3925   HINSTANCE result = NULL;
3926 
3927   // only allow library name without path component
3928   assert(strchr(name, '\\') == NULL, "path not allowed");
3929   assert(strchr(name, ':') == NULL, "path not allowed");
3930   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3931     jio_snprintf(ebuf, ebuflen,
3932                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3933     return NULL;
3934   }
3935 
3936   // search system directory
3937   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3938     if (size >= pathLen) {
3939       return NULL; // truncated
3940     }
3941     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3942       return NULL; // truncated
3943     }
3944     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3945       return result;
3946     }
3947   }
3948 
3949   // try Windows directory
3950   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3951     if (size >= pathLen) {
3952       return NULL; // truncated
3953     }
3954     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3955       return NULL; // truncated
3956     }
3957     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3958       return result;
3959     }
3960   }
3961 
3962   jio_snprintf(ebuf, ebuflen,
3963                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3964   return NULL;
3965 }
3966 
3967 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3968 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3969 
3970 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3971   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3972   return TRUE;
3973 }
3974 
3975 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3976   // Basic approach:
3977   //  - Each exiting thread registers its intent to exit and then does so.
3978   //  - A thread trying to terminate the process must wait for all
3979   //    threads currently exiting to complete their exit.
3980 
3981   if (os::win32::has_exit_bug()) {
3982     // The array holds handles of the threads that have started exiting by calling
3983     // _endthreadex().
3984     // Should be large enough to avoid blocking the exiting thread due to lack of
3985     // a free slot.
3986     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3987     static int handle_count = 0;
3988 
3989     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3990     static CRITICAL_SECTION crit_sect;
3991     static volatile jint process_exiting = 0;
3992     int i, j;
3993     DWORD res;
3994     HANDLE hproc, hthr;
3995 
3996     // We only attempt to register threads until a process exiting
3997     // thread manages to set the process_exiting flag. Any threads
3998     // that come through here after the process_exiting flag is set
3999     // are unregistered and will be caught in the SuspendThread()
4000     // infinite loop below.
4001     bool registered = false;
4002 
4003     // The first thread that reached this point, initializes the critical section.
4004     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
4005       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
4006     } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
4007       if (what != EPT_THREAD) {
4008         // Atomically set process_exiting before the critical section
4009         // to increase the visibility between racing threads.
4010         Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
4011       }
4012       EnterCriticalSection(&crit_sect);
4013 
4014       if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
4015         // Remove from the array those handles of the threads that have completed exiting.
4016         for (i = 0, j = 0; i < handle_count; ++i) {
4017           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
4018           if (res == WAIT_TIMEOUT) {
4019             handles[j++] = handles[i];
4020           } else {
4021             if (res == WAIT_FAILED) {
4022               warning("WaitForSingleObject failed (%u) in %s: %d\n",
4023                       GetLastError(), __FILE__, __LINE__);
4024             }
4025             // Don't keep the handle, if we failed waiting for it.
4026             CloseHandle(handles[i]);
4027           }
4028         }
4029 
4030         // If there's no free slot in the array of the kept handles, we'll have to
4031         // wait until at least one thread completes exiting.
4032         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
4033           // Raise the priority of the oldest exiting thread to increase its chances
4034           // to complete sooner.
4035           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
4036           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
4037           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
4038             i = (res - WAIT_OBJECT_0);
4039             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
4040             for (; i < handle_count; ++i) {
4041               handles[i] = handles[i + 1];
4042             }
4043           } else {
4044             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4045                     (res == WAIT_FAILED ? "failed" : "timed out"),
4046                     GetLastError(), __FILE__, __LINE__);
4047             // Don't keep handles, if we failed waiting for them.
4048             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
4049               CloseHandle(handles[i]);
4050             }
4051             handle_count = 0;
4052           }
4053         }
4054 
4055         // Store a duplicate of the current thread handle in the array of handles.
4056         hproc = GetCurrentProcess();
4057         hthr = GetCurrentThread();
4058         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
4059                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
4060           warning("DuplicateHandle failed (%u) in %s: %d\n",
4061                   GetLastError(), __FILE__, __LINE__);
4062 
4063           // We can't register this thread (no more handles) so this thread
4064           // may be racing with a thread that is calling exit(). If the thread
4065           // that is calling exit() has managed to set the process_exiting
4066           // flag, then this thread will be caught in the SuspendThread()
4067           // infinite loop below which closes that race. A small timing
4068           // window remains before the process_exiting flag is set, but it
4069           // is only exposed when we are out of handles.
4070         } else {
4071           ++handle_count;
4072           registered = true;
4073 
4074           // The current exiting thread has stored its handle in the array, and now
4075           // should leave the critical section before calling _endthreadex().
4076         }
4077 
4078       } else if (what != EPT_THREAD && handle_count > 0) {
4079         jlong start_time, finish_time, timeout_left;
4080         // Before ending the process, make sure all the threads that had called
4081         // _endthreadex() completed.
4082 
4083         // Set the priority level of the current thread to the same value as
4084         // the priority level of exiting threads.
4085         // This is to ensure it will be given a fair chance to execute if
4086         // the timeout expires.
4087         hthr = GetCurrentThread();
4088         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
4089         start_time = os::javaTimeNanos();
4090         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
4091         for (i = 0; ; ) {
4092           int portion_count = handle_count - i;
4093           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
4094             portion_count = MAXIMUM_WAIT_OBJECTS;
4095           }
4096           for (j = 0; j < portion_count; ++j) {
4097             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
4098           }
4099           timeout_left = (finish_time - start_time) / 1000000L;
4100           if (timeout_left < 0) {
4101             timeout_left = 0;
4102           }
4103           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4104           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4105             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4106                     (res == WAIT_FAILED ? "failed" : "timed out"),
4107                     GetLastError(), __FILE__, __LINE__);
4108             // Reset portion_count so we close the remaining
4109             // handles due to this error.
4110             portion_count = handle_count - i;
4111           }
4112           for (j = 0; j < portion_count; ++j) {
4113             CloseHandle(handles[i + j]);
4114           }
4115           if ((i += portion_count) >= handle_count) {
4116             break;
4117           }
4118           start_time = os::javaTimeNanos();
4119         }
4120         handle_count = 0;
4121       }
4122 
4123       LeaveCriticalSection(&crit_sect);
4124     }
4125 
4126     if (!registered &&
4127         OrderAccess::load_acquire(&process_exiting) != 0 &&
4128         process_exiting != (jint)GetCurrentThreadId()) {
4129       // Some other thread is about to call exit(), so we don't let
4130       // the current unregistered thread proceed to exit() or _endthreadex()
4131       while (true) {
4132         SuspendThread(GetCurrentThread());
4133         // Avoid busy-wait loop, if SuspendThread() failed.
4134         Sleep(EXIT_TIMEOUT);
4135       }
4136     }
4137   }
4138 
4139   // We are here if either
4140   // - there's no 'race at exit' bug on this OS release;
4141   // - initialization of the critical section failed (unlikely);
4142   // - the current thread has registered itself and left the critical section;
4143   // - the process-exiting thread has raised the flag and left the critical section.
4144   if (what == EPT_THREAD) {
4145     _endthreadex((unsigned)exit_code);
4146   } else if (what == EPT_PROCESS) {
4147     ::exit(exit_code);
4148   } else {
4149     _exit(exit_code);
4150   }
4151 
4152   // Should not reach here
4153   return exit_code;
4154 }
4155 
4156 #undef EXIT_TIMEOUT
4157 
4158 void os::win32::setmode_streams() {
4159   _setmode(_fileno(stdin), _O_BINARY);
4160   _setmode(_fileno(stdout), _O_BINARY);
4161   _setmode(_fileno(stderr), _O_BINARY);
4162 }
4163 
4164 
4165 bool os::is_debugger_attached() {
4166   return IsDebuggerPresent() ? true : false;
4167 }
4168 
4169 
4170 void os::wait_for_keypress_at_exit(void) {
4171   if (PauseAtExit) {
4172     fprintf(stderr, "Press any key to continue...\n");
4173     fgetc(stdin);
4174   }
4175 }
4176 
4177 
4178 bool os::message_box(const char* title, const char* message) {
4179   int result = MessageBox(NULL, message, title,
4180                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4181   return result == IDYES;
4182 }
4183 
4184 #ifndef PRODUCT
4185 #ifndef _WIN64
4186 // Helpers to check whether NX protection is enabled
4187 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4188   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4189       pex->ExceptionRecord->NumberParameters > 0 &&
4190       pex->ExceptionRecord->ExceptionInformation[0] ==
4191       EXCEPTION_INFO_EXEC_VIOLATION) {
4192     return EXCEPTION_EXECUTE_HANDLER;
4193   }
4194   return EXCEPTION_CONTINUE_SEARCH;
4195 }
4196 
4197 void nx_check_protection() {
4198   // If NX is enabled we'll get an exception calling into code on the stack
4199   char code[] = { (char)0xC3 }; // ret
4200   void *code_ptr = (void *)code;
4201   __try {
4202     __asm call code_ptr
4203   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4204     tty->print_raw_cr("NX protection detected.");
4205   }
4206 }
4207 #endif // _WIN64
4208 #endif // PRODUCT
4209 
4210 // This is called _before_ the global arguments have been parsed
4211 void os::init(void) {
4212   _initial_pid = _getpid();
4213 
4214   init_random(1234567);
4215 
4216   win32::initialize_system_info();
4217   win32::setmode_streams();
4218   init_page_sizes((size_t) win32::vm_page_size());
4219 
4220   // This may be overridden later when argument processing is done.
4221   FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);
4222 
4223   // Initialize main_process and main_thread
4224   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4225   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4226                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4227     fatal("DuplicateHandle failed\n");
4228   }
4229   main_thread_id = (int) GetCurrentThreadId();
4230 
4231   // initialize fast thread access - only used for 32-bit
4232   win32::initialize_thread_ptr_offset();
4233 }
4234 
4235 // To install functions for atexit processing
4236 extern "C" {
4237   static void perfMemory_exit_helper() {
4238     perfMemory_exit();
4239   }
4240 }
4241 
4242 static jint initSock();
4243 
4244 // this is called _after_ the global arguments have been parsed
4245 jint os::init_2(void) {
4246   // Allocate a single page and mark it as readable for safepoint polling
4247   address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
4248   guarantee(polling_page != NULL, "Reserve Failed for polling page");
4249 
4250   address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
4251   guarantee(return_page != NULL, "Commit Failed for polling page");
4252 
4253   os::set_polling_page(polling_page);
4254   log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
4255 
4256   if (!UseMembar) {
4257     address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
4258     guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
4259 
4260     return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
4261     guarantee(return_page != NULL, "Commit Failed for memory serialize page");
4262 
4263     os::set_memory_serialize_page(mem_serialize_page);
4264     log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
4265   }
4266 
4267   // Setup Windows Exceptions
4268 
4269   // for debugging float code generation bugs
4270   if (ForceFloatExceptions) {
4271 #ifndef  _WIN64
4272     static long fp_control_word = 0;
4273     __asm { fstcw fp_control_word }
4274     // see Intel PPro Manual, Vol. 2, p 7-16
4275     const long precision = 0x20;
4276     const long underflow = 0x10;
4277     const long overflow  = 0x08;
4278     const long zero_div  = 0x04;
4279     const long denorm    = 0x02;
4280     const long invalid   = 0x01;
4281     fp_control_word |= invalid;
4282     __asm { fldcw fp_control_word }
4283 #endif
4284   }
4285 
4286   // If stack_commit_size is 0, windows will reserve the default size,
4287   // but only commit a small portion of it.
4288   size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
4289   size_t default_reserve_size = os::win32::default_stack_size();
4290   size_t actual_reserve_size = stack_commit_size;
4291   if (stack_commit_size < default_reserve_size) {
4292     // If stack_commit_size == 0, we want this too
4293     actual_reserve_size = default_reserve_size;
4294   }
4295 
4296   // Check minimum allowable stack size for thread creation and to initialize
4297   // the java system classes, including StackOverflowError - depends on page
4298   // size.  Add two 4K pages for compiler2 recursion in main thread.
4299   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4300   // class initialization depending on 32 or 64 bit VM.
4301   size_t min_stack_allowed =
4302             (size_t)(JavaThread::stack_guard_zone_size() +
4303                      JavaThread::stack_shadow_zone_size() +
4304                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4305 
4306   min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
4307 
4308   if (actual_reserve_size < min_stack_allowed) {
4309     tty->print_cr("\nThe Java thread stack size specified is too small. "
4310                   "Specify at least " SIZE_FORMAT "k",
4311                   min_stack_allowed / K);
4312     return JNI_ERR;
4313   }
4314 
4315   JavaThread::set_stack_size_at_create(stack_commit_size);
4316 
4317   // Calculate theoretical max. size of Threads to guard against artificial
4318   // out-of-memory situations, where all available address-space has been
4319   // reserved by thread stacks.
4320   assert(actual_reserve_size != 0, "Must have a stack");
4321 
4322   // Calculate the thread limit at which we should start doing Virtual Memory
4323   // banging. Currently that is when the threads have used all but 200MB of space.
4324   //
4325   // TODO: consider performing a similar calculation for commit size instead
4326   // of reserve size, since on a 64-bit platform we'll run into that more
4327   // often than running out of virtual memory space.  We can use the
4328   // lower value of the two calculations as the os_thread_limit.
4329   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4330   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4331 
4332   // atexit functions are called in the reverse order of their registration.
4333   // There is no limit to the number of functions registered. atexit does
4334   // not set errno.
4335 
4336   if (PerfAllowAtExitRegistration) {
4337     // only register atexit functions if PerfAllowAtExitRegistration is set.
4338     // atexit functions can be delayed until process exit time, which
4339     // can be problematic for embedded VM situations. Embedded VMs should
4340     // call DestroyJavaVM() to assure that VM resources are released.
4341 
4342     // note: perfMemory_exit_helper atexit function may be removed in
4343     // the future if the appropriate cleanup code can be added to the
4344     // VM_Exit VMOperation's doit method.
4345     if (atexit(perfMemory_exit_helper) != 0) {
4346       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4347     }
4348   }
4349 
4350 #ifndef _WIN64
4351   // Print something if NX is enabled (win32 on AMD64)
4352   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4353 #endif
4354 
4355   // initialize thread priority policy
4356   prio_init();
4357 
4358   if (UseNUMA && !ForceNUMA) {
4359     UseNUMA = false; // We don't fully support this yet
4360   }
4361 
4362   if (UseNUMAInterleaving) {
4363     // First check whether this Windows OS supports VirtualAllocExNuma; if not, ignore this flag.
4364     bool success = numa_interleaving_init();
4365     if (!success) UseNUMAInterleaving = false;
4366   }
4367 
4368   if (initSock() != JNI_OK) {
4369     return JNI_ERR;
4370   }
4371 
4372   return JNI_OK;
4373 }
4374 
4375 // Mark the polling page as unreadable
4376 void os::make_polling_page_unreadable(void) {
4377   DWORD old_status;
4378   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4379                       PAGE_NOACCESS, &old_status)) {
4380     fatal("Could not disable polling page");
4381   }
4382 }
4383 
4384 // Mark the polling page as readable
4385 void os::make_polling_page_readable(void) {
4386   DWORD old_status;
4387   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4388                       PAGE_READONLY, &old_status)) {
4389     fatal("Could not enable polling page");
4390   }
4391 }
4392 
4393 
4394 int os::stat(const char *path, struct stat *sbuf) {
4395   char pathbuf[MAX_PATH];
4396   if (strlen(path) > MAX_PATH - 1) {
4397     errno = ENAMETOOLONG;
4398     return -1;
4399   }
4400   os::native_path(strcpy(pathbuf, path));
4401   int ret = ::stat(pathbuf, sbuf);
4402   if (sbuf != NULL && UseUTCFileTimestamp) {
4403     // Fix for 6539723.  st_mtime returned from stat() is dependent on
4404     // the system timezone and so can return different values for the
4405     // same file if/when daylight saving time changes.  This adjustment
4406     // makes sure the same timestamp is returned regardless of the TZ.
4407     //
4408     // See:
4409     // http://msdn.microsoft.com/library/
4410     //   default.asp?url=/library/en-us/sysinfo/base/
4411     //   time_zone_information_str.asp
4412     // and
4413     // http://msdn.microsoft.com/library/default.asp?url=
4414     //   /library/en-us/sysinfo/base/settimezoneinformation.asp
4415     //
4416     // NOTE: there is an insidious bug here:  If the timezone is changed
4417     // after the call to stat() but before 'GetTimeZoneInformation()', then
4418     // the adjustment we do here will be wrong and we'll return the wrong
4419     // value (which will likely end up creating an invalid class data
4420     // archive).  Absent a better API for this, or some time zone locking
4421     // mechanism, we'll have to live with this risk.
4422     TIME_ZONE_INFORMATION tz;
4423     DWORD tzid = GetTimeZoneInformation(&tz);
4424     int daylightBias =
4425       (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
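         // Bias and DaylightBias/StandardBias are expressed in minutes
         // (UTC = local time + Bias), hence the conversion to seconds below.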
4426     sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4427   }
4428   return ret;
4429 }
4430 
4431 
4432 #define FT2INT64(ft) \
4433   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
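     // FILETIME counts 100-nanosecond intervals, so FT2INT64(ft) yields the raw
     // tick count; callers below multiply by 100 to convert the value to
     // nanoseconds (e.g. in os::thread_cpu_time()).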
4434 
4435 
4436 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4437 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4438 // of a thread.
4439 //
4440 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4441 // the fast estimate available on the platform.
4442 
4443 // current_thread_cpu_time() is not optimized for Windows yet
4444 jlong os::current_thread_cpu_time() {
4445   // return user + sys since the cost is the same
4446   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4447 }
4448 
4449 jlong os::thread_cpu_time(Thread* thread) {
4450   // consistent with what current_thread_cpu_time() returns.
4451   return os::thread_cpu_time(thread, true /* user+sys */);
4452 }
4453 
4454 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4455   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4456 }
4457 
4458 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4459   // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4460   // If this function changes, os::is_thread_cpu_time_supported() should too
4461   FILETIME CreationTime;
4462   FILETIME ExitTime;
4463   FILETIME KernelTime;
4464   FILETIME UserTime;
4465 
4466   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4467                       &ExitTime, &KernelTime, &UserTime) == 0) {
4468     return -1;
4469   } else if (user_sys_cpu_time) {
4470     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4471   } else {
4472     return FT2INT64(UserTime) * 100;
4473   }
4474 }
4475 
4476 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4477   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4478   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4479   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4480   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4481 }
4482 
4483 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4484   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4485   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4486   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4487   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4488 }
4489 
4490 bool os::is_thread_cpu_time_supported() {
4491   // see os::thread_cpu_time
4492   FILETIME CreationTime;
4493   FILETIME ExitTime;
4494   FILETIME KernelTime;
4495   FILETIME UserTime;
4496 
4497   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4498                       &KernelTime, &UserTime) == 0) {
4499     return false;
4500   } else {
4501     return true;
4502   }
4503 }
4504 
4505 // Windows doesn't provide a loadavg primitive, so this is stubbed out for now.
4506 // It does have primitives (PDH API) to get CPU usage and run queue length.
4507 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4508 // If we wanted to implement loadavg on Windows, we have a few options:
4509 //
4510 // a) Query CPU usage and run queue length and "fake" an answer by
4511 //    returning the CPU usage if it's under 100%, and the run queue
4512 //    length otherwise.  It turns out that querying is pretty slow
4513 //    on Windows, on the order of 200 microseconds on a fast machine.
4514 //    Note that on Windows the CPU usage value is the % usage
4515 //    since the last time the API was called (and the first call
4516 //    returns 100%), so we'd have to deal with that as well.
4517 //
4518 // b) Sample the "fake" answer using a sampling thread and store
4519 //    the answer in a global variable.  The call to loadavg would
4520 //    just return the value of the global, avoiding the slow query.
4521 //
4522 // c) Sample a better answer using exponential decay to smooth the
4523 //    value.  This is basically the algorithm used by UNIX kernels.
4524 //
4525 // Note that sampling thread starvation could affect both (b) and (c).
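     //
     // Illustrative sketch of option (a) using the PDH API (not part of the VM;
     // it assumes <pdh.h> and linking against pdh.lib, and note that rate counters
     // only yield meaningful values from the second PdhCollectQueryData() call on):
     //
     //   #include <pdh.h>
     //   static bool query_cpu_and_queue(double* cpu_pct, double* queue_len) {
     //     PDH_HQUERY query;
     //     PDH_HCOUNTER cpu, queue;
     //     if (PdhOpenQuery(NULL, 0, &query) != ERROR_SUCCESS) return false;
     //     PdhAddEnglishCounter(query, "\\Processor(_Total)\\% Processor Time", 0, &cpu);
     //     PdhAddEnglishCounter(query, "\\System\\Processor Queue Length", 0, &queue);
     //     PdhCollectQueryData(query);              // baseline sample
     //     Sleep(100);
     //     PdhCollectQueryData(query);              // second sample yields usable data
     //     PDH_FMT_COUNTERVALUE v;
     //     PdhGetFormattedCounterValue(cpu, PDH_FMT_DOUBLE, NULL, &v);
     //     *cpu_pct = v.doubleValue;
     //     PdhGetFormattedCounterValue(queue, PDH_FMT_DOUBLE, NULL, &v);
     //     *queue_len = v.doubleValue;
     //     PdhCloseQuery(query);
     //     return true;
     //   }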
4526 int os::loadavg(double loadavg[], int nelem) {
4527   return -1;
4528 }
4529 
4530 
4531 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4532 bool os::dont_yield() {
4533   return DontYieldALot;
4534 }
4535 
4536 // This method is a slightly reworked copy of JDK's sysOpen
4537 // from src/windows/hpi/src/sys_api_md.c
4538 
4539 int os::open(const char *path, int oflag, int mode) {
4540   char pathbuf[MAX_PATH];
4541 
4542   if (strlen(path) > MAX_PATH - 1) {
4543     errno = ENAMETOOLONG;
4544     return -1;
4545   }
4546   os::native_path(strcpy(pathbuf, path));
4547   return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4548 }
4549 
4550 FILE* os::open(int fd, const char* mode) {
4551   return ::_fdopen(fd, mode);
4552 }
4553 
4554 // Is a (classpath) directory empty?
4555 bool os::dir_is_empty(const char* path) {
4556   WIN32_FIND_DATA fd;
4557   HANDLE f = FindFirstFile(path, &fd);
4558   if (f == INVALID_HANDLE_VALUE) {
4559     return true;
4560   }
4561   FindClose(f);
4562   return false;
4563 }
4564 
4565 // create binary file, rewriting existing file if required
4566 int os::create_binary_file(const char* path, bool rewrite_existing) {
4567   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4568   if (!rewrite_existing) {
4569     oflags |= _O_EXCL;
4570   }
4571   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4572 }
4573 
4574 // return current position of file pointer
4575 jlong os::current_file_offset(int fd) {
4576   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4577 }
4578 
4579 // move file pointer to the specified offset
4580 jlong os::seek_to_file_offset(int fd, jlong offset) {
4581   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4582 }
4583 
4584 
4585 jlong os::lseek(int fd, jlong offset, int whence) {
4586   return (jlong) ::_lseeki64(fd, offset, whence);
4587 }
4588 
4589 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4590   OVERLAPPED ov;
4591   DWORD nread;
4592   BOOL result;
4593 
4594   ZeroMemory(&ov, sizeof(ov));
4595   ov.Offset = (DWORD)offset;
4596   ov.OffsetHigh = (DWORD)(offset >> 32);
4597 
4598   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4599 
4600   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4601 
4602   return result ? nread : 0;
4603 }
4604 
4605 
4606 // This method is a slightly reworked copy of JDK's sysNativePath
4607 // from src/windows/hpi/src/path_md.c
4608 
4609 // Convert a pathname to native format.  On win32, this involves forcing all
4610 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4611 // sometimes rejects '/') and removing redundant separators.  The input path is
4612 // assumed to have been converted into the character encoding used by the local
4613 // system.  Because this might be a double-byte encoding, care is taken to
4614 // treat double-byte lead characters correctly.
4615 //
4616 // This procedure modifies the given path in place, as the result is never
4617 // longer than the original.  There is no error return; this operation always
4618 // succeeds.
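     //
     // Illustrative examples (not exhaustive):
     //   "D:/foo//bar/"   becomes  "D:\foo\bar"
     //   "/c:/path"       becomes  "c:\path"       (leading separator dropped)
     //   "//host/share/"  becomes  "\\host\share"  (UNC prefix preserved)
     //   "c:/"            becomes  "c:\"           (root directories keep their separator)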
4619 char * os::native_path(char *path) {
4620   char *src = path, *dst = path, *end = path;
4621   char *colon = NULL;  // If a drive specifier is found, this will
4622                        // point to the colon following the drive letter
4623 
4624   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4625   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4626           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4627 
4628   // Check for leading separators
4629 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4630   while (isfilesep(*src)) {
4631     src++;
4632   }
4633 
4634   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4635     // Remove leading separators if followed by drive specifier.  This
4636     // hack is necessary to support file URLs containing drive
4637     // specifiers (e.g., "file://c:/path").  As a side effect,
4638     // "/c:/path" can be used as an alternative to "c:/path".
4639     *dst++ = *src++;
4640     colon = dst;
4641     *dst++ = ':';
4642     src++;
4643   } else {
4644     src = path;
4645     if (isfilesep(src[0]) && isfilesep(src[1])) {
4646       // UNC pathname: Retain first separator; leave src pointed at
4647       // second separator so that further separators will be collapsed
4648       // into the second separator.  The result will be a pathname
4649       // beginning with "\\\\" followed (most likely) by a host name.
4650       src = dst = path + 1;
4651       path[0] = '\\';     // Force first separator to '\\'
4652     }
4653   }
4654 
4655   end = dst;
4656 
4657   // Remove redundant separators from remainder of path, forcing all
4658   // separators to be '\\' rather than '/'. Also, single byte space
4659   // characters are removed from the end of the path because those
4660   // are not legal ending characters on this operating system.
4661   //
4662   while (*src != '\0') {
4663     if (isfilesep(*src)) {
4664       *dst++ = '\\'; src++;
4665       while (isfilesep(*src)) src++;
4666       if (*src == '\0') {
4667         // Check for trailing separator
4668         end = dst;
4669         if (colon == dst - 2) break;  // "z:\\"
4670         if (dst == path + 1) break;   // "\\"
4671         if (dst == path + 2 && isfilesep(path[0])) {
4672           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4673           // beginning of a UNC pathname.  Even though it is not, by
4674           // itself, a valid UNC pathname, we leave it as is in order
4675           // to be consistent with the path canonicalizer as well
4676           // as the win32 APIs, which treat this case as an invalid
4677           // UNC pathname rather than as an alias for the root
4678           // directory of the current drive.
4679           break;
4680         }
4681         end = --dst;  // Path does not denote a root directory, so
4682                       // remove trailing separator
4683         break;
4684       }
4685       end = dst;
4686     } else {
4687       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4688         *dst++ = *src++;
4689         if (*src) *dst++ = *src++;
4690         end = dst;
4691       } else {  // Copy a single-byte character
4692         char c = *src++;
4693         *dst++ = c;
4694         // Space is not a legal ending character
4695         if (c != ' ') end = dst;
4696       }
4697     }
4698   }
4699 
4700   *end = '\0';
4701 
4702   // For "z:", add "." to work around a bug in the C runtime library
4703   if (colon == dst - 1) {
4704     path[2] = '.';
4705     path[3] = '\0';
4706   }
4707 
4708   return path;
4709 }
4710 
4711 // This code is a copy of JDK's sysSetLength
4712 // from src/windows/hpi/src/sys_api_md.c
4713 
4714 int os::ftruncate(int fd, jlong length) {
4715   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4716   long high = (long)(length >> 32);
4717   DWORD ret;
4718 
4719   if (h == (HANDLE)(-1)) {
4720     return -1;
4721   }
4722 
4723   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4724   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4725     return -1;
4726   }
4727 
4728   if (::SetEndOfFile(h) == FALSE) {
4729     return -1;
4730   }
4731 
4732   return 0;
4733 }
4734 
4735 int os::get_fileno(FILE* fp) {
4736   return _fileno(fp);
4737 }
4738 
4739 // This code is a copy of JDK's sysSync
4740 // from src/windows/hpi/src/sys_api_md.c
4741 // except for the legacy workaround for a bug in Win 98
4742 
4743 int os::fsync(int fd) {
4744   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4745 
4746   if ((!::FlushFileBuffers(handle)) &&
4747       (GetLastError() != ERROR_ACCESS_DENIED)) {
4748     // from winerror.h
4749     return -1;
4750   }
4751   return 0;
4752 }
4753 
4754 static int nonSeekAvailable(int, long *);
4755 static int stdinAvailable(int, long *);
4756 
4757 #define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
4758 #define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4759 
4760 // This code is a copy of JDK's sysAvailable
4761 // from src/windows/hpi/src/sys_api_md.c
4762 
4763 int os::available(int fd, jlong *bytes) {
4764   jlong cur, end;
4765   struct _stati64 stbuf64;
4766 
4767   if (::_fstati64(fd, &stbuf64) >= 0) {
4768     int mode = stbuf64.st_mode;
4769     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4770       int ret;
4771       long lpbytes;
4772       if (fd == 0) {
4773         ret = stdinAvailable(fd, &lpbytes);
4774       } else {
4775         ret = nonSeekAvailable(fd, &lpbytes);
4776       }
4777       (*bytes) = (jlong)(lpbytes);
4778       return ret;
4779     }
4780     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4781       return FALSE;
4782     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4783       return FALSE;
4784     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4785       return FALSE;
4786     }
4787     *bytes = end - cur;
4788     return TRUE;
4789   } else {
4790     return FALSE;
4791   }
4792 }
4793 
4794 void os::flockfile(FILE* fp) {
4795   _lock_file(fp);
4796 }
4797 
4798 void os::funlockfile(FILE* fp) {
4799   _unlock_file(fp);
4800 }
4801 
4802 // This code is a copy of JDK's nonSeekAvailable
4803 // from src/windows/hpi/src/sys_api_md.c
4804 
4805 static int nonSeekAvailable(int fd, long *pbytes) {
4806   // This is used for available on non-seekable devices
4807   // (like both named and anonymous pipes, such as pipes
4808   //  connected to an exec'd process).
4809   // Standard Input is a special case.
4810   HANDLE han;
4811 
4812   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4813     return FALSE;
4814   }
4815 
4816   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4817     // PeekNamedPipe fails when at EOF.  In that case we
4818     // simply make *pbytes = 0 which is consistent with the
4819     // behavior we get on Solaris when an fd is at EOF.
4820     // The only alternative is to raise an Exception,
4821     // which isn't really warranted.
4822     //
4823     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4824       return FALSE;
4825     }
4826     *pbytes = 0;
4827   }
4828   return TRUE;
4829 }
4830 
4831 #define MAX_INPUT_EVENTS 2000
4832 
4833 // This code is a copy of JDK's stdinAvailable
4834 // from src/windows/hpi/src/sys_api_md.c
4835 
4836 static int stdinAvailable(int fd, long *pbytes) {
4837   HANDLE han;
4838   DWORD numEventsRead = 0;  // Number of events read from buffer
4839   DWORD numEvents = 0;      // Number of events in buffer
4840   DWORD i = 0;              // Loop index
4841   DWORD curLength = 0;      // Position marker
4842   DWORD actualLength = 0;   // Number of bytes readable
4843   BOOL error = FALSE;       // Error holder
4844   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4845 
4846   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4847     return FALSE;
4848   }
4849 
4850   // Construct an array of input records in the console buffer
4851   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4852   if (error == 0) {
4853     return nonSeekAvailable(fd, pbytes);
4854   }
4855 
4856   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4857   if (numEvents > MAX_INPUT_EVENTS) {
4858     numEvents = MAX_INPUT_EVENTS;
4859   }
4860 
4861   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4862   if (lpBuffer == NULL) {
4863     return FALSE;
4864   }
4865 
4866   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4867   if (error == 0) {
4868     os::free(lpBuffer);
4869     return FALSE;
4870   }
4871 
4872   // Examine input records for the number of bytes available
4873   for (i=0; i<numEventsRead; i++) {
4874     if (lpBuffer[i].EventType == KEY_EVENT) {
4875 
4876       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4877                                       &(lpBuffer[i].Event);
4878       if (keyRecord->bKeyDown == TRUE) {
4879         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4880         curLength++;
4881         if (*keyPressed == '\r') {
4882           actualLength = curLength;
4883         }
4884       }
4885     }
4886   }
4887 
4888   if (lpBuffer != NULL) {
4889     os::free(lpBuffer);
4890   }
4891 
4892   *pbytes = (long) actualLength;
4893   return TRUE;
4894 }
4895 
4896 // Map a block of memory.
4897 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4898                         char *addr, size_t bytes, bool read_only,
4899                         bool allow_exec) {
4900   HANDLE hFile;
4901   char* base;
4902 
4903   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4904                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4905   if (hFile == INVALID_HANDLE_VALUE) {
4906     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4907     return NULL;
4908   }
4909 
4910   if (allow_exec) {
4911     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4912     // unless it comes from a PE image (which the shared archive is not.)
4913     // Even VirtualProtect refuses to give execute access to mapped memory
4914     // that was not previously executable.
4915     //
4916     // Instead, stick the executable region in anonymous memory.  Yuck.
4917     // Penalty is that ~4 pages will not be shareable - in the future
4918     // we might consider DLLizing the shared archive with a proper PE
4919     // header so that mapping executable + sharing is possible.
4920 
4921     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4922                                 PAGE_READWRITE);
4923     if (base == NULL) {
4924       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4925       CloseHandle(hFile);
4926       return NULL;
4927     }
4928 
4929     DWORD bytes_read;
4930     OVERLAPPED overlapped;
4931     overlapped.Offset = (DWORD)file_offset;
4932     overlapped.OffsetHigh = 0;
4933     overlapped.hEvent = NULL;
4934     // ReadFile guarantees that if the return value is true, the requested
4935     // number of bytes were read before returning.
4936     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4937     if (!res) {
4938       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4939       release_memory(base, bytes);
4940       CloseHandle(hFile);
4941       return NULL;
4942     }
4943   } else {
4944     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4945                                     NULL /* file_name */);
4946     if (hMap == NULL) {
4947       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4948       CloseHandle(hFile);
4949       return NULL;
4950     }
4951 
4952     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4953     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4954                                   (DWORD)bytes, addr);
4955     if (base == NULL) {
4956       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4957       CloseHandle(hMap);
4958       CloseHandle(hFile);
4959       return NULL;
4960     }
4961 
4962     if (CloseHandle(hMap) == 0) {
4963       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4964       CloseHandle(hFile);
4965       return base;
4966     }
4967   }
4968 
4969   if (allow_exec) {
4970     DWORD old_protect;
4971     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4972     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4973 
4974     if (!res) {
4975       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4976       // Don't consider this a hard error, on IA32 even if the
4977       // VirtualProtect fails, we should still be able to execute
4978       CloseHandle(hFile);
4979       return base;
4980     }
4981   }
4982 
4983   if (CloseHandle(hFile) == 0) {
4984     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4985     return base;
4986   }
4987 
4988   return base;
4989 }
4990 
4991 // Remap a block of memory.
4992 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4993                           char *addr, size_t bytes, bool read_only,
4994                           bool allow_exec) {
4995   // This OS does not allow existing memory maps to be remapped so we
4996   // have to unmap the memory before we remap it.
4997   if (!os::unmap_memory(addr, bytes)) {
4998     return NULL;
4999   }
5000 
5001   // There is a very small theoretical window between the unmap_memory()
5002   // call above and the map_memory() call below where a thread in native
5003   // code may be able to access an address that is no longer mapped.
5004 
5005   return os::map_memory(fd, file_name, file_offset, addr, bytes,
5006                         read_only, allow_exec);
5007 }
5008 
5009 
5010 // Unmap a block of memory.
5011 // Returns true=success, otherwise false.
5012 
5013 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5014   MEMORY_BASIC_INFORMATION mem_info;
5015   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5016     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5017     return false;
5018   }
5019 
5020   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5021   // Instead, executable region was allocated using VirtualAlloc(). See
5022   // pd_map_memory() above.
5023   //
5024   // The following flags should match the 'exec_access' flags used for
5025   // VirtualProtect() in pd_map_memory().
5026   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5027       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5028     return pd_release_memory(addr, bytes);
5029   }
5030 
5031   BOOL result = UnmapViewOfFile(addr);
5032   if (result == 0) {
5033     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
5034     return false;
5035   }
5036   return true;
5037 }
5038 
5039 void os::pause() {
5040   char filename[MAX_PATH];
5041   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5042     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5043   } else {
5044     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5045   }
5046 
5047   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5048   if (fd != -1) {
5049     struct stat buf;
5050     ::close(fd);
5051     while (::stat(filename, &buf) == 0) {
5052       Sleep(100);
5053     }
5054   } else {
5055     jio_fprintf(stderr,
5056                 "Could not open pause file '%s', continuing immediately.\n", filename);
5057   }
5058 }
5059 
5060 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
5061   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
5062 }
5063 
5064 // See the caveats for this class in os_windows.hpp
5065 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5066 // into this method, which then returns false. If no OS EXCEPTION was raised,
5067 // it returns true.
5068 // The callback is supposed to provide the method that should be protected.
5069 //
5070 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5071   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
5072   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
5073          "crash_protection already set?");
5074 
5075   bool success = true;
5076   __try {
5077     WatcherThread::watcher_thread()->set_crash_protection(this);
5078     cb.call();
5079   } __except(EXCEPTION_EXECUTE_HANDLER) {
5080     // only for protection, nothing to do
5081     success = false;
5082   }
5083   WatcherThread::watcher_thread()->set_crash_protection(NULL);
5084   return success;
5085 }
5086 
5087 // An Event wraps a win32 "CreateEvent" kernel handle.
5088 //
5089 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5090 //
5091 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5092 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5093 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5094 //     In addition, an unpark() operation might fetch the handle field, but the
5095 //     event could recycle between the fetch and the SetEvent() operation.
5096 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5097 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5098 //     on a stale but recycled handle would be harmless, but in practice this might
5099 //     confuse other non-Sun code, so it's not a viable approach.
5100 //
5101 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5102 //     with the Event.  The event handle is never closed.  This could be construed
5103 //     as handle leakage, but only up to the maximum # of threads that have been extant
5104 //     at any one time.  This shouldn't be an issue, as Windows platforms typically
5105 //     permit a process to have hundreds of thousands of open handles.
5106 //
5107 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5108 //     and release unused handles.
5109 //
5110 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5111 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5112 //
5113 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5114 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5115 //
5116 // We use (2).
5117 //
5118 // TODO-FIXME:
5119 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5120 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5121 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5122 // 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
5123 //     into a single win32 CreateEvent() handle.
5124 //
5125 // Assumption:
5126 //    Only one parker can exist on an event, which is why we allocate
5127 //    them per-thread. Multiple unparkers can coexist.
5128 //
5129 // _Event transitions in park()
5130 //   -1 => -1 : illegal
5131 //    1 =>  0 : pass - return immediately
5132 //    0 => -1 : block; then set _Event to 0 before returning
5133 //
5134 // _Event transitions in unpark()
5135 //    0 => 1 : just return
5136 //    1 => 1 : just return
5137 //   -1 => either 0 or 1; must signal target thread
5138 //         That is, we can safely transition _Event from -1 to either
5139 //         0 or 1.
5140 //
5141 // _Event serves as a restricted-range semaphore.
5142 //   -1 : thread is blocked, i.e. there is a waiter
5143 //    0 : neutral: thread is running or ready,
5144 //        could have been signaled after a wait started
5145 //    1 : signaled - thread is running or ready
5146 //
5147 // Another possible encoding of _Event would be with
5148 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5149 //
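     // Illustrative pairing (sketch; 'ev' is a hypothetical per-thread event):
     //   // owning thread:                    // any other thread:
     //   ev->park();                          ev->unpark();
     //   int ret = ev->park(millis);          // returns OS_OK or OS_TIMEOUT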
5150 
5151 int os::PlatformEvent::park(jlong Millis) {
5152   // Transitions for _Event:
5153   //   -1 => -1 : illegal
5154   //    1 =>  0 : pass - return immediately
5155   //    0 => -1 : block; then set _Event to 0 before returning
5156 
5157   guarantee(_ParkHandle != NULL , "Invariant");
5158   guarantee(Millis > 0          , "Invariant");
5159 
5160   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5161   // the initial park() operation.
5162   // Consider: use atomic decrement instead of CAS-loop
5163 
5164   int v;
5165   for (;;) {
5166     v = _Event;
5167     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5168   }
5169   guarantee((v == 0) || (v == 1), "invariant");
5170   if (v != 0) return OS_OK;
5171 
5172   // Do this the hard way by blocking ...
5173   // TODO: consider a brief spin here, gated on the success of recent
5174   // spin attempts by this thread.
5175   //
5176   // We decompose long timeouts into series of shorter timed waits.
5177   // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
5178   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5179   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5180   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5181   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5182   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5183   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5184   // for the already waited time.  This policy does not admit any new outcomes.
5185   // In the future, however, we might want to track the accumulated wait time and
5186   // adjust Millis accordingly if we encounter a spurious wakeup.
5187 
5188   const int MAXTIMEOUT = 0x10000000;
5189   DWORD rv = WAIT_TIMEOUT;
5190   while (_Event < 0 && Millis > 0) {
5191     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5192     if (Millis > MAXTIMEOUT) {
5193       prd = MAXTIMEOUT;
5194     }
5195     rv = ::WaitForSingleObject(_ParkHandle, prd);
5196     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5197     if (rv == WAIT_TIMEOUT) {
5198       Millis -= prd;
5199     }
5200   }
5201   v = _Event;
5202   _Event = 0;
5203   // see comment at end of os::PlatformEvent::park() below:
5204   OrderAccess::fence();
5205   // If we encounter a nearly simultaneous timeout expiry and unpark()
5206   // we return OS_OK indicating we awoke via unpark().
5207   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5208   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5209 }
5210 
5211 void os::PlatformEvent::park() {
5212   // Transitions for _Event:
5213   //   -1 => -1 : illegal
5214   //    1 =>  0 : pass - return immediately
5215   //    0 => -1 : block; then set _Event to 0 before returning
5216 
5217   guarantee(_ParkHandle != NULL, "Invariant");
5218   // Invariant: Only the thread associated with the Event/PlatformEvent
5219   // may call park().
5220   // Consider: use atomic decrement instead of CAS-loop
5221   int v;
5222   for (;;) {
5223     v = _Event;
5224     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5225   }
5226   guarantee((v == 0) || (v == 1), "invariant");
5227   if (v != 0) return;
5228 
5229   // Do this the hard way by blocking ...
5230   // TODO: consider a brief spin here, gated on the success of recent
5231   // spin attempts by this thread.
5232   while (_Event < 0) {
5233     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5234     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5235   }
5236 
5237   // Usually we'll find _Event == 0 at this point, but as
5238   // an optional optimization we clear it, just in case
5239   // multiple unpark() operations drove _Event up to 1.
5240   _Event = 0;
5241   OrderAccess::fence();
5242   guarantee(_Event >= 0, "invariant");
5243 }
5244 
5245 void os::PlatformEvent::unpark() {
5246   guarantee(_ParkHandle != NULL, "Invariant");
5247 
5248   // Transitions for _Event:
5249   //    0 => 1 : just return
5250   //    1 => 1 : just return
5251   //   -1 => either 0 or 1; must signal target thread
5252   //         That is, we can safely transition _Event from -1 to either
5253   //         0 or 1.
5254   // See also: "Semaphores in Plan 9" by Mullender & Cox
5255   //
5256   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5257   // that it will take two back-to-back park() calls for the owning
5258   // thread to block. This has the benefit of forcing a spurious return
5259   // from the first park() call after an unpark() call which will help
5260   // shake out uses of park() and unpark() without condition variables.
5261 
5262   if (Atomic::xchg(1, &_Event) >= 0) return;
5263 
5264   ::SetEvent(_ParkHandle);
5265 }
5266 
5267 
5268 // JSR166
5269 // -------------------------------------------------------
5270 
5271 // The Windows implementation of Park is very straightforward: Basic
5272 // operations on Win32 Events turn out to have the right semantics to
5273 // use them directly. We opportunistically reuse the event inherited
5274 // from Monitor.
5275 
5276 void Parker::park(bool isAbsolute, jlong time) {
5277   guarantee(_ParkEvent != NULL, "invariant");
5278   // First, demultiplex/decode time arguments
5279   if (time < 0) { // don't wait
5280     return;
5281   } else if (time == 0 && !isAbsolute) {
5282     time = INFINITE;
5283   } else if (isAbsolute) {
5284     time -= os::javaTimeMillis(); // convert to relative time
5285     if (time <= 0) {  // already elapsed
5286       return;
5287     }
5288   } else { // relative
5289     time /= 1000000;  // Must coarsen from nanos to millis
5290     if (time == 0) {  // Wait for the minimal time unit if zero
5291       time = 1;
5292     }
5293   }
5294 
5295   JavaThread* thread = JavaThread::current();
5296 
5297   // Don't wait if interrupted or already triggered
5298   if (Thread::is_interrupted(thread, false) ||
5299       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5300     ResetEvent(_ParkEvent);
5301     return;
5302   } else {
5303     ThreadBlockInVM tbivm(thread);
5304     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5305     thread->set_suspend_equivalent();
5306 
5307     WaitForSingleObject(_ParkEvent, time);
5308     ResetEvent(_ParkEvent);
5309 
5310     // If externally suspended while waiting, re-suspend
5311     if (thread->handle_special_suspend_equivalent_condition()) {
5312       thread->java_suspend_self();
5313     }
5314   }
5315 }
5316 
5317 void Parker::unpark() {
5318   guarantee(_ParkEvent != NULL, "invariant");
5319   SetEvent(_ParkEvent);
5320 }
5321 
5322 // Run the specified command in a separate process. Return its exit value,
5323 // or -1 on failure (e.g. can't create a new process).
5324 int os::fork_and_exec(char* cmd) {
5325   STARTUPINFO si;
5326   PROCESS_INFORMATION pi;
5327 
5328   memset(&si, 0, sizeof(si));
5329   si.cb = sizeof(si);
5330   memset(&pi, 0, sizeof(pi));
5331   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5332                             cmd,    // command line
5333                             NULL,   // process security attribute
5334                             NULL,   // thread security attribute
5335                             TRUE,   // inherits system handles
5336                             0,      // no creation flags
5337                             NULL,   // use parent's environment block
5338                             NULL,   // use parent's starting directory
5339                             &si,    // (in) startup information
5340                             &pi);   // (out) process information
5341 
5342   if (rslt) {
5343     // Wait until child process exits.
5344     WaitForSingleObject(pi.hProcess, INFINITE);
5345 
5346     DWORD exit_code;
5347     GetExitCodeProcess(pi.hProcess, &exit_code);
5348 
5349     // Close process and thread handles.
5350     CloseHandle(pi.hProcess);
5351     CloseHandle(pi.hThread);
5352 
5353     return (int)exit_code;
5354   } else {
5355     return -1;
5356   }
5357 }
5358 
5359 bool os::find(address addr, outputStream* st) {
5360   int offset = -1;
5361   bool result = false;
5362   char buf[256];
5363   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5364     st->print(PTR_FORMAT " ", addr);
5365     if (strlen(buf) < sizeof(buf) - 1) {
5366       char* p = strrchr(buf, '\\');
5367       if (p) {
5368         st->print("%s", p + 1);
5369       } else {
5370         st->print("%s", buf);
5371       }
5372     } else {
5373         // The library name is probably truncated. Let's omit the library name.
5374         // See also JDK-8147512.
5375     }
5376     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5377       st->print("::%s + 0x%x", buf, offset);
5378     }
5379     st->cr();
5380     result = true;
5381   }
5382   return result;
5383 }
5384 
5385 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5386   DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5387 
5388   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
5389     JavaThread* thread = JavaThread::current();
5390     PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5391     address addr = (address) exceptionRecord->ExceptionInformation[1];
5392 
5393     if (os::is_memory_serialize_page(thread, addr)) {
5394       return EXCEPTION_CONTINUE_EXECUTION;
5395     }
5396   }
5397 
5398   return EXCEPTION_CONTINUE_SEARCH;
5399 }
5400 
5401 // We don't build a headless jre for Windows
5402 bool os::is_headless_jre() { return false; }
5403 
5404 static jint initSock() {
5405   WSADATA wsadata;
5406 
5407   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5408     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5409                 ::GetLastError());
5410     return JNI_ERR;
5411   }
5412   return JNI_OK;
5413 }
5414 
5415 struct hostent* os::get_host_by_name(char* name) {
5416   return (struct hostent*)gethostbyname(name);
5417 }
5418 
5419 int os::socket_close(int fd) {
5420   return ::closesocket(fd);
5421 }
5422 
5423 int os::socket(int domain, int type, int protocol) {
5424   return ::socket(domain, type, protocol);
5425 }
5426 
5427 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5428   return ::connect(fd, him, len);
5429 }
5430 
5431 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5432   return ::recv(fd, buf, (int)nBytes, flags);
5433 }
5434 
5435 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5436   return ::send(fd, buf, (int)nBytes, flags);
5437 }
5438 
5439 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5440   return ::send(fd, buf, (int)nBytes, flags);
5441 }
5442 
5443 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5444 #if defined(IA32)
5445   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5446 #elif defined (AMD64)
5447   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5448 #endif
5449 
5450 // returns true if thread could be suspended,
5451 // false otherwise
5452 static bool do_suspend(HANDLE* h) {
5453   if (h != NULL) {
5454     if (SuspendThread(*h) != ~0) {
5455       return true;
5456     }
5457   }
5458   return false;
5459 }
5460 
5461 // resume the thread
5462 // calling resume on an active thread is a no-op
5463 static void do_resume(HANDLE* h) {
5464   if (h != NULL) {
5465     ResumeThread(*h);
5466   }
5467 }
5468 
5469 // retrieve a suspend/resume context capable handle
5470 // from the tid. Caller validates handle return value.
5471 void get_thread_handle_for_extended_context(HANDLE* h,
5472                                             OSThread::thread_id_t tid) {
5473   if (h != NULL) {
5474     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5475   }
5476 }
5477 
5478 // Thread sampling implementation
5479 //
5480 void os::SuspendedThreadTask::internal_do_task() {
5481   CONTEXT    ctxt;
5482   HANDLE     h = NULL;
5483 
5484   // get context capable handle for thread
5485   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5486 
5487   // sanity
5488   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5489     return;
5490   }
5491 
5492   // suspend the thread
5493   if (do_suspend(&h)) {
5494     ctxt.ContextFlags = sampling_context_flags;
5495     // get thread context
5496     GetThreadContext(h, &ctxt);
5497     SuspendedThreadTaskContext context(_thread, &ctxt);
5498     // pass context to Thread Sampling impl
5499     do_task(context);
5500     // resume thread
5501     do_resume(&h);
5502   }
5503 
5504   // close handle
5505   CloseHandle(h);
5506 }
5507 
5508 bool os::start_debugging(char *buf, int buflen) {
5509   int len = (int)strlen(buf);
5510   char *p = &buf[len];
5511 
5512   jio_snprintf(p, buflen-len,
5513              "\n\n"
5514              "Do you want to debug the problem?\n\n"
5515              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5516              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5517              "Otherwise, select 'No' to abort...",
5518              os::current_process_id(), os::current_thread_id());
5519 
5520   bool yes = os::message_box("Unexpected Error", buf);
5521 
5522   if (yes) {
5523     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5524     // exception. If VM is running inside a debugger, the debugger will
5525     // catch the exception. Otherwise, the breakpoint exception will reach
5526     // the default windows exception handler, which can spawn a debugger and
5527     // automatically attach to the dying VM.
5528     os::breakpoint();
5529     yes = false;
5530   }
5531   return yes;
5532 }
5533 
5534 void* os::get_default_process_handle() {
5535   return (void*)GetModuleHandle(NULL);
5536 }
5537 
5538 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5539 // which is used to find statically linked in agents.
5540 // Additionally for Windows, takes into account __stdcall names.
5541 // Parameters:
5542 //            sym_name: Symbol in library we are looking for
5543 //            lib_name: Name of library to look in, NULL for shared libs.
5544 //            is_absolute_path == true if lib_name is absolute path to agent
5545 //                                     such as "C:/a/b/L.dll"
5546 //            == false if only the base name of the library is passed in
5547 //               such as "L"
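     //
     // Illustrative results (assuming JNI_LIB_PREFIX is empty and JNI_LIB_SUFFIX is
     // ".dll" on this platform):
     //   sym_name "Agent_OnLoad",     lib_name "C:\a\b\L.dll", absolute path  -> "Agent_OnLoad_L"
     //   sym_name "_Agent_OnLoad@16", lib_name "L",            base name      -> "_Agent_OnLoad_L@16"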
5548 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5549                                     bool is_absolute_path) {
5550   char *agent_entry_name;
5551   size_t len;
5552   size_t name_len;
5553   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5554   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5555   const char *start;
5556 
5557   if (lib_name != NULL) {
5558     len = name_len = strlen(lib_name);
5559     if (is_absolute_path) {
5560       // Need to strip path, prefix and suffix
5561       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5562         lib_name = ++start;
5563       } else {
5564         // Need to check for drive prefix
5565         if ((start = strchr(lib_name, ':')) != NULL) {
5566           lib_name = ++start;
5567         }
5568       }
5569       if (len <= (prefix_len + suffix_len)) {
5570         return NULL;
5571       }
5572       lib_name += prefix_len;
5573       name_len = strlen(lib_name) - suffix_len;
5574     }
5575   }
5576   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5577   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5578   if (agent_entry_name == NULL) {
5579     return NULL;
5580   }
5581   if (lib_name != NULL) {
5582     const char *p = strrchr(sym_name, '@');
5583     if (p != NULL && p != sym_name) {
5584       // sym_name == _Agent_OnLoad@XX
5585       strncpy(agent_entry_name, sym_name, (p - sym_name));
5586       agent_entry_name[(p-sym_name)] = '\0';
5587       // agent_entry_name == _Agent_OnLoad
5588       strcat(agent_entry_name, "_");
5589       strncat(agent_entry_name, lib_name, name_len);
5590       strcat(agent_entry_name, p);
5591       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5592     } else {
5593       strcpy(agent_entry_name, sym_name);
5594       strcat(agent_entry_name, "_");
5595       strncat(agent_entry_name, lib_name, name_len);
5596     }
5597   } else {
5598     strcpy(agent_entry_name, sym_name);
5599   }
5600   return agent_entry_name;
5601 }
5602 
5603 #ifndef PRODUCT
5604 
5605 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5606 // contiguous memory block at a particular address.
5607 // The test first tries to find a good approximate address to allocate at by using the same
5608 // method to allocate some memory at any address. The test then tries to allocate memory in
5609 // the vicinity (not directly after it to avoid possible by-chance use of that location)
5610 // This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5611 // the previously allocated memory is available for allocation. The only actual failure
5612 // that is reported is when the test tries to allocate at a particular location but gets a
5613 // different valid one. A NULL return value at this point is not considered an error but may
5614 // be legitimate.
5615 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5616 void TestReserveMemorySpecial_test() {
5617   if (!UseLargePages) {
5618     if (VerboseInternalVMTests) {
5619       tty->print("Skipping test because large pages are disabled");
5620     }
5621     return;
5622   }
5623   // save current value of globals
5624   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5625   bool old_use_numa_interleaving = UseNUMAInterleaving;
5626 
5627   // set globals to make sure we hit the correct code path
5628   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5629 
5630   // do an allocation at an address selected by the OS to get a good one.
5631   const size_t large_allocation_size = os::large_page_size() * 4;
5632   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5633   if (result == NULL) {
5634     if (VerboseInternalVMTests) {
5635       tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5636                           large_allocation_size);
5637     }
5638   } else {
5639     os::release_memory_special(result, large_allocation_size);
5640 
5641     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5642     // we managed to get it once.
5643     const size_t expected_allocation_size = os::large_page_size();
5644     char* expected_location = result + os::large_page_size();
5645     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5646     if (actual_location == NULL) {
5647       if (VerboseInternalVMTests) {
5648         tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5649                             expected_location, large_allocation_size);
5650       }
5651     } else {
5652       // release memory
5653       os::release_memory_special(actual_location, expected_allocation_size);
5654       // only now check, after releasing any memory to avoid any leaks.
5655       assert(actual_location == expected_location,
5656              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5657              expected_location, expected_allocation_size, actual_location);
5658     }
5659   }
5660 
5661   // restore globals
5662   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5663   UseNUMAInterleaving = old_use_numa_interleaving;
5664 }
5665 #endif // PRODUCT
5666 
5667 /*
5668   All the defined signal names for Windows.
5669 
5670   NOTE that not all of these names are accepted by FindSignal!
5671 
5672   For various reasons some of these may be rejected at runtime.
5673 
5674   Here are the names currently accepted by a user of sun.misc.Signal with
5675   1.4.1 (ignoring potential interaction with use of chaining, etc):
5676 
5677      (LIST TBD)
5678 
5679 */
5680 int os::get_signal_number(const char* name) {
5681   static const struct {
5682     const char* name;
5683     int   number;
5684   } siglabels [] =
5685     // derived from version 6.0 VC98/include/signal.h
5686   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5687   "FPE",        SIGFPE,         // floating point exception
5688   "SEGV",       SIGSEGV,        // segment violation
5689   "INT",        SIGINT,         // interrupt
5690   "TERM",       SIGTERM,        // software term signal from kill
5691   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5692   "ILL",        SIGILL};        // illegal instruction
5693   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5694     if (strcmp(name, siglabels[i].name) == 0) {
5695       return siglabels[i].number;
5696     }
5697   }
5698   return -1;
5699 }
5700 
5701 // Fast current thread access
5702 
5703 int os::win32::_thread_ptr_offset = 0;
5704 
5705 static void call_wrapper_dummy() {}
5706 
5707 // We need to call the os_exception_wrapper once so that it sets
5708 // up the offset from FS of the thread pointer.
5709 void os::win32::initialize_thread_ptr_offset() {
5710   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5711                            NULL, NULL, NULL, NULL);
5712 }