1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "classfile/classLoader.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/disassembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "jvm_windows.h"
  38 #include "logging/log.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/filemap.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "os_share_windows.hpp"
  43 #include "os_windows.inline.hpp"
  44 #include "prims/jniFastGetField.hpp"
  45 #include "prims/jvm.h"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.inline.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "semaphore_windows.hpp"
  67 #include "services/attachListener.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/growableArray.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 
  77 #ifdef _DEBUG
  78 #include <crtdbg.h>
  79 #endif
  80 
  81 
  82 #include <windows.h>
  83 #include <sys/types.h>
  84 #include <sys/stat.h>
  85 #include <sys/timeb.h>
  86 #include <objidl.h>
  87 #include <shlobj.h>
  88 
  89 #include <malloc.h>
  90 #include <signal.h>
  91 #include <direct.h>
  92 #include <errno.h>
  93 #include <fcntl.h>
  94 #include <io.h>
  95 #include <process.h>              // For _beginthreadex(), _endthreadex()
  96 #include <imagehlp.h>             // For os::dll_address_to_function_name
  97 // for enumerating dll libraries
  98 #include <vdmdbg.h>
  99 
 100 // for timer info max values which include all bits
 101 #define ALL_64_BITS CONST64(-1)
 102 
// For DLL loading/load error detection.
// Offsets/lengths from the PE/COFF file format:
 105 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 106 #define IMAGE_FILE_SIGNATURE_LENGTH 4
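
// Illustrative sketch (assumption: 'fd' is a file descriptor opened on a DLL):
// os::dll_load() below uses these constants to read the PE/COFF headers roughly
// like this --
//
//   uint32_t sig_offset;   // offset 0x3c holds the file offset of "PE\0\0"
//   os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE);
//   os::read(fd, &sig_offset, sizeof(sig_offset));
//
//   uint16_t machine;      // IMAGE_FILE_HEADER.Machine follows the signature
//   os::seek_to_file_offset(fd, sig_offset + IMAGE_FILE_SIGNATURE_LENGTH);
//   os::read(fd, &machine, sizeof(machine));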
 107 
 108 static HANDLE main_process;
 109 static HANDLE main_thread;
 110 static int    main_thread_id;
 111 
 112 static FILETIME process_creation_time;
 113 static FILETIME process_exit_time;
 114 static FILETIME process_user_time;
 115 static FILETIME process_kernel_time;
 116 
 117 #ifdef _M_IA64
 118   #define __CPU__ ia64
 119 #else
 120   #ifdef _M_AMD64
 121     #define __CPU__ amd64
 122   #else
 123     #define __CPU__ i486
 124   #endif
 125 #endif
 126 
 127 // save DLL module handle, used by GetModuleFileName
 128 
 129 HINSTANCE vm_lib_handle;
 130 
 131 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 132   switch (reason) {
 133   case DLL_PROCESS_ATTACH:
 134     vm_lib_handle = hinst;
 135     if (ForceTimeHighResolution) {
 136       timeBeginPeriod(1L);
 137     }
 138     break;
 139   case DLL_PROCESS_DETACH:
 140     if (ForceTimeHighResolution) {
 141       timeEndPeriod(1L);
 142     }
 143     break;
 144   default:
 145     break;
 146   }
 147   return true;
 148 }
 149 
 150 static inline double fileTimeAsDouble(FILETIME* time) {
 151   const double high  = (double) ((unsigned int) ~0);
 152   const double split = 10000000.0;
 153   double result = (time->dwLowDateTime / split) +
 154                    time->dwHighDateTime * (high/split);
 155   return result;
 156 }
 157 
 158 // Implementation of os
 159 
 160 bool os::unsetenv(const char* name) {
 161   assert(name != NULL, "Null pointer");
 162   return (SetEnvironmentVariable(name, NULL) == TRUE);
 163 }
 164 
 165 // No setuid programs under Windows.
 166 bool os::have_special_privileges() {
 167   return false;
 168 }
 169 
 170 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.
// On Windows it currently does nothing.
 174 void os::run_periodic_checks() {
 175   return;
 176 }
 177 
 178 // previous UnhandledExceptionFilter, if there is one
 179 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 180 
 181 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 182 
 183 void os::init_system_properties_values() {
 184   // sysclasspath, java_home, dll_dir
 185   {
 186     char *home_path;
 187     char *dll_path;
 188     char *pslash;
 189     char *bin = "\\bin";
 190     char home_dir[MAX_PATH + 1];
 191     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 192 
 193     if (alt_home_dir != NULL)  {
 194       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 195       home_dir[MAX_PATH] = '\0';
 196     } else {
 197       os::jvm_path(home_dir, sizeof(home_dir));
 198       // Found the full path to jvm.dll.
 199       // Now cut the path to <java_home>/jre if we can.
 200       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 201       pslash = strrchr(home_dir, '\\');
 202       if (pslash != NULL) {
 203         *pslash = '\0';                   // get rid of \{client|server}
 204         pslash = strrchr(home_dir, '\\');
 205         if (pslash != NULL) {
 206           *pslash = '\0';                 // get rid of \bin
 207         }
 208       }
 209     }
 210 
 211     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 212     if (home_path == NULL) {
 213       return;
 214     }
 215     strcpy(home_path, home_dir);
 216     Arguments::set_java_home(home_path);
 217     FREE_C_HEAP_ARRAY(char, home_path);
 218 
 219     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 220                                 mtInternal);
 221     if (dll_path == NULL) {
 222       return;
 223     }
 224     strcpy(dll_path, home_dir);
 225     strcat(dll_path, bin);
 226     Arguments::set_dll_dir(dll_path);
 227     FREE_C_HEAP_ARRAY(char, dll_path);
 228 
 229     if (!set_boot_path('\\', ';')) {
 230       return;
 231     }
 232   }
 233 
 234 // library_path
 235 #define EXT_DIR "\\lib\\ext"
 236 #define BIN_DIR "\\bin"
 237 #define PACKAGE_DIR "\\Sun\\Java"
 238   {
 239     // Win32 library search order (See the documentation for LoadLibrary):
 240     //
    // 1. The directory from which the application is loaded.
 242     // 2. The system wide Java Extensions directory (Java only)
 243     // 3. System directory (GetSystemDirectory)
 244     // 4. Windows directory (GetWindowsDirectory)
 245     // 5. The PATH environment variable
 246     // 6. The current directory
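
    // Purely illustrative (the actual directories are queried at runtime): with
    // the Windows directory at C:\Windows, the code below typically produces a
    // library_path along the lines of
    //
    //   <dir containing the launcher>;C:\Windows\Sun\Java\bin;
    //   C:\Windows\system32;C:\Windows;<contents of %PATH%>;.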
 247 
 248     char *library_path;
 249     char tmp[MAX_PATH];
 250     char *path_str = ::getenv("PATH");
 251 
 252     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 253                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 254 
 255     library_path[0] = '\0';
 256 
 257     GetModuleFileName(NULL, tmp, sizeof(tmp));
 258     *(strrchr(tmp, '\\')) = '\0';
 259     strcat(library_path, tmp);
 260 
 261     GetWindowsDirectory(tmp, sizeof(tmp));
 262     strcat(library_path, ";");
 263     strcat(library_path, tmp);
 264     strcat(library_path, PACKAGE_DIR BIN_DIR);
 265 
 266     GetSystemDirectory(tmp, sizeof(tmp));
 267     strcat(library_path, ";");
 268     strcat(library_path, tmp);
 269 
 270     GetWindowsDirectory(tmp, sizeof(tmp));
 271     strcat(library_path, ";");
 272     strcat(library_path, tmp);
 273 
 274     if (path_str) {
 275       strcat(library_path, ";");
 276       strcat(library_path, path_str);
 277     }
 278 
 279     strcat(library_path, ";.");
 280 
 281     Arguments::set_library_path(library_path);
 282     FREE_C_HEAP_ARRAY(char, library_path);
 283   }
 284 
 285   // Default extensions directory
 286   {
 287     char path[MAX_PATH];
 288     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 289     GetWindowsDirectory(path, MAX_PATH);
 290     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 291             path, PACKAGE_DIR, EXT_DIR);
 292     Arguments::set_ext_dirs(buf);
 293   }
 294   #undef EXT_DIR
 295   #undef BIN_DIR
 296   #undef PACKAGE_DIR
 297 
 298 #ifndef _WIN64
 299   // set our UnhandledExceptionFilter and save any previous one
 300   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 301 #endif
 302 
 303   // Done
 304   return;
 305 }
 306 
 307 void os::breakpoint() {
 308   DebugBreak();
 309 }
 310 
 311 // Invoked from the BREAKPOINT Macro
 312 extern "C" void breakpoint() {
 313   os::breakpoint();
 314 }
 315 
 316 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 317 // So far, this method is only used by Native Memory Tracking, which is
 318 // only supported on Windows XP or later.
 319 //
 320 int os::get_native_stack(address* stack, int frames, int toSkip) {
 321   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 322   for (int index = captured; index < frames; index ++) {
 323     stack[index] = NULL;
 324   }
 325   return captured;
 326 }
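
// A usage sketch (illustrative only; the buffer size and skip count are
// arbitrary here):
//
//   address frames[8];
//   int n = os::get_native_stack(frames, 8, 0 /* toSkip */);
//   // frames[0..n-1] hold return addresses; remaining slots were NULL-filled.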
 327 
 328 
 329 // os::current_stack_base()
 330 //
 331 //   Returns the base of the stack, which is the stack's
 332 //   starting address.  This function must be called
 333 //   while running on the stack of the thread being queried.
 334 
 335 address os::current_stack_base() {
 336   MEMORY_BASIC_INFORMATION minfo;
 337   address stack_bottom;
 338   size_t stack_size;
 339 
 340   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 341   stack_bottom =  (address)minfo.AllocationBase;
 342   stack_size = minfo.RegionSize;
 343 
 344   // Add up the sizes of all the regions with the same
 345   // AllocationBase.
 346   while (1) {
 347     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 348     if (stack_bottom == (address)minfo.AllocationBase) {
 349       stack_size += minfo.RegionSize;
 350     } else {
 351       break;
 352     }
 353   }
 354 
 355 #ifdef _M_IA64
 356   // IA64 has memory and register stacks
 357   //
 358   // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
 359   // at thread creation (1MB backing store growing upwards, 1MB memory stack
 360   // growing downwards, 2MB summed up)
 361   //
 362   // ...
 363   // ------- top of stack (high address) -----
 364   // |
 365   // |      1MB
 366   // |      Backing Store (Register Stack)
 367   // |
 368   // |         / \
 369   // |          |
 370   // |          |
 371   // |          |
 372   // ------------------------ stack base -----
 373   // |      1MB
 374   // |      Memory Stack
 375   // |
 376   // |          |
 377   // |          |
 378   // |          |
 379   // |         \ /
 380   // |
 381   // ----- bottom of stack (low address) -----
 382   // ...
 383 
 384   stack_size = stack_size / 2;
 385 #endif
 386   return stack_bottom + stack_size;
 387 }
 388 
 389 size_t os::current_stack_size() {
 390   size_t sz;
 391   MEMORY_BASIC_INFORMATION minfo;
 392   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 393   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 394   return sz;
 395 }
 396 
 397 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 398   const struct tm* time_struct_ptr = localtime(clock);
 399   if (time_struct_ptr != NULL) {
 400     *res = *time_struct_ptr;
 401     return res;
 402   }
 403   return NULL;
 404 }
 405 
 406 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 407   const struct tm* time_struct_ptr = gmtime(clock);
 408   if (time_struct_ptr != NULL) {
 409     *res = *time_struct_ptr;
 410     return res;
 411   }
 412   return NULL;
 413 }
 414 
 415 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 416 
 417 // Thread start routine for all newly created threads
 418 static unsigned __stdcall thread_native_entry(Thread* thread) {
 419   // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially noticeable on
  // processors with hyper-threading technology.
 424   static int counter = 0;
 425   int pid = os::current_process_id();
 426   _alloca(((pid ^ counter++) & 7) * 128);
 427 
 428   thread->initialize_thread_current();
 429 
 430   OSThread* osthr = thread->osthread();
 431   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 432 
 433   if (UseNUMA) {
 434     int lgrp_id = os::numa_get_group_id();
 435     if (lgrp_id != -1) {
 436       thread->set_lgrp_id(lgrp_id);
 437     }
 438   }
 439 
 440   // Diagnostic code to investigate JDK-6573254
 441   int res = 30115;  // non-java thread
 442   if (thread->is_Java_thread()) {
 443     res = 20115;    // java thread
 444   }
 445 
 446   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 447 
  // Install a win32 structured exception handler around every thread created
  // by the VM, so the VM can generate an error dump when an exception occurs
  // in a non-Java thread (e.g. the VM thread).
 451   __try {
 452     thread->run();
 453   } __except(topLevelExceptionFilter(
 454                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 455     // Nothing to do.
 456   }
 457 
 458   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 459 
  // One less thread is executing.
  // When the VMThread gets here, the main thread may have already exited,
  // which frees the CodeHeap containing the Atomic::add code.
 463   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 464     Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
 465   }
 466 
 467   // If a thread has not deleted itself ("delete this") as part of its
 468   // termination sequence, we have to ensure thread-local-storage is
 469   // cleared before we actually terminate. No threads should ever be
 470   // deleted asynchronously with respect to their termination.
 471   if (Thread::current_or_null_safe() != NULL) {
 472     assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
 473     thread->clear_thread_current();
 474   }
 475 
 476   // Thread must not return from exit_process_or_thread(), but if it does,
 477   // let it proceed to exit normally
 478   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 479 }
 480 
 481 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 482                                   int thread_id) {
 483   // Allocate the OSThread object
 484   OSThread* osthread = new OSThread(NULL, NULL);
 485   if (osthread == NULL) return NULL;
 486 
 487   // Initialize support for Java interrupts
 488   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 489   if (interrupt_event == NULL) {
 490     delete osthread;
 491     return NULL;
 492   }
 493   osthread->set_interrupt_event(interrupt_event);
 494 
 495   // Store info on the Win32 thread into the OSThread
 496   osthread->set_thread_handle(thread_handle);
 497   osthread->set_thread_id(thread_id);
 498 
 499   if (UseNUMA) {
 500     int lgrp_id = os::numa_get_group_id();
 501     if (lgrp_id != -1) {
 502       thread->set_lgrp_id(lgrp_id);
 503     }
 504   }
 505 
 506   // Initial thread state is INITIALIZED, not SUSPENDED
 507   osthread->set_state(INITIALIZED);
 508 
 509   return osthread;
 510 }
 511 
 512 
 513 bool os::create_attached_thread(JavaThread* thread) {
 514 #ifdef ASSERT
 515   thread->verify_not_published();
 516 #endif
 517   HANDLE thread_h;
 518   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 519                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 520     fatal("DuplicateHandle failed\n");
 521   }
 522   OSThread* osthread = create_os_thread(thread, thread_h,
 523                                         (int)current_thread_id());
 524   if (osthread == NULL) {
 525     return false;
 526   }
 527 
 528   // Initial thread state is RUNNABLE
 529   osthread->set_state(RUNNABLE);
 530 
 531   thread->set_osthread(osthread);
 532 
 533   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 534     os::current_thread_id());
 535 
 536   return true;
 537 }
 538 
 539 bool os::create_main_thread(JavaThread* thread) {
 540 #ifdef ASSERT
 541   thread->verify_not_published();
 542 #endif
 543   if (_starting_thread == NULL) {
 544     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 545     if (_starting_thread == NULL) {
 546       return false;
 547     }
 548   }
 549 
  // The primordial thread is runnable from the start.
 551   _starting_thread->set_state(RUNNABLE);
 552 
 553   thread->set_osthread(_starting_thread);
 554   return true;
 555 }
 556 
 557 // Helper function to trace _beginthreadex attributes,
 558 //  similar to os::Posix::describe_pthread_attr()
 559 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 560                                                size_t stacksize, unsigned initflag) {
 561   stringStream ss(buf, buflen);
 562   if (stacksize == 0) {
 563     ss.print("stacksize: default, ");
 564   } else {
 565     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 566   }
 567   ss.print("flags: ");
 568   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 569   #define ALL(X) \
 570     X(CREATE_SUSPENDED) \
 571     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 572   ALL(PRINT_FLAG)
 573   #undef ALL
 574   #undef PRINT_FLAG
 575   return buf;
 576 }
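
// For example (illustrative values, assuming a large enough buffer), a 512k
// reservation with both flags set is described as
//   "stacksize: 512k, flags: CREATE_SUSPENDED STACK_SIZE_PARAM_IS_A_RESERVATION "
// while a zero stacksize prints "stacksize: default, ...".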
 577 
 578 // Allocate and initialize a new OSThread
 579 bool os::create_thread(Thread* thread, ThreadType thr_type,
 580                        size_t stack_size) {
 581   unsigned thread_id;
 582 
 583   // Allocate the OSThread object
 584   OSThread* osthread = new OSThread(NULL, NULL);
 585   if (osthread == NULL) {
 586     return false;
 587   }
 588 
 589   // Initialize support for Java interrupts
 590   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 591   if (interrupt_event == NULL) {
 592     delete osthread;
    return false;
 594   }
 595   osthread->set_interrupt_event(interrupt_event);
 596   osthread->set_interrupted(false);
 597 
 598   thread->set_osthread(osthread);
 599 
 600   if (stack_size == 0) {
 601     switch (thr_type) {
 602     case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
 604       if (JavaThread::stack_size_at_create() > 0) {
 605         stack_size = JavaThread::stack_size_at_create();
 606       }
 607       break;
 608     case os::compiler_thread:
 609       if (CompilerThreadStackSize > 0) {
 610         stack_size = (size_t)(CompilerThreadStackSize * K);
 611         break;
 612       } // else fall through:
 613         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 614     case os::vm_thread:
 615     case os::pgc_thread:
 616     case os::cgc_thread:
 617     case os::watcher_thread:
 618       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 619       break;
 620     }
 621   }
 622 
 623   // Create the Win32 thread
 624   //
  // Contrary to what the MSDN documentation says, "stack_size" in
  // _beginthreadex() does not specify the stack size. Instead, it specifies
  // the size of the initially committed space. The stack size is determined
  // by the PE header in the executable. If the committed "stack_size" is
  // larger than the default value in the PE header, the stack is rounded up
  // to the nearest multiple of 1MB. For example, if the launcher has a
  // default stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all; it only affects the initial
  // commitment. On the other hand, specifying a 'stack_size' larger than the
  // default value may cause a significant increase in memory usage, because
  // not only is the stack space rounded up to a multiple of 1MB, but the
  // entire space is also committed upfront.
  //
  // Finally, Windows XP added a new flag, 'STACK_SIZE_PARAM_IS_A_RESERVATION',
  // for CreateThread() that makes it treat 'stack_size' as the stack size.
  // However, we are not supposed to call CreateThread() directly according to
  // MSDN, because the JVM uses the C runtime library. The good news is that
  // the flag appears to work with _beginthreadex() as well.
 643 
 644   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 645   HANDLE thread_handle =
 646     (HANDLE)_beginthreadex(NULL,
 647                            (unsigned)stack_size,
 648                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 649                            thread,
 650                            initflag,
 651                            &thread_id);
 652 
 653   char buf[64];
 654   if (thread_handle != NULL) {
 655     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 656       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 657   } else {
 658     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 659       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 660   }
 661 
 662   if (thread_handle == NULL) {
 663     // Need to clean up stuff we've allocated so far
 664     CloseHandle(osthread->interrupt_event());
 665     thread->set_osthread(NULL);
 666     delete osthread;
    return false;
 668   }
 669 
 670   Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
 671 
 672   // Store info on the Win32 thread into the OSThread
 673   osthread->set_thread_handle(thread_handle);
 674   osthread->set_thread_id(thread_id);
 675 
 676   // Initial thread state is INITIALIZED, not SUSPENDED
 677   osthread->set_state(INITIALIZED);
 678 
 679   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 680   return true;
 681 }
 682 
 683 
 684 // Free Win32 resources related to the OSThread
 685 void os::free_thread(OSThread* osthread) {
 686   assert(osthread != NULL, "osthread not set");
 687 
 688   // We are told to free resources of the argument thread,
 689   // but we can only really operate on the current thread.
 690   assert(Thread::current()->osthread() == osthread,
 691          "os::free_thread but not current thread");
 692 
 693   CloseHandle(osthread->thread_handle());
 694   CloseHandle(osthread->interrupt_event());
 695   delete osthread;
 696 }
 697 
 698 static jlong first_filetime;
 699 static jlong initial_performance_count;
 700 static jlong performance_frequency;
 701 
 702 
 703 jlong as_long(LARGE_INTEGER x) {
 704   jlong result = 0; // initialization to avoid warning
 705   set_high(&result, x.HighPart);
 706   set_low(&result, x.LowPart);
 707   return result;
 708 }
 709 
 710 
 711 jlong os::elapsed_counter() {
 712   LARGE_INTEGER count;
 713   QueryPerformanceCounter(&count);
 714   return as_long(count) - initial_performance_count;
 715 }
 716 
 717 
 718 jlong os::elapsed_frequency() {
 719   return performance_frequency;
 720 }
 721 
 722 
 723 julong os::available_memory() {
 724   return win32::available_memory();
 725 }
 726 
 727 julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
  // incorrect value if total memory is larger than 4GB
 730   MEMORYSTATUSEX ms;
 731   ms.dwLength = sizeof(ms);
 732   GlobalMemoryStatusEx(&ms);
 733 
 734   return (julong)ms.ullAvailPhys;
 735 }
 736 
 737 julong os::physical_memory() {
 738   return win32::physical_memory();
 739 }
 740 
 741 bool os::has_allocatable_memory_limit(julong* limit) {
 742   MEMORYSTATUSEX ms;
 743   ms.dwLength = sizeof(ms);
 744   GlobalMemoryStatusEx(&ms);
 745 #ifdef _LP64
 746   *limit = (julong)ms.ullAvailVirtual;
 747   return true;
 748 #else
 749   // Limit to 1400m because of the 2gb address space wall
 750   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 751   return true;
 752 #endif
 753 }
 754 
 755 int os::active_processor_count() {
 756   DWORD_PTR lpProcessAffinityMask = 0;
 757   DWORD_PTR lpSystemAffinityMask = 0;
 758   int proc_count = processor_count();
 759   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 760       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // The number of active processors is the number of set bits in the process affinity mask
 762     int bitcount = 0;
 763     while (lpProcessAffinityMask != 0) {
 764       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 765       bitcount++;
 766     }
 767     return bitcount;
 768   } else {
 769     return proc_count;
 770   }
 771 }
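
// The loop above counts set bits with the classic n &= (n - 1) trick, which
// clears the lowest set bit on each iteration. Illustrative example: a mask of
// 0xB (binary 1011) takes three iterations, 1011 -> 1010 -> 1000 -> 0, so
// three processors are reported active.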
 772 
 773 void os::set_native_thread_name(const char *name) {
 774 
 775   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 776   //
 777   // Note that unfortunately this only works if the process
  // is already attached to a debugger; the debugger must observe
 779   // the exception below to show the correct name.
 780 
 781   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 782   struct {
 783     DWORD dwType;     // must be 0x1000
 784     LPCSTR szName;    // pointer to name (in user addr space)
 785     DWORD dwThreadID; // thread ID (-1=caller thread)
 786     DWORD dwFlags;    // reserved for future use, must be zero
 787   } info;
 788 
 789   info.dwType = 0x1000;
 790   info.szName = name;
 791   info.dwThreadID = -1;
 792   info.dwFlags = 0;
 793 
 794   __try {
 795     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 796   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 797 }
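
// A minimal usage sketch (the thread name is an arbitrary example); the name
// only becomes visible if a debugger is attached and sees the exception above:
//
//   os::set_native_thread_name("VM Periodic Task Thread");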
 798 
 799 bool os::distribute_processes(uint length, uint* distribution) {
 800   // Not yet implemented.
 801   return false;
 802 }
 803 
 804 bool os::bind_to_processor(uint processor_id) {
 805   // Not yet implemented.
 806   return false;
 807 }
 808 
 809 void os::win32::initialize_performance_counter() {
 810   LARGE_INTEGER count;
 811   QueryPerformanceFrequency(&count);
 812   performance_frequency = as_long(count);
 813   QueryPerformanceCounter(&count);
 814   initial_performance_count = as_long(count);
 815 }
 816 
 817 
 818 double os::elapsedTime() {
 819   return (double) elapsed_counter() / (double) elapsed_frequency();
 820 }
 821 
 822 
 823 // Windows format:
 824 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 825 // Java format:
 826 //   Java standards require the number of milliseconds since 1/1/1970
 827 
 828 // Constant offset - calculated using offset()
 829 static jlong  _offset   = 116444736000000000;
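
// For reference, the constant can be derived as follows: 1601-01-01 to
// 1970-01-01 is 134774 days = 134774 * 86400 = 11644473600 seconds, and
// FILETIME counts 100ns units (1e7 per second), giving
// 11644473600 * 10000000 = 116444736000000000.
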
 830 // Fake time counter for reproducible results when debugging
 831 static jlong  fake_time = 0;
 832 
 833 #ifdef ASSERT
 834 // Just to be safe, recalculate the offset in debug mode
 835 static jlong _calculated_offset = 0;
 836 static int   _has_calculated_offset = 0;
 837 
 838 jlong offset() {
 839   if (_has_calculated_offset) return _calculated_offset;
 840   SYSTEMTIME java_origin;
 841   java_origin.wYear          = 1970;
 842   java_origin.wMonth         = 1;
 843   java_origin.wDayOfWeek     = 0; // ignored
 844   java_origin.wDay           = 1;
 845   java_origin.wHour          = 0;
 846   java_origin.wMinute        = 0;
 847   java_origin.wSecond        = 0;
 848   java_origin.wMilliseconds  = 0;
 849   FILETIME jot;
 850   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 851     fatal("Error = %d\nWindows error", GetLastError());
 852   }
 853   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 854   _has_calculated_offset = 1;
 855   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 856   return _calculated_offset;
 857 }
 858 #else
 859 jlong offset() {
 860   return _offset;
 861 }
 862 #endif
 863 
 864 jlong windows_to_java_time(FILETIME wt) {
 865   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 866   return (a - offset()) / 10000;
 867 }
 868 
// Returns time ticks in tenths of microseconds (i.e. 100ns units)
 870 jlong windows_to_time_ticks(FILETIME wt) {
 871   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 872   return (a - offset());
 873 }
 874 
 875 FILETIME java_to_windows_time(jlong l) {
 876   jlong a = (l * 10000) + offset();
 877   FILETIME result;
 878   result.dwHighDateTime = high(a);
 879   result.dwLowDateTime  = low(a);
 880   return result;
 881 }
 882 
 883 bool os::supports_vtime() { return true; }
 884 bool os::enable_vtime() { return false; }
 885 bool os::vtime_enabled() { return false; }
 886 
 887 double os::elapsedVTime() {
 888   FILETIME created;
 889   FILETIME exited;
 890   FILETIME kernel;
 891   FILETIME user;
 892   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 893     // the resolution of windows_to_java_time() should be sufficient (ms)
 894     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 895   } else {
 896     return elapsedTime();
 897   }
 898 }
 899 
 900 jlong os::javaTimeMillis() {
 901   if (UseFakeTimers) {
 902     return fake_time++;
 903   } else {
 904     FILETIME wt;
 905     GetSystemTimeAsFileTime(&wt);
 906     return windows_to_java_time(wt);
 907   }
 908 }
 909 
 910 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 911   FILETIME wt;
 912   GetSystemTimeAsFileTime(&wt);
 913   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 914   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 915   seconds = secs;
 916   nanos = jlong(ticks - (secs*10000000)) * 100;
 917 }
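
// Worked example (illustrative numbers): ticks == 12345678901 (100ns units)
// gives secs == 1234 and nanos == 5678901 * 100 == 567890100, i.e. the instant
// 1234.5678901 seconds past the epoch.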
 918 
 919 jlong os::javaTimeNanos() {
 920     LARGE_INTEGER current_count;
 921     QueryPerformanceCounter(&current_count);
 922     double current = as_long(current_count);
 923     double freq = performance_frequency;
 924     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 925     return time;
 926 }
 927 
 928 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 929   jlong freq = performance_frequency;
 930   if (freq < NANOSECS_PER_SEC) {
 931     // the performance counter is 64 bits and we will
 932     // be multiplying it -- so no wrap in 64 bits
 933     info_ptr->max_value = ALL_64_BITS;
 934   } else if (freq > NANOSECS_PER_SEC) {
 935     // use the max value the counter can reach to
 936     // determine the max value which could be returned
 937     julong max_counter = (julong)ALL_64_BITS;
 938     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 939   } else {
 940     // the performance counter is 64 bits and we will
 941     // be using it directly -- so no wrap in 64 bits
 942     info_ptr->max_value = ALL_64_BITS;
 943   }
 944 
 945   // using a counter, so no skipping
 946   info_ptr->may_skip_backward = false;
 947   info_ptr->may_skip_forward = false;
 948 
 949   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 950 }
 951 
 952 char* os::local_time_string(char *buf, size_t buflen) {
 953   SYSTEMTIME st;
 954   GetLocalTime(&st);
 955   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 956                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 957   return buf;
 958 }
 959 
 960 bool os::getTimesSecs(double* process_real_time,
 961                       double* process_user_time,
 962                       double* process_system_time) {
 963   HANDLE h_process = GetCurrentProcess();
 964   FILETIME create_time, exit_time, kernel_time, user_time;
 965   BOOL result = GetProcessTimes(h_process,
 966                                 &create_time,
 967                                 &exit_time,
 968                                 &kernel_time,
 969                                 &user_time);
 970   if (result != 0) {
 971     FILETIME wt;
 972     GetSystemTimeAsFileTime(&wt);
 973     jlong rtc_millis = windows_to_java_time(wt);
 974     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 975     *process_user_time =
 976       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 977     *process_system_time =
 978       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
 979     return true;
 980   } else {
 981     return false;
 982   }
 983 }
 984 
 985 void os::shutdown() {
 986   // allow PerfMemory to attempt cleanup of any persistent resources
 987   perfMemory_exit();
 988 
 989   // flush buffered output, finish log files
 990   ostream_abort();
 991 
 992   // Check for abort hook
 993   abort_hook_t abort_hook = Arguments::abort_hook();
 994   if (abort_hook != NULL) {
 995     abort_hook();
 996   }
 997 }
 998 
 999 
1000 static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1001                                          PMINIDUMP_EXCEPTION_INFORMATION,
1002                                          PMINIDUMP_USER_STREAM_INFORMATION,
1003                                          PMINIDUMP_CALLBACK_INFORMATION);
1004 
1005 static HANDLE dumpFile = NULL;
1006 
1007 // Check if dump file can be created.
1008 void os::check_dump_limit(char* buffer, size_t buffsz) {
1009   bool status = true;
1010   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1011     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1012     status = false;
1013   }
1014 
1015 #ifndef ASSERT
1016   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1017     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1018     status = false;
1019   }
1020 #endif
1021 
1022   if (status) {
1023     const char* cwd = get_current_directory(NULL, 0);
1024     int pid = current_process_id();
1025     if (cwd != NULL) {
1026       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1027     } else {
1028       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1029     }
1030 
1031     if (dumpFile == NULL &&
1032        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1033                  == INVALID_HANDLE_VALUE) {
1034       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1035       status = false;
1036     }
1037   }
1038   VMError::record_coredump_status(buffer, status);
1039 }
1040 
1041 void os::abort(bool dump_core, void* siginfo, const void* context) {
1042   HINSTANCE dbghelp;
1043   EXCEPTION_POINTERS ep;
1044   MINIDUMP_EXCEPTION_INFORMATION mei;
1045   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1046 
1047   HANDLE hProcess = GetCurrentProcess();
1048   DWORD processId = GetCurrentProcessId();
1049   MINIDUMP_TYPE dumpType;
1050 
1051   shutdown();
1052   if (!dump_core || dumpFile == NULL) {
1053     if (dumpFile != NULL) {
1054       CloseHandle(dumpFile);
1055     }
1056     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1057   }
1058 
1059   dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
1060 
1061   if (dbghelp == NULL) {
1062     jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
1063     CloseHandle(dumpFile);
1064     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1065   }
1066 
1067   _MiniDumpWriteDump =
1068       CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1069                                     PMINIDUMP_EXCEPTION_INFORMATION,
1070                                     PMINIDUMP_USER_STREAM_INFORMATION,
1071                                     PMINIDUMP_CALLBACK_INFORMATION),
1072                                     GetProcAddress(dbghelp,
1073                                     "MiniDumpWriteDump"));
1074 
1075   if (_MiniDumpWriteDump == NULL) {
1076     jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
1077     CloseHandle(dumpFile);
1078     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1079   }
1080 
1081   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1082     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1083 
1084   if (siginfo != NULL && context != NULL) {
1085     ep.ContextRecord = (PCONTEXT) context;
1086     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1087 
1088     mei.ThreadId = GetCurrentThreadId();
1089     mei.ExceptionPointers = &ep;
1090     pmei = &mei;
1091   } else {
1092     pmei = NULL;
1093   }
1094 
  // Older versions of dbghelp.dll (the one shipped with Win2003, for example) may not support all
  // the dump types we really want. If the first call fails, fall back to using just MiniDumpWithFullMemory.
1097   if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
1098       _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
1099     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1100   }
1101   CloseHandle(dumpFile);
1102   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1103 }
1104 
1105 // Die immediately, no exit hook, no abort hook, no cleanup.
1106 void os::die() {
1107   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1108 }
1109 
1110 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1111 //  * dirent_md.c       1.15 00/02/02
1112 //
1113 // The declarations for DIR and struct dirent are in jvm_win32.h.
1114 
1115 // Caller must have already run dirname through JVM_NativePath, which removes
1116 // duplicate slashes and converts all instances of '/' into '\\'.
1117 
1118 DIR * os::opendir(const char *dirname) {
1119   assert(dirname != NULL, "just checking");   // hotspot change
1120   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1121   DWORD fattr;                                // hotspot change
1122   char alt_dirname[4] = { 0, 0, 0, 0 };
1123 
1124   if (dirp == 0) {
1125     errno = ENOMEM;
1126     return 0;
1127   }
1128 
1129   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1130   // as a directory in FindFirstFile().  We detect this case here and
1131   // prepend the current drive name.
1132   //
1133   if (dirname[1] == '\0' && dirname[0] == '\\') {
1134     alt_dirname[0] = _getdrive() + 'A' - 1;
1135     alt_dirname[1] = ':';
1136     alt_dirname[2] = '\\';
1137     alt_dirname[3] = '\0';
1138     dirname = alt_dirname;
1139   }
1140 
1141   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1142   if (dirp->path == 0) {
1143     free(dirp);
1144     errno = ENOMEM;
1145     return 0;
1146   }
1147   strcpy(dirp->path, dirname);
1148 
1149   fattr = GetFileAttributes(dirp->path);
1150   if (fattr == 0xffffffff) {
1151     free(dirp->path);
1152     free(dirp);
1153     errno = ENOENT;
1154     return 0;
1155   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1156     free(dirp->path);
1157     free(dirp);
1158     errno = ENOTDIR;
1159     return 0;
1160   }
1161 
1162   // Append "*.*", or possibly "\\*.*", to path
1163   if (dirp->path[1] == ':' &&
1164       (dirp->path[2] == '\0' ||
1165       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1166     // No '\\' needed for cases like "Z:" or "Z:\"
1167     strcat(dirp->path, "*.*");
1168   } else {
1169     strcat(dirp->path, "\\*.*");
1170   }
1171 
1172   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1173   if (dirp->handle == INVALID_HANDLE_VALUE) {
1174     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1175       free(dirp->path);
1176       free(dirp);
1177       errno = EACCES;
1178       return 0;
1179     }
1180   }
1181   return dirp;
1182 }
1183 
1184 // parameter dbuf unused on Windows
1185 struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
1186   assert(dirp != NULL, "just checking");      // hotspot change
1187   if (dirp->handle == INVALID_HANDLE_VALUE) {
1188     return 0;
1189   }
1190 
1191   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1192 
1193   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1194     if (GetLastError() == ERROR_INVALID_HANDLE) {
1195       errno = EBADF;
1196       return 0;
1197     }
1198     FindClose(dirp->handle);
1199     dirp->handle = INVALID_HANDLE_VALUE;
1200   }
1201 
1202   return &dirp->dirent;
1203 }
1204 
1205 int os::closedir(DIR *dirp) {
1206   assert(dirp != NULL, "just checking");      // hotspot change
1207   if (dirp->handle != INVALID_HANDLE_VALUE) {
1208     if (!FindClose(dirp->handle)) {
1209       errno = EBADF;
1210       return -1;
1211     }
1212     dirp->handle = INVALID_HANDLE_VALUE;
1213   }
1214   free(dirp->path);
1215   free(dirp);
1216   return 0;
1217 }
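
// A usage sketch of the dirent emulation above ('some_dir' is a hypothetical,
// already-normalized path; error handling omitted):
//
//   DIR* dir = os::opendir(some_dir);
//   if (dir != NULL) {
//     struct dirent* entry;
//     while ((entry = os::readdir(dir, NULL /* dbuf unused on Windows */)) != NULL) {
//       // entry->d_name is the name of one entry in some_dir
//     }
//     os::closedir(dir);
//   }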
1218 
// This must be hard coded because it's the system's temporary
// directory, not the Java application's temp directory (java.io.tmpdir).
1221 const char* os::get_temp_directory() {
1222   static char path_buf[MAX_PATH];
1223   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1224     return path_buf;
1225   } else {
1226     path_buf[0] = '\0';
1227     return path_buf;
1228   }
1229 }
1230 
1231 static bool file_exists(const char* filename) {
1232   if (filename == NULL || strlen(filename) == 0) {
1233     return false;
1234   }
1235   return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1236 }
1237 
1238 bool os::dll_build_name(char *buffer, size_t buflen,
1239                         const char* pname, const char* fname) {
1240   bool retval = false;
1241   const size_t pnamelen = pname ? strlen(pname) : 0;
1242   const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1243 
1244   // Return error on buffer overflow.
1245   if (pnamelen + strlen(fname) + 10 > buflen) {
1246     return retval;
1247   }
1248 
1249   if (pnamelen == 0) {
1250     jio_snprintf(buffer, buflen, "%s.dll", fname);
1251     retval = true;
1252   } else if (c == ':' || c == '\\') {
1253     jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1254     retval = true;
1255   } else if (strchr(pname, *os::path_separator()) != NULL) {
1256     int n;
1257     char** pelements = split_path(pname, &n);
1258     if (pelements == NULL) {
1259       return false;
1260     }
1261     for (int i = 0; i < n; i++) {
1262       char* path = pelements[i];
1263       // Really shouldn't be NULL, but check can't hurt
1264       size_t plen = (path == NULL) ? 0 : strlen(path);
1265       if (plen == 0) {
1266         continue; // skip the empty path values
1267       }
1268       const char lastchar = path[plen - 1];
1269       if (lastchar == ':' || lastchar == '\\') {
1270         jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1271       } else {
1272         jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1273       }
1274       if (file_exists(buffer)) {
1275         retval = true;
1276         break;
1277       }
1278     }
1279     // release the storage
1280     for (int i = 0; i < n; i++) {
1281       if (pelements[i] != NULL) {
1282         FREE_C_HEAP_ARRAY(char, pelements[i]);
1283       }
1284     }
1285     if (pelements != NULL) {
1286       FREE_C_HEAP_ARRAY(char*, pelements);
1287     }
1288   } else {
1289     jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1290     retval = true;
1291   }
1292   return retval;
1293 }
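
// A usage sketch (the directory below is hypothetical):
//
//   char buf[MAX_PATH];
//   if (os::dll_build_name(buf, sizeof(buf), "C:\\jdk\\bin", "verify")) {
//     // buf now holds C:\jdk\bin\verify.dll
//   }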
1294 
// Needs to be in the OS-specific directory because Windows requires another
// header file, <direct.h>
1297 const char* os::get_current_directory(char *buf, size_t buflen) {
1298   int n = static_cast<int>(buflen);
1299   if (buflen > INT_MAX)  n = INT_MAX;
1300   return _getcwd(buf, n);
1301 }
1302 
1303 //-----------------------------------------------------------
1304 // Helper functions for fatal error handler
1305 #ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
//
1309 static bool _addr_in_ntdll(address addr) {
1310   HMODULE hmod;
1311   MODULEINFO minfo;
1312 
1313   hmod = GetModuleHandle("NTDLL.DLL");
1314   if (hmod == NULL) return false;
1315   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1316                                           &minfo, sizeof(MODULEINFO))) {
1317     return false;
1318   }
1319 
1320   if ((addr >= minfo.lpBaseOfDll) &&
1321       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1322     return true;
1323   } else {
1324     return false;
1325   }
1326 }
1327 #endif
1328 
1329 struct _modinfo {
1330   address addr;
1331   char*   full_path;   // point to a char buffer
1332   int     buflen;      // size of the buffer
1333   address base_addr;
1334 };
1335 
1336 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1337                                   address top_address, void * param) {
1338   struct _modinfo *pmod = (struct _modinfo *)param;
1339   if (!pmod) return -1;
1340 
1341   if (base_addr   <= pmod->addr &&
1342       top_address > pmod->addr) {
1343     // if a buffer is provided, copy path name to the buffer
1344     if (pmod->full_path) {
1345       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1346     }
1347     pmod->base_addr = base_addr;
1348     return 1;
1349   }
1350   return 0;
1351 }
1352 
1353 bool os::dll_address_to_library_name(address addr, char* buf,
1354                                      int buflen, int* offset) {
1355   // buf is not optional, but offset is optional
1356   assert(buf != NULL, "sanity check");
1357 
1358 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
1359 //       return the full path to the DLL file, sometimes it returns path
1360 //       to the corresponding PDB file (debug info); sometimes it only
1361 //       returns partial path, which makes life painful.
1362 
1363   struct _modinfo mi;
1364   mi.addr      = addr;
1365   mi.full_path = buf;
1366   mi.buflen    = buflen;
1367   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1368     // buf already contains path name
1369     if (offset) *offset = addr - mi.base_addr;
1370     return true;
1371   }
1372 
1373   buf[0] = '\0';
1374   if (offset) *offset = -1;
1375   return false;
1376 }
1377 
1378 bool os::dll_address_to_function_name(address addr, char *buf,
1379                                       int buflen, int *offset,
1380                                       bool demangle) {
1381   // buf is not optional, but offset is optional
1382   assert(buf != NULL, "sanity check");
1383 
1384   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1385     return true;
1386   }
1387   if (offset != NULL)  *offset  = -1;
1388   buf[0] = '\0';
1389   return false;
1390 }
1391 
1392 // save the start and end address of jvm.dll into param[0] and param[1]
1393 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1394                            address top_address, void * param) {
1395   if (!param) return -1;
1396 
1397   if (base_addr   <= (address)_locate_jvm_dll &&
1398       top_address > (address)_locate_jvm_dll) {
1399     ((address*)param)[0] = base_addr;
1400     ((address*)param)[1] = top_address;
1401     return 1;
1402   }
1403   return 0;
1404 }
1405 
1406 address vm_lib_location[2];    // start and end address of jvm.dll
1407 
1408 // check if addr is inside jvm.dll
1409 bool os::address_is_in_vm(address addr) {
1410   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1411     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1412       assert(false, "Can't find jvm module.");
1413       return false;
1414     }
1415   }
1416 
1417   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1418 }
1419 
1420 // print module info; param is outputStream*
1421 static int _print_module(const char* fname, address base_address,
1422                          address top_address, void* param) {
1423   if (!param) return -1;
1424 
1425   outputStream* st = (outputStream*)param;
1426 
1427   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1428   return 0;
1429 }
1430 
// Loads a .dll/.so and,
// in case of error, checks whether the .dll/.so was built for the
// same architecture that Hotspot is running on
1434 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1435   void * result = LoadLibrary(name);
1436   if (result != NULL) {
1437     return result;
1438   }
1439 
1440   DWORD errcode = GetLastError();
1441   if (errcode == ERROR_MOD_NOT_FOUND) {
1442     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1443     ebuf[ebuflen - 1] = '\0';
1444     return NULL;
1445   }
1446 
  // Parse the dll below.
  // If we can read the dll info and find that the dll was built
  // for an architecture other than the one Hotspot is running on,
  // then report "DLL was built for a different architecture" in the buffer;
  // otherwise call os::lasterror to obtain the system error message.
1452 
1453   // Read system error message into ebuf
1454   // It may or may not be overwritten below (in the for loop and just above)
1455   lasterror(ebuf, (size_t) ebuflen);
1456   ebuf[ebuflen - 1] = '\0';
1457   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1458   if (fd < 0) {
1459     return NULL;
1460   }
1461 
1462   uint32_t signature_offset;
1463   uint16_t lib_arch = 0;
1464   bool failed_to_get_lib_arch =
1465     ( // Go to position 3c in the dll
1466      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1467      ||
1468      // Read location of signature
1469      (sizeof(signature_offset) !=
1470      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1471      ||
1472      // Go to COFF File Header in dll
1473      // that is located after "signature" (4 bytes long)
1474      (os::seek_to_file_offset(fd,
1475      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1476      ||
1477      // Read field that contains code of architecture
1478      // that dll was built for
1479      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1480     );
1481 
1482   ::close(fd);
1483   if (failed_to_get_lib_arch) {
1484     // file i/o error - report os::lasterror(...) msg
1485     return NULL;
1486   }
1487 
1488   typedef struct {
1489     uint16_t arch_code;
1490     char* arch_name;
1491   } arch_t;
1492 
1493   static const arch_t arch_array[] = {
1494     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1495     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1496     {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
1497   };
1498 #if   (defined _M_IA64)
1499   static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
1500 #elif (defined _M_AMD64)
1501   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1502 #elif (defined _M_IX86)
1503   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1504 #else
1505   #error Method os::dll_load requires that one of following \
1506          is defined :_M_IA64,_M_AMD64 or _M_IX86
1507 #endif
1508 
1509 
  // Obtain strings for the printf operation:
  // lib_arch_str names the platform this .dll was built for,
  // running_arch_str names the platform Hotspot was built for.
1513   char *running_arch_str = NULL, *lib_arch_str = NULL;
1514   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1515     if (lib_arch == arch_array[i].arch_code) {
1516       lib_arch_str = arch_array[i].arch_name;
1517     }
1518     if (running_arch == arch_array[i].arch_code) {
1519       running_arch_str = arch_array[i].arch_name;
1520     }
1521   }
1522 
1523   assert(running_arch_str,
1524          "Didn't find running architecture code in arch_array");
1525 
1526   // If the architecture is right
1527   // but some other error took place - report os::lasterror(...) msg
1528   if (lib_arch == running_arch) {
1529     return NULL;
1530   }
1531 
1532   if (lib_arch_str != NULL) {
1533     ::_snprintf(ebuf, ebuflen - 1,
1534                 "Can't load %s-bit .dll on a %s-bit platform",
1535                 lib_arch_str, running_arch_str);
1536   } else {
    // don't know what architecture this dll was built for
1538     ::_snprintf(ebuf, ebuflen - 1,
1539                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1540                 lib_arch, running_arch_str);
1541   }
1542 
1543   return NULL;
1544 }
1545 
1546 void os::print_dll_info(outputStream *st) {
1547   st->print_cr("Dynamic libraries:");
1548   get_loaded_modules_info(_print_module, (void *)st);
1549 }
1550 
1551 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1552   HANDLE   hProcess;
1553 
1554 # define MAX_NUM_MODULES 128
1555   HMODULE     modules[MAX_NUM_MODULES];
1556   static char filename[MAX_PATH];
1557   int         result = 0;
1558 
1559   int pid = os::current_process_id();
1560   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1561                          FALSE, pid);
1562   if (hProcess == NULL) return 0;
1563 
1564   DWORD size_needed;
1565   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1566     CloseHandle(hProcess);
1567     return 0;
1568   }
1569 
1570   // number of modules that are currently loaded
1571   int num_modules = size_needed / sizeof(HMODULE);
1572 
1573   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1574     // Get Full pathname:
1575     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1576       filename[0] = '\0';
1577     }
1578 
1579     MODULEINFO modinfo;
1580     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1581       modinfo.lpBaseOfDll = NULL;
1582       modinfo.SizeOfImage = 0;
1583     }
1584 
1585     // Invoke callback function
1586     result = callback(filename, (address)modinfo.lpBaseOfDll,
1587                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1588     if (result) break;
1589   }
1590 
1591   CloseHandle(hProcess);
1592   return result;
1593 }
1594 
1595 bool os::get_host_name(char* buf, size_t buflen) {
1596   DWORD size = (DWORD)buflen;
1597   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1598 }
1599 
1600 void os::get_summary_os_info(char* buf, size_t buflen) {
1601   stringStream sst(buf, buflen);
1602   os::win32::print_windows_version(&sst);
1603   // chop off newline character
1604   char* nl = strchr(buf, '\n');
1605   if (nl != NULL) *nl = '\0';
1606 }
1607 
1608 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1609   int ret = vsnprintf(buf, len, fmt, args);
1610   // Get the correct buffer size if buf is too small
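  // (With the Microsoft CRT, vsnprintf reports truncation with a negative
  // return value; _vscprintf returns the number of characters the formatted
  // output needs, not counting the terminating NUL.)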
1611   if (ret < 0) {
1612     return _vscprintf(fmt, args);
1613   }
1614   return ret;
1615 }
1616 
1617 static inline time_t get_mtime(const char* filename) {
1618   struct stat st;
1619   int ret = os::stat(filename, &st);
1620   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
1621   return st.st_mtime;
1622 }
1623 
1624 int os::compare_file_modified_times(const char* file1, const char* file2) {
1625   time_t t1 = get_mtime(file1);
1626   time_t t2 = get_mtime(file2);
1627   return t1 - t2;
1628 }
1629 
1630 void os::print_os_info_brief(outputStream* st) {
1631   os::print_os_info(st);
1632 }
1633 
1634 void os::print_os_info(outputStream* st) {
1635 #ifdef ASSERT
1636   char buffer[1024];
1637   st->print("HostName: ");
1638   if (get_host_name(buffer, sizeof(buffer))) {
1639     st->print("%s ", buffer);
1640   } else {
1641     st->print("N/A ");
1642   }
1643 #endif
1644   st->print("OS:");
1645   os::win32::print_windows_version(st);
1646 }
1647 
1648 void os::win32::print_windows_version(outputStream* st) {
1649   OSVERSIONINFOEX osvi;
1650   VS_FIXEDFILEINFO *file_info;
1651   TCHAR kernel32_path[MAX_PATH];
1652   UINT len, ret;
1653 
1654   // Use the GetVersionEx information to see if we're on a server or
1655   // workstation edition of Windows. Starting with Windows 8.1 we can't
1656   // trust the OS version information returned by this API.
1657   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1658   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1659   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1660     st->print_cr("Call to GetVersionEx failed");
1661     return;
1662   }
1663   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1664 
1665   // Get the full path to \Windows\System32\kernel32.dll and use that for
1666   // determining what version of Windows we're running on.
1667   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1668   ret = GetSystemDirectory(kernel32_path, len);
1669   if (ret == 0 || ret > len) {
1670     st->print_cr("Call to GetSystemDirectory failed");
1671     return;
1672   }
1673   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1674 
1675   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1676   if (version_size == 0) {
1677     st->print_cr("Call to GetFileVersionInfoSize failed");
1678     return;
1679   }
1680 
1681   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1682   if (version_info == NULL) {
1683     st->print_cr("Failed to allocate version_info");
1684     return;
1685   }
1686 
1687   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1688     os::free(version_info);
1689     st->print_cr("Call to GetFileVersionInfo failed");
1690     return;
1691   }
1692 
1693   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1694     os::free(version_info);
1695     st->print_cr("Call to VerQueryValue failed");
1696     return;
1697   }
1698 
1699   int major_version = HIWORD(file_info->dwProductVersionMS);
1700   int minor_version = LOWORD(file_info->dwProductVersionMS);
1701   int build_number = HIWORD(file_info->dwProductVersionLS);
1702   int build_minor = LOWORD(file_info->dwProductVersionLS);
1703   int os_vers = major_version * 1000 + minor_version;
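  // os_vers encodes major.minor as major * 1000 + minor, e.g. 6.1 -> 6001
  // (Windows 7 / Server 2008 R2) and 10.0 -> 10000 (Windows 10 / Server 2016).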
1704   os::free(version_info);
1705 
1706   st->print(" Windows ");
1707   switch (os_vers) {
1708 
1709   case 6000:
1710     if (is_workstation) {
1711       st->print("Vista");
1712     } else {
1713       st->print("Server 2008");
1714     }
1715     break;
1716 
1717   case 6001:
1718     if (is_workstation) {
1719       st->print("7");
1720     } else {
1721       st->print("Server 2008 R2");
1722     }
1723     break;
1724 
1725   case 6002:
1726     if (is_workstation) {
1727       st->print("8");
1728     } else {
1729       st->print("Server 2012");
1730     }
1731     break;
1732 
1733   case 6003:
1734     if (is_workstation) {
1735       st->print("8.1");
1736     } else {
1737       st->print("Server 2012 R2");
1738     }
1739     break;
1740 
1741   case 10000:
1742     if (is_workstation) {
1743       st->print("10");
1744     } else {
1745       st->print("Server 2016");
1746     }
1747     break;
1748 
1749   default:
    // Unrecognized Windows version; print its major and minor version numbers
1751     st->print("%d.%d", major_version, minor_version);
1752     break;
1753   }
1754 
  // Retrieve SYSTEM_INFO via GetNativeSystemInfo to find out whether we
  // are running on a 64-bit processor.
1757   SYSTEM_INFO si;
1758   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1759   GetNativeSystemInfo(&si);
1760   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1761     st->print(" , 64 bit");
1762   }
1763 
1764   st->print(" Build %d", build_number);
1765   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1766   st->cr();
1767 }
1768 
1769 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1770   // Nothing to do for now.
1771 }
1772 
1773 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1774   HKEY key;
1775   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1776                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1777   if (status == ERROR_SUCCESS) {
1778     DWORD size = (DWORD)buflen;
1779     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1780     if (status != ERROR_SUCCESS) {
1781         strncpy(buf, "## __CPU__", buflen);
1782     }
1783     RegCloseKey(key);
1784   } else {
    // Fall back to a generic CPU description.
1786     strncpy(buf, "## __CPU__", buflen);
1787   }
1788 }
1789 
1790 void os::print_memory_info(outputStream* st) {
1791   st->print("Memory:");
1792   st->print(" %dk page", os::vm_page_size()>>10);
1793 
1794   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1795   // value if total memory is larger than 4GB
1796   MEMORYSTATUSEX ms;
1797   ms.dwLength = sizeof(ms);
1798   GlobalMemoryStatusEx(&ms);
1799 
1800   st->print(", physical %uk", os::physical_memory() >> 10);
1801   st->print("(%uk free)", os::available_memory() >> 10);
1802 
1803   st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1804   st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1805   st->cr();
1806 }
1807 
1808 void os::print_siginfo(outputStream *st, const void* siginfo) {
1809   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1810   st->print("siginfo:");
1811 
1812   char tmp[64];
1813   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1814     strcpy(tmp, "EXCEPTION_??");
1815   }
1816   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1817 
1818   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1819        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1820        er->NumberParameters >= 2) {
1821     switch (er->ExceptionInformation[0]) {
1822     case 0: st->print(", reading address"); break;
1823     case 1: st->print(", writing address"); break;
1824     case 8: st->print(", data execution prevention violation at address"); break;
1825     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1826                        er->ExceptionInformation[0]);
1827     }
1828     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1829   } else {
1830     int num = er->NumberParameters;
1831     if (num > 0) {
1832       st->print(", ExceptionInformation=");
1833       for (int i = 0; i < num; i++) {
1834         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1835       }
1836     }
1837   }
1838   st->cr();
1839 }
1840 
1841 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1842   // do nothing
1843 }
1844 
1845 static char saved_jvm_path[MAX_PATH] = {0};
1846 
1847 // Find the full path to the current module, jvm.dll
1848 void os::jvm_path(char *buf, jint buflen) {
1849   // Error checking.
1850   if (buflen < MAX_PATH) {
1851     assert(false, "must use a large-enough buffer");
1852     buf[0] = '\0';
1853     return;
1854   }
1855   // Lazy resolve the path to current module.
1856   if (saved_jvm_path[0] != 0) {
1857     strcpy(buf, saved_jvm_path);
1858     return;
1859   }
1860 
1861   buf[0] = '\0';
1862   if (Arguments::sun_java_launcher_is_altjvm()) {
1863     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1864     // for a JAVA_HOME environment variable and fix up the path so it
1865     // looks like jvm.dll is installed there (append a fake suffix
1866     // hotspot/jvm.dll).
1867     char* java_home_var = ::getenv("JAVA_HOME");
1868     if (java_home_var != NULL && java_home_var[0] != 0 &&
1869         strlen(java_home_var) < (size_t)buflen) {
1870       strncpy(buf, java_home_var, buflen);
1871 
1872       // determine if this is a legacy image or modules image
1873       // modules image doesn't have "jre" subdirectory
1874       size_t len = strlen(buf);
1875       char* jrebin_p = buf + len;
1876       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1877       if (0 != _access(buf, 0)) {
1878         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1879       }
1880       len = strlen(buf);
1881       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1882     }
1883   }
1884 
1885   if (buf[0] == '\0') {
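    // Fall back to the path of the module containing this code, i.e. jvm.dll itself.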
1886     GetModuleFileName(vm_lib_handle, buf, buflen);
1887   }
1888   strncpy(saved_jvm_path, buf, MAX_PATH);
1889   saved_jvm_path[MAX_PATH - 1] = '\0';
1890 }
1891 
1892 
1893 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1894 #ifndef _WIN64
1895   st->print("_");
1896 #endif
1897 }
1898 
1899 
1900 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1901 #ifndef _WIN64
1902   st->print("@%d", args_size  * sizeof(int));
1903 #endif
1904 }
1905 
1906 // This method is a copy of JDK's sysGetLastErrorString
1907 // from src/windows/hpi/src/system_md.c
1908 
1909 size_t os::lasterror(char* buf, size_t len) {
1910   DWORD errval;
1911 
1912   if ((errval = GetLastError()) != 0) {
1913     // DOS error
1914     size_t n = (size_t)FormatMessage(
1915                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1916                                      NULL,
1917                                      errval,
1918                                      0,
1919                                      buf,
1920                                      (DWORD)len,
1921                                      NULL);
1922     if (n > 3) {
1923       // Drop final '.', CR, LF
1924       if (buf[n - 1] == '\n') n--;
1925       if (buf[n - 1] == '\r') n--;
1926       if (buf[n - 1] == '.') n--;
1927       buf[n] = '\0';
1928     }
1929     return n;
1930   }
1931 
1932   if (errno != 0) {
1933     // C runtime error that has no corresponding DOS error code
1934     const char* s = os::strerror(errno);
1935     size_t n = strlen(s);
1936     if (n >= len) n = len - 1;
1937     strncpy(buf, s, n);
1938     buf[n] = '\0';
1939     return n;
1940   }
1941 
1942   return 0;
1943 }
1944 
1945 int os::get_last_error() {
1946   DWORD error = GetLastError();
1947   if (error == 0) {
1948     error = errno;
1949   }
1950   return (int)error;
1951 }
1952 
1953 WindowsSemaphore::WindowsSemaphore(uint value) {
1954   _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);
1955 
1956   guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
1957 }
1958 
1959 WindowsSemaphore::~WindowsSemaphore() {
1960   ::CloseHandle(_semaphore);
1961 }
1962 
1963 void WindowsSemaphore::signal(uint count) {
1964   if (count > 0) {
1965     BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
1966 
1967     assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
1968   }
1969 }
1970 
1971 void WindowsSemaphore::wait() {
1972   DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
1973   assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
1974   assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
1975 }
1976 
1977 // sun.misc.Signal
1978 // NOTE that this is a workaround for an apparent kernel bug where if
1979 // a signal handler for SIGBREAK is installed then that signal handler
1980 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1981 // See bug 4416763.
1982 static void (*sigbreakHandler)(int) = NULL;
1983 
1984 static void UserHandler(int sig, void *siginfo, void *context) {
1985   os::signal_notify(sig);
1986   // We need to reinstate the signal handler each time...
1987   os::signal(sig, (void*)UserHandler);
1988 }
1989 
1990 void* os::user_handler() {
1991   return (void*) UserHandler;
1992 }
1993 
1994 void* os::signal(int signal_number, void* handler) {
1995   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1996     void (*oldHandler)(int) = sigbreakHandler;
1997     sigbreakHandler = (void (*)(int)) handler;
1998     return (void*) oldHandler;
1999   } else {
2000     return (void*)::signal(signal_number, (void (*)(int))handler);
2001   }
2002 }
2003 
2004 void os::signal_raise(int signal_number) {
2005   raise(signal_number);
2006 }
2007 
2008 // The Win32 C runtime library maps all console control events other than ^C
2009 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2010 // logoff, and shutdown events.  We therefore install our own console handler
2011 // that raises SIGTERM for the latter cases.
2012 //
2013 static BOOL WINAPI consoleHandler(DWORD event) {
2014   switch (event) {
2015   case CTRL_C_EVENT:
2016     if (is_error_reported()) {
2017       // Ctrl-C is pressed during error reporting, likely because the error
2018       // handler fails to abort. Let VM die immediately.
2019       os::die();
2020     }
2021 
2022     os::signal_raise(SIGINT);
2023     return TRUE;
2024     break;
2025   case CTRL_BREAK_EVENT:
2026     if (sigbreakHandler != NULL) {
2027       (*sigbreakHandler)(SIGBREAK);
2028     }
2029     return TRUE;
2030     break;
2031   case CTRL_LOGOFF_EVENT: {
2032     // Don't terminate JVM if it is running in a non-interactive session,
2033     // such as a service process.
2034     USEROBJECTFLAGS flags;
2035     HANDLE handle = GetProcessWindowStation();
2036     if (handle != NULL &&
2037         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2038         sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let the next handler deal with it.
2041       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2042         return FALSE;
2043       }
2044     }
2045   }
2046   case CTRL_CLOSE_EVENT:
2047   case CTRL_SHUTDOWN_EVENT:
2048     os::signal_raise(SIGTERM);
2049     return TRUE;
2050     break;
2051   default:
2052     break;
2053   }
2054   return FALSE;
2055 }
2056 
// The following code was moved here from os.cpp because it is
// platform-specific by its very nature.
2059 
2060 // Return maximum OS signal used + 1 for internal use only
2061 // Used as exit signal for signal_thread
2062 int os::sigexitnum_pd() {
2063   return NSIG;
2064 }
2065 
2066 // a counter for each possible signal value, including signal_thread exit signal
2067 static volatile jint pending_signals[NSIG+1] = { 0 };
2068 static HANDLE sig_sem = NULL;
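// sig_sem is released once by signal_notify() for each signal recorded in
// pending_signals; check_pending_signals() waits on it before rescanning the
// counters.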
2069 
2070 void os::signal_init_pd() {
2071   // Initialize signal structures
2072   memset((void*)pending_signals, 0, sizeof(pending_signals));
2073 
2074   sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
2075 
2076   // Programs embedding the VM do not want it to attempt to receive
2077   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2078   // shutdown hooks mechanism introduced in 1.3.  For example, when
2079   // the VM is run as part of a Windows NT service (i.e., a servlet
2080   // engine in a web server), the correct behavior is for any console
2081   // control handler to return FALSE, not TRUE, because the OS's
2082   // "final" handler for such events allows the process to continue if
2083   // it is a service (while terminating it if it is not a service).
2084   // To make this behavior uniform and the mechanism simpler, we
2085   // completely disable the VM's usage of these console events if -Xrs
2086   // (=ReduceSignalUsage) is specified.  This means, for example, that
2087   // the CTRL-BREAK thread dump mechanism is also disabled in this
2088   // case.  See bugs 4323062, 4345157, and related bugs.
2089 
2090   if (!ReduceSignalUsage) {
2091     // Add a CTRL-C handler
2092     SetConsoleCtrlHandler(consoleHandler, TRUE);
2093   }
2094 }
2095 
2096 void os::signal_notify(int signal_number) {
2097   BOOL ret;
2098   if (sig_sem != NULL) {
2099     Atomic::inc(&pending_signals[signal_number]);
2100     ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2101     assert(ret != 0, "ReleaseSemaphore() failed");
2102   }
2103 }
2104 
2105 static int check_pending_signals(bool wait_for_signal) {
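  // Scan the pending_signals counters and atomically claim a raised signal via
  // compare-and-swap.  If none is pending and wait_for_signal is true, block on
  // sig_sem (transitioning to a blocked-in-VM state) and rescan once released.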
2106   DWORD ret;
2107   while (true) {
2108     for (int i = 0; i < NSIG + 1; i++) {
2109       jint n = pending_signals[i];
2110       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2111         return i;
2112       }
2113     }
2114     if (!wait_for_signal) {
2115       return -1;
2116     }
2117 
2118     JavaThread *thread = JavaThread::current();
2119 
2120     ThreadBlockInVM tbivm(thread);
2121 
2122     bool threadIsSuspended;
2123     do {
2124       thread->set_suspend_equivalent();
2125       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2126       ret = ::WaitForSingleObject(sig_sem, INFINITE);
2127       assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
2128 
2129       // were we externally suspended while we were waiting?
2130       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2131       if (threadIsSuspended) {
2132         // The semaphore has been incremented, but while we were waiting
2133         // another thread suspended us. We don't want to continue running
2134         // while suspended because that would surprise the thread that
2135         // suspended us.
2136         ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2137         assert(ret != 0, "ReleaseSemaphore() failed");
2138 
2139         thread->java_suspend_self();
2140       }
2141     } while (threadIsSuspended);
2142   }
2143 }
2144 
2145 int os::signal_lookup() {
2146   return check_pending_signals(false);
2147 }
2148 
2149 int os::signal_wait() {
2150   return check_pending_signals(true);
2151 }
2152 
2153 // Implicit OS exception handling
2154 
2155 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2156                       address handler) {
  JavaThread* thread = (JavaThread*) Thread::current_or_null();
2158   // Save pc in thread
2159 #ifdef _M_IA64
2160   // Do not blow up if no thread info available.
2161   if (thread) {
2162     // Saving PRECISE pc (with slot information) in thread.
2163     uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
2164     // Convert precise PC into "Unix" format
2165     precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
2166     thread->set_saved_exception_pc((address)precise_pc);
2167   }
2168   // Set pc to handler
2169   exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
2170   // Clear out psr.ri (= Restart Instruction) in order to continue
2171   // at the beginning of the target bundle.
2172   exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
2173   assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
2174 #else
2175   #ifdef _M_AMD64
2176   // Do not blow up if no thread info available.
2177   if (thread) {
2178     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2179   }
2180   // Set pc to handler
2181   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2182   #else
2183   // Do not blow up if no thread info available.
2184   if (thread) {
2185     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2186   }
2187   // Set pc to handler
2188   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2189   #endif
2190 #endif
2191 
2192   // Continue the execution
2193   return EXCEPTION_CONTINUE_EXECUTION;
2194 }
2195 
2196 
2197 // Used for PostMortemDump
2198 extern "C" void safepoints();
2199 extern "C" void find(int x);
2200 extern "C" void events();
2201 
2202 // According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real-world experience shows that occasionally
2204 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2205 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2206 
2207 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2208 
2209 // From "Execution Protection in the Windows Operating System" draft 0.35
2210 // Once a system header becomes available, the "real" define should be
2211 // included or copied here.
2212 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2213 
2214 // Handle NAT Bit consumption on IA64.
2215 #ifdef _M_IA64
2216   #define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
2217 #endif
2218 
2219 // Windows Vista/2008 heap corruption check
2220 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2221 
2222 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2223 // C++ compiler contain this error code. Because this is a compiler-generated
2224 // error, the code is not listed in the Win32 API header files.
2225 // The code is actually a cryptic mnemonic device, with the initial "E"
2226 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2227 // ASCII values of "msc".
2228 
2229 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2230 
2231 #define def_excpt(val) { #val, (val) }
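// def_excpt pairs the stringified exception name with its numeric code for
// the lookup table below.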
2232 
static const struct { const char* name; uint number; } exceptlabels[] = {
2234     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2235     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2236     def_excpt(EXCEPTION_BREAKPOINT),
2237     def_excpt(EXCEPTION_SINGLE_STEP),
2238     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2239     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2240     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2241     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2242     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2243     def_excpt(EXCEPTION_FLT_OVERFLOW),
2244     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2245     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2246     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2247     def_excpt(EXCEPTION_INT_OVERFLOW),
2248     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2249     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2250     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2251     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2252     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2253     def_excpt(EXCEPTION_STACK_OVERFLOW),
2254     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2255     def_excpt(EXCEPTION_GUARD_PAGE),
2256     def_excpt(EXCEPTION_INVALID_HANDLE),
2257     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2258     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2259 #ifdef _M_IA64
2260     , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION)
2261 #endif
2262 };
2263 
2264 #undef def_excpt
2265 
2266 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2267   uint code = static_cast<uint>(exception_code);
2268   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2269     if (exceptlabels[i].number == code) {
2270       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2271       return buf;
2272     }
2273   }
2274 
2275   return NULL;
2276 }
2277 
2278 //-----------------------------------------------------------------------------
2279 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2280   // handle exception caused by idiv; should only happen for -MinInt/-1
2281   // (division by zero is handled explicitly)
2282 #ifdef _M_IA64
2283   assert(0, "Fix Handle_IDiv_Exception");
2284 #else
2285   #ifdef  _M_AMD64
2286   PCONTEXT ctx = exceptionInfo->ContextRecord;
2287   address pc = (address)ctx->Rip;
2288   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2289   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2290   if (pc[0] == 0xF7) {
2291     // set correct result values and continue after idiv instruction
2292     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2293   } else {
2294     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2295   }
2296   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2297   // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2298   // idiv opcode (0xF7).
2299   ctx->Rdx = (DWORD)0;             // remainder
2300   // Continue the execution
2301   #else
2302   PCONTEXT ctx = exceptionInfo->ContextRecord;
2303   address pc = (address)ctx->Eip;
2304   assert(pc[0] == 0xF7, "not an idiv opcode");
2305   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2306   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2307   // set correct result values and continue after idiv instruction
2308   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2309   ctx->Eax = (DWORD)min_jint;      // result
2310   ctx->Edx = (DWORD)0;             // remainder
2311   // Continue the execution
2312   #endif
2313 #endif
2314   return EXCEPTION_CONTINUE_EXECUTION;
2315 }
2316 
2317 //-----------------------------------------------------------------------------
2318 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2319   PCONTEXT ctx = exceptionInfo->ContextRecord;
2320 #ifndef  _WIN64
2321   // handle exception caused by native method modifying control word
2322   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2323 
2324   switch (exception_code) {
2325   case EXCEPTION_FLT_DENORMAL_OPERAND:
2326   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2327   case EXCEPTION_FLT_INEXACT_RESULT:
2328   case EXCEPTION_FLT_INVALID_OPERATION:
2329   case EXCEPTION_FLT_OVERFLOW:
2330   case EXCEPTION_FLT_STACK_CHECK:
2331   case EXCEPTION_FLT_UNDERFLOW:
2332     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2333     if (fp_control_word != ctx->FloatSave.ControlWord) {
2334       // Restore FPCW and mask out FLT exceptions
2335       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2336       // Mask out pending FLT exceptions
2337       ctx->FloatSave.StatusWord &=  0xffffff00;
2338       return EXCEPTION_CONTINUE_EXECUTION;
2339     }
2340   }
2341 
2342   if (prev_uef_handler != NULL) {
2343     // We didn't handle this exception so pass it to the previous
2344     // UnhandledExceptionFilter.
2345     return (prev_uef_handler)(exceptionInfo);
2346   }
2347 #else // !_WIN64
2348   // On Windows, the mxcsr control bits are non-volatile across calls
2349   // See also CR 6192333
2350   //
2351   jint MxCsr = INITIAL_MXCSR;
2352   // we can't use StubRoutines::addr_mxcsr_std()
2353   // because in Win64 mxcsr is not saved there
2354   if (MxCsr != ctx->MxCsr) {
2355     ctx->MxCsr = MxCsr;
2356     return EXCEPTION_CONTINUE_EXECUTION;
2357   }
2358 #endif // !_WIN64
2359 
2360   return EXCEPTION_CONTINUE_SEARCH;
2361 }
2362 
2363 static inline void report_error(Thread* t, DWORD exception_code,
2364                                 address addr, void* siginfo, void* context) {
2365   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2366 
2367   // If UseOsErrorReporting, this will return here and save the error file
2368   // somewhere where we can find it in the minidump.
2369 }
2370 
2371 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2372         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2373   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2374   address addr = (address) exceptionRecord->ExceptionInformation[1];
2375   if (Interpreter::contains(pc)) {
2376     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2377     if (!fr->is_first_java_frame()) {
2378       // get_frame_at_stack_banging_point() is only called when we
2379       // have well defined stacks so java_sender() calls do not need
2380       // to assert safe_for_sender() first.
2381       *fr = fr->java_sender();
2382     }
2383   } else {
2384     // more complex code with compiled code
2385     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2386     CodeBlob* cb = CodeCache::find_blob(pc);
2387     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2388       // Not sure where the pc points to, fallback to default
2389       // stack overflow handling
2390       return false;
2391     } else {
2392       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2393       // in compiled code, the stack banging is performed just after the return pc
2394       // has been pushed on the stack
2395       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2396       if (!fr->is_java_frame()) {
2397         // See java_sender() comment above.
2398         *fr = fr->java_sender();
2399       }
2400     }
2401   }
2402   assert(fr->is_java_frame(), "Safety check");
2403   return true;
2404 }
2405 
2406 //-----------------------------------------------------------------------------
2407 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2408   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2409   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2410 #ifdef _M_IA64
  // On Itanium, we need the "precise pc", which has the slot number coded
  // into the least significant 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least significant 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
2416   // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
2417   // information is saved in the Unix format.
2418   address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
2419 #else
2420   #ifdef _M_AMD64
2421   address pc = (address) exceptionInfo->ContextRecord->Rip;
2422   #else
2423   address pc = (address) exceptionInfo->ContextRecord->Eip;
2424   #endif
2425 #endif
2426   Thread* t = Thread::current_or_null_safe();
2427 
2428   // Handle SafeFetch32 and SafeFetchN exceptions.
2429   if (StubRoutines::is_safefetch_fault(pc)) {
2430     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2431   }
2432 
2433 #ifndef _WIN64
2434   // Execution protection violation - win32 running on AMD64 only
2435   // Handled first to avoid misdiagnosis as a "normal" access violation;
2436   // This is safe to do because we have a new/unique ExceptionInformation
2437   // code for this condition.
2438   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2439     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2440     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2441     address addr = (address) exceptionRecord->ExceptionInformation[1];
2442 
2443     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2444       int page_size = os::vm_page_size();
2445 
2446       // Make sure the pc and the faulting address are sane.
2447       //
2448       // If an instruction spans a page boundary, and the page containing
2449       // the beginning of the instruction is executable but the following
2450       // page is not, the pc and the faulting address might be slightly
2451       // different - we still want to unguard the 2nd page in this case.
2452       //
2453       // 15 bytes seems to be a (very) safe value for max instruction size.
2454       bool pc_is_near_addr =
2455         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2456       bool instr_spans_page_boundary =
2457         (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2458                          (intptr_t) page_size) > 0);
2459 
2460       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2461         static volatile address last_addr =
2462           (address) os::non_memory_address_word();
2463 
2464         // In conservative mode, don't unguard unless the address is in the VM
2465         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2466             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2467 
2468           // Set memory to RWX and retry
2469           address page_start =
2470             (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2471           bool res = os::protect_memory((char*) page_start, page_size,
2472                                         os::MEM_PROT_RWX);
2473 
2474           log_debug(os)("Execution protection violation "
2475                         "at " INTPTR_FORMAT
2476                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2477                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2478 
2479           // Set last_addr so if we fault again at the same address, we don't
2480           // end up in an endless loop.
2481           //
2482           // There are two potential complications here.  Two threads trapping
2483           // at the same address at the same time could cause one of the
2484           // threads to think it already unguarded, and abort the VM.  Likely
2485           // very rare.
2486           //
2487           // The other race involves two threads alternately trapping at
2488           // different addresses and failing to unguard the page, resulting in
2489           // an endless loop.  This condition is probably even more unlikely
2490           // than the first.
2491           //
2492           // Although both cases could be avoided by using locks or thread
2493           // local last_addr, these solutions are unnecessary complication:
2494           // this handler is a best-effort safety net, not a complete solution.
2495           // It is disabled by default and should only be used as a workaround
2496           // in case we missed any no-execute-unsafe VM code.
2497 
2498           last_addr = addr;
2499 
2500           return EXCEPTION_CONTINUE_EXECUTION;
2501         }
2502       }
2503 
2504       // Last unguard failed or not unguarding
2505       tty->print_raw_cr("Execution protection violation");
2506       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2507                    exceptionInfo->ContextRecord);
2508       return EXCEPTION_CONTINUE_SEARCH;
2509     }
2510   }
2511 #endif // _WIN64
2512 
2513   // Check to see if we caught the safepoint code in the
2514   // process of write protecting the memory serialization page.
  // It write-enables the page immediately after protecting it,
  // so just return.
2517   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2518     if (t != NULL && t->is_Java_thread()) {
2519       JavaThread* thread = (JavaThread*) t;
2520       PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2521       address addr = (address) exceptionRecord->ExceptionInformation[1];
2522       if (os::is_memory_serialize_page(thread, addr)) {
        // Block the current thread until the memory serialize page permission is restored.
2524         os::block_on_serialize_page_trap();
2525         return EXCEPTION_CONTINUE_EXECUTION;
2526       }
2527     }
2528   }
2529 
2530   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2531       VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that the OS saves and restores AVX registers.
2533     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2534   }
2535 
2536   if (t != NULL && t->is_Java_thread()) {
2537     JavaThread* thread = (JavaThread*) t;
2538     bool in_java = thread->thread_state() == _thread_in_Java;
2539 
2540     // Handle potential stack overflows up front.
2541     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2542 #ifdef _M_IA64
2543       // Use guard page for register stack.
2544       PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2545       address addr = (address) exceptionRecord->ExceptionInformation[1];
2546       // Check for a register stack overflow on Itanium
2547       if (thread->addr_inside_register_stack_red_zone(addr)) {
2548         // Fatal red zone violation happens if the Java program
2549         // catches a StackOverflow error and does so much processing
2550         // that it runs beyond the unprotected yellow guard zone. As
2551         // a result, we are out of here.
2552         fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
      } else if (thread->addr_inside_register_stack(addr)) {
2554         // Disable the yellow zone which sets the state that
2555         // we've got a stack overflow problem.
2556         if (thread->stack_yellow_reserved_zone_enabled()) {
2557           thread->disable_stack_yellow_reserved_zone();
2558         }
2559         // Give us some room to process the exception.
2560         thread->disable_register_stack_guard();
2561         // Tracing with +Verbose.
2562         if (Verbose) {
2563           tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
2564           tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
2565           tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
2566           tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
2567                         thread->register_stack_base(),
2568                         thread->register_stack_base() + thread->stack_size());
2569         }
2570 
2571         // Reguard the permanent register stack red zone just to be sure.
2572         // We saw Windows silently disabling this without telling us.
2573         thread->enable_register_stack_red_zone();
2574 
2575         return Handle_Exception(exceptionInfo,
2576                                 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2577       }
2578 #endif
2579       if (thread->stack_guards_enabled()) {
2580         if (in_java) {
2581           frame fr;
2582           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2583           address addr = (address) exceptionRecord->ExceptionInformation[1];
2584           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2585             assert(fr.is_java_frame(), "Must be a Java frame");
2586             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2587           }
2588         }
        // Yellow zone violation.  The o/s has unprotected the first yellow
        // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone
        // to update the enabled status, even if the zone contains only one page.
2592         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2593         thread->disable_stack_yellow_reserved_zone();
2594         // If not in java code, return and hope for the best.
2595         return in_java
2596             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2597             :  EXCEPTION_CONTINUE_EXECUTION;
2598       } else {
2599         // Fatal red zone violation.
2600         thread->disable_stack_red_zone();
2601         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2602         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2603                       exceptionInfo->ContextRecord);
2604         return EXCEPTION_CONTINUE_SEARCH;
2605       }
2606     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2607       // Either stack overflow or null pointer exception.
2608       if (in_java) {
2609         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2610         address addr = (address) exceptionRecord->ExceptionInformation[1];
2611         address stack_end = thread->stack_end();
2612         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2613           // Stack overflow.
2614           assert(!os::uses_stack_guard_pages(),
2615                  "should be caught by red zone code above.");
2616           return Handle_Exception(exceptionInfo,
2617                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2618         }
2619         // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable);
        // the rest are checked explicitly now.
2622         CodeBlob* cb = CodeCache::find_blob(pc);
2623         if (cb != NULL) {
2624           if (os::is_poll_address(addr)) {
2625             address stub = SharedRuntime::get_poll_stub(pc);
2626             return Handle_Exception(exceptionInfo, stub);
2627           }
2628         }
2629         {
2630 #ifdef _WIN64
          // If it's a legal stack address, map the entire region in
2632           //
2633           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2634           address addr = (address) exceptionRecord->ExceptionInformation[1];
2635           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2636             addr = (address)((uintptr_t)addr &
2637                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2638             os::commit_memory((char *)addr, thread->stack_base() - addr,
2639                               !ExecMem);
2640             return EXCEPTION_CONTINUE_EXECUTION;
2641           } else
2642 #endif
2643           {
2644             // Null pointer exception.
2645 #ifdef _M_IA64
2646             // Process implicit null checks in compiled code. Note: Implicit null checks
2647             // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
2648             if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
2649               CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
2650               // Handle implicit null check in UEP method entry
2651               if (cb && (cb->is_frame_complete_at(pc) ||
2652                          (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
2653                 if (Verbose) {
2654                   intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
2655                   tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
2656                   tty->print_cr("      to addr " INTPTR_FORMAT, addr);
2657                   tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
2658                                 *(bundle_start + 1), *bundle_start);
2659                 }
2660                 return Handle_Exception(exceptionInfo,
2661                                         SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
2662               }
2663             }
2664 
2665             // Implicit null checks were processed above.  Hence, we should not reach
2666             // here in the usual case => die!
2667             if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
2668             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2669                          exceptionInfo->ContextRecord);
2670             return EXCEPTION_CONTINUE_SEARCH;
2671 
2672 #else // !IA64
2673 
2674             if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
2675               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2676               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2677             }
2678             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2679                          exceptionInfo->ContextRecord);
2680             return EXCEPTION_CONTINUE_SEARCH;
2681 #endif
2682           }
2683         }
2684       }
2685 
2686 #ifdef _WIN64
2687       // Special care for fast JNI field accessors.
2688       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2689       // in and the heap gets shrunk before the field access.
2690       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2691         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2692         if (addr != (address)-1) {
2693           return Handle_Exception(exceptionInfo, addr);
2694         }
2695       }
2696 #endif
2697 
2698       // Stack overflow or null pointer exception in native code.
2699       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2700                    exceptionInfo->ContextRecord);
2701       return EXCEPTION_CONTINUE_SEARCH;
2702     } // /EXCEPTION_ACCESS_VIOLATION
2703     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2704 #if defined _M_IA64
2705     else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2706               exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2707       M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
2708 
      // Compiled method patched to be non-entrant? The following conditions must apply:
2710       // 1. must be first instruction in bundle
2711       // 2. must be a break instruction with appropriate code
2712       if ((((uint64_t) pc & 0x0F) == 0) &&
2713           (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
2714         return Handle_Exception(exceptionInfo,
2715                                 (address)SharedRuntime::get_handle_wrong_method_stub());
2716       }
2717     } // /EXCEPTION_ILLEGAL_INSTRUCTION
2718 #endif
2719 
2720 
2721     if (in_java) {
2722       switch (exception_code) {
2723       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2724         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2725 
2726       case EXCEPTION_INT_OVERFLOW:
2727         return Handle_IDiv_Exception(exceptionInfo);
2728 
2729       } // switch
2730     }
2731     if (((thread->thread_state() == _thread_in_Java) ||
2732          (thread->thread_state() == _thread_in_native)) &&
2733          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2734       LONG result=Handle_FLT_Exception(exceptionInfo);
2735       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2736     }
2737   }
2738 
2739   if (exception_code != EXCEPTION_BREAKPOINT) {
2740     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2741                  exceptionInfo->ContextRecord);
2742   }
2743   return EXCEPTION_CONTINUE_SEARCH;
2744 }
2745 
2746 #ifndef _WIN64
2747 // Special care for fast JNI accessors.
2748 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2749 // the heap gets shrunk before the field access.
2750 // Need to install our own structured exception handler since native code may
2751 // install its own.
2752 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2753   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2754   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2755     address pc = (address) exceptionInfo->ContextRecord->Eip;
2756     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2757     if (addr != (address)-1) {
2758       return Handle_Exception(exceptionInfo, addr);
2759     }
2760   }
2761   return EXCEPTION_CONTINUE_SEARCH;
2762 }
2763 
2764 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2765   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2766                                                      jobject obj,           \
2767                                                      jfieldID fieldID) {    \
2768     __try {                                                                 \
2769       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2770                                                                  obj,       \
2771                                                                  fieldID);  \
2772     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2773                                               _exception_info())) {         \
2774     }                                                                       \
2775     return 0;                                                               \
2776   }
2777 
2778 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2779 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2780 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2781 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2782 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2783 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2784 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2785 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2786 
2787 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2788   switch (type) {
2789   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2790   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2791   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2792   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2793   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2794   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2795   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2796   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2797   default:        ShouldNotReachHere();
2798   }
2799   return (address)-1;
2800 }
2801 #endif
2802 
2803 // Virtual Memory
2804 
2805 int os::vm_page_size() { return os::win32::vm_page_size(); }
2806 int os::vm_allocation_granularity() {
2807   return os::win32::vm_allocation_granularity();
2808 }
2809 
2810 // Windows large page support is available on Windows 2003. In order to use
2811 // large page memory, the administrator must first assign additional privilege
2812 // to the user:
2813 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2814 //   + select Local Policies -> User Rights Assignment
2815 //   + double click "Lock pages in memory", add users and/or groups
2816 //   + reboot
2817 // Note the above steps are needed for administrator as well, as administrators
2818 // by default do not have the privilege to lock pages in memory.
2819 //
2820 // Note about Windows 2003: although the API supports committing large page
2821 // memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment that it only uses large pages if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This will probably change
// in the future; if so, the code below needs to be revisited.
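// Such allocations are requested with the MEM_LARGE_PAGES flag and must be
// sized and aligned to GetLargePageMinimum(); the "Lock pages in memory"
// privilege acquired in request_lock_memory_privilege() below is what allows
// them to succeed.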
2827 
2828 #ifndef MEM_LARGE_PAGES
2829   #define MEM_LARGE_PAGES 0x20000000
2830 #endif
2831 
2832 static HANDLE    _hProcess;
2833 static HANDLE    _hToken;
2834 
2835 // Container for NUMA node list info
2836 class NUMANodeListHolder {
2837  private:
2838   int *_numa_used_node_list;  // allocated below
2839   int _numa_used_node_count;
2840 
2841   void free_node_list() {
2842     if (_numa_used_node_list != NULL) {
2843       FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2844     }
2845   }
2846 
2847  public:
2848   NUMANodeListHolder() {
2849     _numa_used_node_count = 0;
2850     _numa_used_node_list = NULL;
2851     // do rest of initialization in build routine (after function pointers are set up)
2852   }
2853 
2854   ~NUMANodeListHolder() {
2855     free_node_list();
2856   }
2857 
2858   bool build() {
2859     DWORD_PTR proc_aff_mask;
2860     DWORD_PTR sys_aff_mask;
2861     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2862     ULONG highest_node_number;
2863     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2864     free_node_list();
2865     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2866     for (unsigned int i = 0; i <= highest_node_number; i++) {
2867       ULONGLONG proc_mask_numa_node;
2868       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2869       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2870         _numa_used_node_list[_numa_used_node_count++] = i;
2871       }
2872     }
2873     return (_numa_used_node_count > 1);
2874   }
2875 
2876   int get_count() { return _numa_used_node_count; }
2877   int get_node_list_entry(int n) {
2878     // for indexes out of range, returns -1
2879     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2880   }
2881 
2882 } numa_node_list_holder;
2883 
2884 
2885 
2886 static size_t _large_page_size = 0;
2887 
2888 static bool request_lock_memory_privilege() {
2889   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2890                           os::current_process_id());
2891 
2892   LUID luid;
2893   if (_hProcess != NULL &&
2894       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2895       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2896 
2897     TOKEN_PRIVILEGES tp;
2898     tp.PrivilegeCount = 1;
2899     tp.Privileges[0].Luid = luid;
2900     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2901 
2902     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2903     // privilege. Check GetLastError() too. See MSDN document.
2904     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2905         (GetLastError() == ERROR_SUCCESS)) {
2906       return true;
2907     }
2908   }
2909 
2910   return false;
2911 }
2912 
2913 static void cleanup_after_large_page_init() {
2914   if (_hProcess) CloseHandle(_hProcess);
2915   _hProcess = NULL;
2916   if (_hToken) CloseHandle(_hToken);
2917   _hToken = NULL;
2918 }
2919 
2920 static bool numa_interleaving_init() {
2921   bool success = false;
2922   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2923 
2924   // print a warning if UseNUMAInterleaving flag is specified on command line
2925   bool warn_on_failure = use_numa_interleaving_specified;
2926 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2927 
2928   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2929   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2930   NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2931 
2932   if (numa_node_list_holder.build()) {
2933     if (log_is_enabled(Debug, os, cpu)) {
2934       Log(os, cpu) log;
2935       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2936       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2937         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2938       }
2939     }
2940     success = true;
2941   } else {
2942     WARN("Process does not cover multiple NUMA nodes.");
2943   }
2944   if (!success) {
2945     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2946   }
2947   return success;
2948 #undef WARN
2949 }
2950 
2951 // this routine is used whenever we need to reserve a contiguous VA range
2952 // but we need to make separate VirtualAlloc calls for each piece of the range
2953 // Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it may be set explicitly)
2955 //  * UseNUMAInterleaving requires a separate node for each piece
2956 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2957                                          DWORD prot,
2958                                          bool should_inject_error = false) {
2959   char * p_buf;
2960   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2961   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2962   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2963 
  // First reserve enough address space in advance, since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits, but WS2003 does not allow reserving large page space.
  // So we just use 4K pages for the reserve; this gives us a legal contiguous
  // address space. Then we deallocate that reservation and re-allocate
  // using large pages.
2970   const size_t size_of_reserve = bytes + chunk_size;
2971   if (bytes > size_of_reserve) {
2972     // Overflowed.
2973     return NULL;
2974   }
2975   p_buf = (char *) VirtualAlloc(addr,
2976                                 size_of_reserve,  // size of Reserve
2977                                 MEM_RESERVE,
2978                                 PAGE_READWRITE);
2979   // If reservation failed, return NULL
2980   if (p_buf == NULL) return NULL;
2981   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2982   os::release_memory(p_buf, bytes + chunk_size);
2983 
2984   // we still need to round up to a page boundary (in case we are using large pages)
2985   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
2986   // instead we handle this in the bytes_to_rq computation below
2987   p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2988 
2989   // now go through and allocate one chunk at a time until all bytes are
2990   // allocated
2991   size_t  bytes_remaining = bytes;
2992   // An overflow of align_size_up() would have been caught above
2993   // in the calculation of size_of_reserve.
2994   char * next_alloc_addr = p_buf;
2995   HANDLE hProc = GetCurrentProcess();
2996 
2997 #ifdef ASSERT
2998   // Variable for the failure injection
2999   long ran_num = os::random();
3000   size_t fail_after = ran_num % bytes;
3001 #endif
3002 
  int count = 0;
3004   while (bytes_remaining) {
3005     // select bytes_to_rq to get to the next chunk_size boundary
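    // For example, with a 2M chunk_size and next_alloc_addr 512K past a chunk
    // boundary, bytes_to_rq is MIN2(bytes_remaining, 1536K), so this request
    // brings the allocation back onto a chunk boundary.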
3006 
3007     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
3008     // Note allocate and commit
3009     char * p_new;
3010 
3011 #ifdef ASSERT
3012     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
3013 #else
3014     const bool inject_error_now = false;
3015 #endif
3016 
3017     if (inject_error_now) {
3018       p_new = NULL;
3019     } else {
3020       if (!UseNUMAInterleaving) {
3021         p_new = (char *) VirtualAlloc(next_alloc_addr,
3022                                       bytes_to_rq,
3023                                       flags,
3024                                       prot);
3025       } else {
3026         // get the next node to use from the used_node_list
3027         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3028         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
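        // VirtualAllocExNuma() treats the node as a preference, not a hard
        // binding: physical pages are typically taken from that node when the
        // memory is first touched, if pages are available there.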
3029         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3030       }
3031     }
3032 
3033     if (p_new == NULL) {
3034       // Free any allocated pages
3035       if (next_alloc_addr > p_buf) {
3036         // Some memory was committed so release it.
3037         size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so we
        // need to create a dummy 'reserve' record to match
        // the release.
3041         MemTracker::record_virtual_memory_reserve((address)p_buf,
3042                                                   bytes_to_release, CALLER_PC);
3043         os::release_memory(p_buf, bytes_to_release);
3044       }
3045 #ifdef ASSERT
3046       if (should_inject_error) {
3047         log_develop_debug(pagesize)("Reserving pages individually failed.");
3048       }
3049 #endif
3050       return NULL;
3051     }
3052 
3053     bytes_remaining -= bytes_to_rq;
3054     next_alloc_addr += bytes_to_rq;
3055     count++;
3056   }
3057   // Although the memory is allocated individually, it is returned as one.
3058   // NMT records it as one block.
3059   if ((flags & MEM_COMMIT) != 0) {
3060     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3061   } else {
3062     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3063   }
3064 
3065   // made it this far, success
3066   return p_buf;
3067 }
3068 
3069 
3070 
3071 void os::large_page_init() {
3072   if (!UseLargePages) return;
3073 
3074   // print a warning if any large page related flag is specified on command line
3075   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3076                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3077   bool success = false;
3078 
3079 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3080   if (request_lock_memory_privilege()) {
3081     size_t s = GetLargePageMinimum();
3082     if (s) {
3083 #if defined(IA32) || defined(AMD64)
3084       if (s > 4*M || LargePageSizeInBytes > 4*M) {
3085         WARN("JVM cannot use large pages bigger than 4mb.");
3086       } else {
3087 #endif
3088         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
3089           _large_page_size = LargePageSizeInBytes;
3090         } else {
3091           _large_page_size = s;
3092         }
3093         success = true;
3094 #if defined(IA32) || defined(AMD64)
3095       }
3096 #endif
3097     } else {
3098       WARN("Large page is not supported by the processor.");
3099     }
3100   } else {
3101     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3102   }
3103 #undef WARN
3104 
3105   const size_t default_page_size = (size_t) vm_page_size();
3106   if (success && _large_page_size > default_page_size) {
3107     _page_sizes[0] = _large_page_size;
3108     _page_sizes[1] = default_page_size;
3109     _page_sizes[2] = 0;
3110   }
3111 
3112   cleanup_after_large_page_init();
3113   UseLargePages = success;
3114 }
3115 
3116 // Helper function to create a temp file in the given directory
3117 int os::create_file_for_heap(const char* dir, size_t size) {
3118 
3119   const char name_template[] = "/jvmheap.XXXXXX";
3120 
3121   char *fullname = (char*)alloca(strlen(dir) + sizeof(name_template));
3122   (void)strcpy(fullname, dir);
3123   (void)strcat(fullname, name_template);
3124   os::native_path(fullname);
3125 
3126   char *path = _mktemp(fullname);
3127   if (path == NULL) {
3128     return -1;
3129   }
3130 
3131   int fd = _open(path, O_RDWR | O_CREAT | O_EXCL, S_IWRITE | S_IREAD);
3132 
3133   if (fd < 0) {
3134     warning("Failure to create file %s for heap", path);
3135     return -1;
3136   }
3137 
3138   // Delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted
3139   _unlink(fullname);
3140 
3141   return fd;
3142 }
3143 
// If 'base' is not NULL, the function will return NULL if it cannot get 'base'.
3145 //
3146 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3147   assert(fd != -1, "File descriptor is not valid");
3148 
3149   HANDLE fh = (HANDLE)_get_osfhandle(fd);
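  // _get_osfhandle() maps the CRT file descriptor to the underlying Win32
  // HANDLE that CreateFileMapping() requires; the 64-bit mapping size is
  // passed as separate high/low DWORDs.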
3150   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3151                                          (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3152   if (fileMapping == NULL) {
3153     if (GetLastError() == ERROR_DISK_FULL) {
3154       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for heap"));
3155     } else {
3156       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3157     }
3158 
3159     return NULL;
3160   }
3161 
3162   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3163 
3164   CloseHandle(fileMapping);
3165   if (addr == NULL || (base != NULL && addr != base)) {
3166     if (addr != NULL) {
      if (!pd_unmap_memory((char*)addr, size)) {
3168         warning("Could not release memory on unsuccessful file mapping");
3169       }
3170     }
3171     return NULL;
3172   }
3173 
3174   return (char*)addr;
3175 }
3176 
3177 // On win32, one cannot release just a part of reserved memory, it's an
3178 // all or nothing deal.  When we split a reservation, we must break the
3179 // reservation into two reservations.
3180 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3181                                   bool realloc) {
3182   if (size > 0) {
3183     release_memory(base, size);
3184     if (realloc) {
3185       reserve_memory(split, base);
3186     }
3187     if (size != split) {
3188       reserve_memory(size - split, base + split);
3189     }
3190   }
3191 }
3192 
// Multiple threads can race in this code, but it is not possible to unmap
// small sections of virtual space to get the requested alignment the way
// POSIX-like OSes can. Windows prevents multiple threads from remapping over
// each other, so this loop is thread-safe.
3196 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3197   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3198          "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment - 1)) == 0, "size must be 'alignment' aligned");
3200 
3201   size_t extra_size = size + alignment;
3202   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3203 
3204   char* aligned_base = NULL;
3205 
3206   do {
3207     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3208     if (extra_base == NULL) {
3209       return NULL;
3210     }
3211     // Do manual alignment
3212     aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3213 
3214     if (file_desc != -1) {
3215       os::unmap_memory(extra_base, extra_size);
3216     }
3217     else {
3218       os::release_memory(extra_base, extra_size);
3219     }
3220 
3221     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3222 
3223   } while (aligned_base == NULL);
3224 
3225   return aligned_base;
3226 }
3227 
3228 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint, int file_desc) {
3229   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3230          "reserve alignment");
3231   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3232   char* res;
3233   // note that if UseLargePages is on, all the areas that require interleaving
3234   // will go thru reserve_memory_special rather than thru here.
3235   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3236   assert(file_desc == -1 || !use_individual,  "NUMA allocation not supported when HeapDir is used");
3237   if (!use_individual) {
3238     if (file_desc != -1) {
3239       res = map_memory_to_file(addr, bytes, file_desc);
3240     } else {
3241       res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3242     }
3243   } else {
3244     elapsedTimer reserveTimer;
3245     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3246     // in numa interleaving, we have to allocate pages individually
3247     // (well really chunks of NUMAInterleaveGranularity size)
3248     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3249     if (res == NULL) {
3250       warning("NUMA page allocation failed");
3251     }
3252     if (Verbose && PrintMiscellaneous) {
3253       reserveTimer.stop();
3254       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3255                     reserveTimer.milliseconds(), reserveTimer.ticks());
3256     }
3257   }
3258   assert(res == NULL || addr == NULL || addr == res,
3259          "Unexpected address from reserve.");
3260 
3261   return res;
3262 }
3263 
3264 // Reserve memory at an arbitrary address, only if that area is
3265 // available (and not reserved for something else).
3266 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
3269   return reserve_memory(bytes, requested_addr, 0, file_desc);
3270 }
3271 
3272 size_t os::large_page_size() {
3273   return _large_page_size;
3274 }
3275 
3276 bool os::can_commit_large_page_memory() {
3277   // Windows only uses large page memory when the entire region is reserved
3278   // and committed in a single VirtualAlloc() call. This may change in the
3279   // future, but with Windows 2003 it's not possible to commit on demand.
3280   return false;
3281 }
3282 
3283 bool os::can_execute_large_page_memory() {
3284   return true;
3285 }
3286 
3287 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3288                                  bool exec) {
3289   assert(UseLargePages, "only for large pages");
3290 
3291   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3292     return NULL; // Fallback to small pages.
3293   }
3294 
3295   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3296   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3297 
3298   // with large pages, there are two cases where we need to use Individual Allocation
3299   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3300   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3301   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3302     log_debug(pagesize)("Reserving large pages individually.");
3303 
3304     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3305     if (p_buf == NULL) {
3306       // give an appropriate warning message
3307       if (UseNUMAInterleaving) {
3308         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3309       }
3310       if (UseLargePagesIndividualAllocation) {
3311         warning("Individually allocated large pages failed, "
3312                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3313       }
3314       return NULL;
3315     }
3316 
3317     return p_buf;
3318 
3319   } else {
3320     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3321 
3322     // normal policy just allocate it all at once
3323     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3324     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3325     if (res != NULL) {
3326       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3327     }
3328 
3329     return res;
3330   }
3331 }
3332 
3333 bool os::release_memory_special(char* base, size_t bytes) {
3334   assert(base != NULL, "Sanity check");
3335   return release_memory(base, bytes);
3336 }
3337 
3338 void os::print_statistics() {
3339 }
3340 
3341 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3342   int err = os::get_last_error();
3343   char buf[256];
3344   size_t buf_len = os::lasterror(buf, sizeof(buf));
3345   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3346           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3347           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3348 }
3349 
3350 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3351   if (bytes == 0) {
3352     // Don't bother the OS with noops.
3353     return true;
3354   }
3355   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3356   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3357   // Don't attempt to print anything if the OS call fails. We're
3358   // probably low on resources, so the print itself may cause crashes.
3359 
  // Unless NUMAInterleaving is enabled, the range of a commit is always
  // within a reserve covered by a single VirtualAlloc; in that case we can
  // just do a single commit for the requested size.
3363   if (!UseNUMAInterleaving) {
3364     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3365       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3366       return false;
3367     }
3368     if (exec) {
3369       DWORD oldprot;
3370       // Windows doc says to use VirtualProtect to get execute permissions
3371       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3372         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3373         return false;
3374       }
3375     }
3376     return true;
3377   } else {
3378 
3379     // when NUMAInterleaving is enabled, the commit might cover a range that
3380     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3381     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3382     // returns represents the number of bytes that can be committed in one step.
3383     size_t bytes_remaining = bytes;
3384     char * next_alloc_addr = addr;
3385     while (bytes_remaining > 0) {
3386       MEMORY_BASIC_INFORMATION alloc_info;
3387       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3388       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3389       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3390                        PAGE_READWRITE) == NULL) {
3391         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3392                                             exec);)
3393         return false;
3394       }
3395       if (exec) {
3396         DWORD oldprot;
3397         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3398                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3399           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3400                                               exec);)
3401           return false;
3402         }
3403       }
3404       bytes_remaining -= bytes_to_rq;
3405       next_alloc_addr += bytes_to_rq;
3406     }
3407   }
3408   // if we made it this far, return true
3409   return true;
3410 }
3411 
3412 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3413                           bool exec) {
3414   // alignment_hint is ignored on this OS
3415   return pd_commit_memory(addr, size, exec);
3416 }
3417 
3418 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3419                                   const char* mesg) {
3420   assert(mesg != NULL, "mesg must be specified");
3421   if (!pd_commit_memory(addr, size, exec)) {
3422     warn_fail_commit_memory(addr, size, exec);
3423     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3424   }
3425 }
3426 
3427 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3428                                   size_t alignment_hint, bool exec,
3429                                   const char* mesg) {
3430   // alignment_hint is ignored on this OS
3431   pd_commit_memory_or_exit(addr, size, exec, mesg);
3432 }
3433 
3434 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3435   if (bytes == 0) {
3436     // Don't bother the OS with noops.
3437     return true;
3438   }
3439   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3440   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3441   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3442 }
3443 
3444 bool os::pd_release_memory(char* addr, size_t bytes) {
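  // With MEM_RELEASE the size must be 0; the entire reservation made by the
  // original VirtualAlloc() is released.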
3445   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3446 }
3447 
3448 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3449   return os::commit_memory(addr, size, !ExecMem);
3450 }
3451 
3452 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3453   return os::uncommit_memory(addr, size);
3454 }
3455 
3456 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3457   uint count = 0;
3458   bool ret = false;
3459   size_t bytes_remaining = bytes;
3460   char * next_protect_addr = addr;
3461 
3462   // Use VirtualQuery() to get the chunk size.
3463   while (bytes_remaining) {
3464     MEMORY_BASIC_INFORMATION alloc_info;
3465     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3466       return false;
3467     }
3468 
3469     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
    // We used different APIs in allocate_pages_individually() depending on
    // UseNUMAInterleaving, but we don't distinguish here, as both cases are
    // protected by the same API.
    ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
    if (!ret) {
      warning("Failed protecting pages individually for chunk #%u", count);
      return false;
    }
3477 
3478     bytes_remaining -= bytes_to_protect;
3479     next_protect_addr += bytes_to_protect;
3480     count++;
3481   }
3482   return ret;
3483 }
3484 
3485 // Set protections specified
3486 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3487                         bool is_committed) {
3488   unsigned int p = 0;
3489   switch (prot) {
3490   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3491   case MEM_PROT_READ: p = PAGE_READONLY; break;
3492   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3493   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3494   default:
3495     ShouldNotReachHere();
3496   }
3497 
3498   DWORD old_status;
3499 
  // Strangely enough, on Win32 one can change protection only for committed
  // memory. Not a big deal anyway, as 'bytes' is at most 64K here.
3502   if (!is_committed) {
3503     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3504                           "cannot commit protection page");
3505   }
  // One cannot use os::guard_memory() here, as on Win32 guard pages
  // have different (one-shot) semantics. From MSDN on PAGE_GUARD:
3508   //
3509   // Pages in the region become guard pages. Any attempt to access a guard page
3510   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3511   // the guard page status. Guard pages thus act as a one-time access alarm.
3512   bool ret;
3513   if (UseNUMAInterleaving) {
3514     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3515     // so we must protect the chunks individually.
3516     ret = protect_pages_individually(addr, bytes, p, &old_status);
3517   } else {
3518     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3519   }
3520 #ifdef ASSERT
3521   if (!ret) {
3522     int err = os::get_last_error();
3523     char buf[256];
3524     size_t buf_len = os::lasterror(buf, sizeof(buf));
3525     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3526           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3527           buf_len != 0 ? buf : "<no_error_string>", err);
3528   }
3529 #endif
3530   return ret;
3531 }
3532 
3533 bool os::guard_memory(char* addr, size_t bytes) {
3534   DWORD old_status;
3535   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3536 }
3537 
3538 bool os::unguard_memory(char* addr, size_t bytes) {
3539   DWORD old_status;
3540   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3541 }
3542 
3543 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3544 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3545 void os::numa_make_global(char *addr, size_t bytes)    { }
3546 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3547 bool os::numa_topology_changed()                       { return false; }
3548 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3549 int os::numa_get_group_id()                            { return 0; }
3550 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3551   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3552     // Provide an answer for UMA systems
3553     ids[0] = 0;
3554     return 1;
3555   } else {
3556     // check for size bigger than actual groups_num
3557     size = MIN2(size, numa_get_groups_num());
3558     for (int i = 0; i < (int)size; i++) {
3559       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3560     }
3561     return size;
3562   }
3563 }
3564 
3565 bool os::get_page_info(char *start, page_info* info) {
3566   return false;
3567 }
3568 
3569 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3570                      page_info* page_found) {
3571   return end;
3572 }
3573 
3574 char* os::non_memory_address_word() {
3575   // Must never look like an address returned by reserve_memory,
3576   // even in its subfields (as defined by the CPU immediate fields,
3577   // if the CPU splits constants across multiple instructions).
3578   return (char*)-1;
3579 }
3580 
3581 #define MAX_ERROR_COUNT 100
3582 #define SYS_THREAD_ERROR 0xffffffffUL
3583 
3584 void os::pd_start_thread(Thread* thread) {
3585   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3586   // Returns previous suspend state:
3587   // 0:  Thread was not suspended
3588   // 1:  Thread is running now
3589   // >1: Thread is still suspended.
3590   assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
3591 }
3592 
3593 class HighResolutionInterval : public CHeapObj<mtThread> {
3594   // The default timer resolution seems to be 10 milliseconds.
3595   // (Where is this written down?)
3596   // If someone wants to sleep for only a fraction of the default,
3597   // then we set the timer resolution down to 1 millisecond for
3598   // the duration of their interval.
3599   // We carefully set the resolution back, since otherwise we
3600   // seem to incur an overhead (3%?) that we don't need.
3601   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3603   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3604   // timeBeginPeriod() if the relative error exceeded some threshold.
3605   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3606   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
3607   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3608   // resolution timers running.
3609  private:
3610   jlong resolution;
3611  public:
3612   HighResolutionInterval(jlong ms) {
3613     resolution = ms % 10L;
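    // A sleep that is a multiple of the ~10 ms default tick needs no extra
    // precision; only fractional intervals temporarily request a 1 ms period.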
3614     if (resolution != 0) {
3615       MMRESULT result = timeBeginPeriod(1L);
3616     }
3617   }
3618   ~HighResolutionInterval() {
3619     if (resolution != 0) {
3620       MMRESULT result = timeEndPeriod(1L);
3621     }
3622     resolution = 0L;
3623   }
3624 };
3625 
3626 int os::sleep(Thread* thread, jlong ms, bool interruptable) {
3627   jlong limit = (jlong) MAXDWORD;
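  // Sleep() and WaitForMultipleObjects() take a DWORD timeout, so longer
  // sleeps are performed in MAXDWORD-millisecond pieces.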
3628 
3629   while (ms > limit) {
3630     int res;
3631     if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
3632       return res;
3633     }
3634     ms -= limit;
3635   }
3636 
3637   assert(thread == Thread::current(), "thread consistency check");
3638   OSThread* osthread = thread->osthread();
3639   OSThreadWaitState osts(osthread, false /* not Object.wait() */);
3640   int result;
3641   if (interruptable) {
3642     assert(thread->is_Java_thread(), "must be java thread");
3643     JavaThread *jt = (JavaThread *) thread;
3644     ThreadBlockInVM tbivm(jt);
3645 
3646     jt->set_suspend_equivalent();
3647     // cleared by handle_special_suspend_equivalent_condition() or
3648     // java_suspend_self() via check_and_wait_while_suspended()
3649 
3650     HANDLE events[1];
3651     events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri = NULL;
3653     if (!ForceTimeHighResolution) {
3654       phri = new HighResolutionInterval(ms);
3655     }
3656     if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
3657       result = OS_TIMEOUT;
3658     } else {
3659       ResetEvent(osthread->interrupt_event());
3660       osthread->set_interrupted(false);
3661       result = OS_INTRPT;
3662     }
3663     delete phri; //if it is NULL, harmless
3664 
3665     // were we externally suspended while we were waiting?
3666     jt->check_and_wait_while_suspended();
3667   } else {
3668     assert(!thread->is_Java_thread(), "must not be java thread");
3669     Sleep((long) ms);
3670     result = OS_TIMEOUT;
3671   }
3672   return result;
3673 }
3674 
3675 // Short sleep, direct OS call.
3676 //
3677 // ms = 0, means allow others (if any) to run.
3678 //
3679 void os::naked_short_sleep(jlong ms) {
3680   assert(ms < 1000, "Un-interruptable sleep, short time use only");
3681   Sleep(ms);
3682 }
3683 
3684 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3685 void os::infinite_sleep() {
3686   while (true) {    // sleep forever ...
3687     Sleep(100000);  // ... 100 seconds at a time
3688   }
3689 }
3690 
3691 typedef BOOL (WINAPI * STTSignature)(void);
3692 
3693 void os::naked_yield() {
3694   // Consider passing back the return value from SwitchToThread().
3695   SwitchToThread();
3696 }
3697 
3698 // Win32 only gives you access to seven real priorities at a time,
3699 // so we compress Java's ten down to seven.  It would be better
3700 // if we dynamically adjusted relative priorities.
3701 
3702 int os::java_to_os_priority[CriticalPriority + 1] = {
3703   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3704   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3705   THREAD_PRIORITY_LOWEST,                       // 2
3706   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3707   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3708   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3709   THREAD_PRIORITY_NORMAL,                       // 6
3710   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3711   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3712   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3713   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3714   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3715 };
3716 
3717 int prio_policy1[CriticalPriority + 1] = {
3718   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3719   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3720   THREAD_PRIORITY_LOWEST,                       // 2
3721   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3722   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3723   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3724   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3725   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3726   THREAD_PRIORITY_HIGHEST,                      // 8
3727   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3728   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3729   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3730 };
3731 
3732 static int prio_init() {
3733   // If ThreadPriorityPolicy is 1, switch tables
3734   if (ThreadPriorityPolicy == 1) {
3735     int i;
3736     for (i = 0; i < CriticalPriority + 1; i++) {
3737       os::java_to_os_priority[i] = prio_policy1[i];
3738     }
3739   }
3740   if (UseCriticalJavaThreadPriority) {
3741     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3742   }
3743   return 0;
3744 }
3745 
3746 OSReturn os::set_native_priority(Thread* thread, int priority) {
3747   if (!UseThreadPriorities) return OS_OK;
3748   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3749   return ret ? OS_OK : OS_ERR;
3750 }
3751 
3752 OSReturn os::get_native_priority(const Thread* const thread,
3753                                  int* priority_ptr) {
3754   if (!UseThreadPriorities) {
3755     *priority_ptr = java_to_os_priority[NormPriority];
3756     return OS_OK;
3757   }
3758   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3759   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3760     assert(false, "GetThreadPriority failed");
3761     return OS_ERR;
3762   }
3763   *priority_ptr = os_prio;
3764   return OS_OK;
3765 }
3766 
3767 
3768 // Hint to the underlying OS that a task switch would not be good.
3769 // Void return because it's a hint and can fail.
3770 void os::hint_no_preempt() {}
3771 
3772 void os::interrupt(Thread* thread) {
3773   assert(!thread->is_Java_thread() || Thread::current() == thread ||
3774          Threads_lock->owned_by_self(),
3775          "possibility of dangling Thread pointer");
3776 
3777   OSThread* osthread = thread->osthread();
3778   osthread->set_interrupted(true);
3779   // More than one thread can get here with the same value of osthread,
3780   // resulting in multiple notifications.  We do, however, want the store
3781   // to interrupted() to be visible to other threads before we post
3782   // the interrupt event.
3783   OrderAccess::release();
3784   SetEvent(osthread->interrupt_event());
3785   // For JSR166:  unpark after setting status
3786   if (thread->is_Java_thread()) {
3787     ((JavaThread*)thread)->parker()->unpark();
3788   }
3789 
3790   ParkEvent * ev = thread->_ParkEvent;
3791   if (ev != NULL) ev->unpark();
3792 }
3793 
3794 
3795 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3796   assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3797          "possibility of dangling Thread pointer");
3798 
3799   OSThread* osthread = thread->osthread();
3800   // There is no synchronization between the setting of the interrupt
3801   // and it being cleared here. It is critical - see 6535709 - that
3802   // we only clear the interrupt state, and reset the interrupt event,
3803   // if we are going to report that we were indeed interrupted - else
3804   // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. We check the thread's interrupt event to see
  // whether it received a real interrupt, thus preventing spurious wakeups.
3807   bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3808   if (interrupted && clear_interrupted) {
3809     osthread->set_interrupted(false);
3810     ResetEvent(osthread->interrupt_event());
3811   } // Otherwise leave the interrupted state alone
3812 
3813   return interrupted;
3814 }
3815 
// Gets a pc (hint) for a running thread. Currently used only for profiling.
3817 ExtendedPC os::get_thread_pc(Thread* thread) {
3818   CONTEXT context;
3819   context.ContextFlags = CONTEXT_CONTROL;
3820   HANDLE handle = thread->osthread()->thread_handle();
3821 #ifdef _M_IA64
3822   assert(0, "Fix get_thread_pc");
3823   return ExtendedPC(NULL);
3824 #else
3825   if (GetThreadContext(handle, &context)) {
3826 #ifdef _M_AMD64
3827     return ExtendedPC((address) context.Rip);
3828 #else
3829     return ExtendedPC((address) context.Eip);
3830 #endif
3831   } else {
3832     return ExtendedPC(NULL);
3833   }
3834 #endif
3835 }
3836 
3837 // GetCurrentThreadId() returns DWORD
3838 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3839 
3840 static int _initial_pid = 0;
3841 
3842 int os::current_process_id() {
3843   return (_initial_pid ? _initial_pid : _getpid());
3844 }
3845 
3846 int    os::win32::_vm_page_size              = 0;
3847 int    os::win32::_vm_allocation_granularity = 0;
3848 int    os::win32::_processor_type            = 0;
3849 // Processor level is not available on non-NT systems, use vm_version instead
3850 int    os::win32::_processor_level           = 0;
3851 julong os::win32::_physical_memory           = 0;
3852 size_t os::win32::_default_stack_size        = 0;
3853 
3854 intx          os::win32::_os_thread_limit    = 0;
3855 volatile intx os::win32::_os_thread_count    = 0;
3856 
3857 bool   os::win32::_is_windows_server         = false;
3858 
3859 // 6573254
3860 // Currently, the bug is observed across all the supported Windows releases,
3861 // including the latest one (as of this writing - Windows Server 2012 R2)
3862 bool   os::win32::_has_exit_bug              = true;
3863 
3864 void os::win32::initialize_system_info() {
3865   SYSTEM_INFO si;
3866   GetSystemInfo(&si);
3867   _vm_page_size    = si.dwPageSize;
3868   _vm_allocation_granularity = si.dwAllocationGranularity;
3869   _processor_type  = si.dwProcessorType;
3870   _processor_level = si.wProcessorLevel;
3871   set_processor_count(si.dwNumberOfProcessors);
3872 
3873   MEMORYSTATUSEX ms;
3874   ms.dwLength = sizeof(ms);
3875 
  // GlobalMemoryStatusEx() also returns ullAvailPhys (free physical memory bytes),
  // ullTotalVirtual, ullAvailVirtual, and dwMemoryLoad (% of memory in use).
3878   GlobalMemoryStatusEx(&ms);
3879   _physical_memory = ms.ullTotalPhys;
3880 
3881   if (FLAG_IS_DEFAULT(MaxRAM)) {
3882     // Adjust MaxRAM according to the maximum virtual address space available.
3883     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3884   }
3885 
3886   OSVERSIONINFOEX oi;
3887   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3888   GetVersionEx((OSVERSIONINFO*)&oi);
3889   switch (oi.dwPlatformId) {
3890   case VER_PLATFORM_WIN32_NT:
3891     {
3892       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3893       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3894           oi.wProductType == VER_NT_SERVER) {
3895         _is_windows_server = true;
3896       }
3897     }
3898     break;
3899   default: fatal("Unknown platform");
3900   }
3901 
3902   _default_stack_size = os::current_stack_size();
3903   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3904   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3905          "stack size not a multiple of page size");
3906 
3907   initialize_performance_counter();
3908 }
3909 
3910 
3911 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3912                                       int ebuflen) {
3913   char path[MAX_PATH];
3914   DWORD size;
3915   DWORD pathLen = (DWORD)sizeof(path);
3916   HINSTANCE result = NULL;
3917 
3918   // only allow library name without path component
3919   assert(strchr(name, '\\') == NULL, "path not allowed");
3920   assert(strchr(name, ':') == NULL, "path not allowed");
3921   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3922     jio_snprintf(ebuf, ebuflen,
3923                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3924     return NULL;
3925   }
3926 
3927   // search system directory
3928   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3929     if (size >= pathLen) {
3930       return NULL; // truncated
3931     }
3932     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3933       return NULL; // truncated
3934     }
3935     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3936       return result;
3937     }
3938   }
3939 
3940   // try Windows directory
3941   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3942     if (size >= pathLen) {
3943       return NULL; // truncated
3944     }
3945     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3946       return NULL; // truncated
3947     }
3948     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3949       return result;
3950     }
3951   }
3952 
3953   jio_snprintf(ebuf, ebuflen,
3954                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3955   return NULL;
3956 }
3957 
3958 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3959 #define EXIT_TIMEOUT 300000 /* 5 minutes */
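// MAXIMUM_WAIT_OBJECTS is 64, so up to 1024 exiting-thread handles are kept;
// the waits below are therefore done in portions of at most 64 handles.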
3960 
3961 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3962   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3963   return TRUE;
3964 }
3965 
3966 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3967   // Basic approach:
3968   //  - Each exiting thread registers its intent to exit and then does so.
3969   //  - A thread trying to terminate the process must wait for all
3970   //    threads currently exiting to complete their exit.
3971 
3972   if (os::win32::has_exit_bug()) {
3973     // The array holds handles of the threads that have started exiting by calling
3974     // _endthreadex().
3975     // Should be large enough to avoid blocking the exiting thread due to lack of
3976     // a free slot.
3977     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3978     static int handle_count = 0;
3979 
3980     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3981     static CRITICAL_SECTION crit_sect;
3982     static volatile jint process_exiting = 0;
3983     int i, j;
3984     DWORD res;
3985     HANDLE hproc, hthr;
3986 
3987     // We only attempt to register threads until a process exiting
3988     // thread manages to set the process_exiting flag. Any threads
3989     // that come through here after the process_exiting flag is set
3990     // are unregistered and will be caught in the SuspendThread()
3991     // infinite loop below.
3992     bool registered = false;
3993 
3994     // The first thread that reached this point, initializes the critical section.
3995     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3996       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3997     } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3998       if (what != EPT_THREAD) {
3999         // Atomically set process_exiting before the critical section
4000         // to increase the visibility between racing threads.
4001         Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
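        // The CAS stores this thread's id in process_exiting, so the
        // unregistered-thread check at the bottom can let the process-exiting
        // thread itself proceed to exit().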
4002       }
4003       EnterCriticalSection(&crit_sect);
4004 
4005       if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
4006         // Remove from the array those handles of the threads that have completed exiting.
4007         for (i = 0, j = 0; i < handle_count; ++i) {
4008           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
4009           if (res == WAIT_TIMEOUT) {
4010             handles[j++] = handles[i];
4011           } else {
4012             if (res == WAIT_FAILED) {
4013               warning("WaitForSingleObject failed (%u) in %s: %d\n",
4014                       GetLastError(), __FILE__, __LINE__);
4015             }
4016             // Don't keep the handle, if we failed waiting for it.
4017             CloseHandle(handles[i]);
4018           }
4019         }
4020 
4021         // If there's no free slot in the array of the kept handles, we'll have to
4022         // wait until at least one thread completes exiting.
4023         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
4024           // Raise the priority of the oldest exiting thread to increase its chances
4025           // to complete sooner.
4026           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
4027           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
4028           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
4029             i = (res - WAIT_OBJECT_0);
4030             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
4031             for (; i < handle_count; ++i) {
4032               handles[i] = handles[i + 1];
4033             }
4034           } else {
4035             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4036                     (res == WAIT_FAILED ? "failed" : "timed out"),
4037                     GetLastError(), __FILE__, __LINE__);
4038             // Don't keep handles, if we failed waiting for them.
4039             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
4040               CloseHandle(handles[i]);
4041             }
4042             handle_count = 0;
4043           }
4044         }
4045 
4046         // Store a duplicate of the current thread handle in the array of handles.
4047         hproc = GetCurrentProcess();
4048         hthr = GetCurrentThread();
4049         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
4050                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
4051           warning("DuplicateHandle failed (%u) in %s: %d\n",
4052                   GetLastError(), __FILE__, __LINE__);
4053 
4054           // We can't register this thread (no more handles) so this thread
4055           // may be racing with a thread that is calling exit(). If the thread
4056           // that is calling exit() has managed to set the process_exiting
4057           // flag, then this thread will be caught in the SuspendThread()
4058           // infinite loop below which closes that race. A small timing
4059           // window remains before the process_exiting flag is set, but it
4060           // is only exposed when we are out of handles.
4061         } else {
4062           ++handle_count;
4063           registered = true;
4064 
4065           // The current exiting thread has stored its handle in the array, and now
4066           // should leave the critical section before calling _endthreadex().
4067         }
4068 
4069       } else if (what != EPT_THREAD && handle_count > 0) {
4070         jlong start_time, finish_time, timeout_left;
4071         // Before ending the process, make sure all the threads that had called
4072         // _endthreadex() completed.
4073 
4074         // Set the priority level of the current thread to the same value as
4075         // the priority level of exiting threads.
4076         // This is to ensure it will be given a fair chance to execute if
4077         // the timeout expires.
4078         hthr = GetCurrentThread();
4079         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
4080         start_time = os::javaTimeNanos();
4081         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
4082         for (i = 0; ; ) {
4083           int portion_count = handle_count - i;
4084           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
4085             portion_count = MAXIMUM_WAIT_OBJECTS;
4086           }
4087           for (j = 0; j < portion_count; ++j) {
4088             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
4089           }
4090           timeout_left = (finish_time - start_time) / 1000000L;
4091           if (timeout_left < 0) {
4092             timeout_left = 0;
4093           }
4094           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4095           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4096             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4097                     (res == WAIT_FAILED ? "failed" : "timed out"),
4098                     GetLastError(), __FILE__, __LINE__);
4099             // Reset portion_count so we close the remaining
4100             // handles due to this error.
4101             portion_count = handle_count - i;
4102           }
4103           for (j = 0; j < portion_count; ++j) {
4104             CloseHandle(handles[i + j]);
4105           }
4106           if ((i += portion_count) >= handle_count) {
4107             break;
4108           }
4109           start_time = os::javaTimeNanos();
4110         }
4111         handle_count = 0;
4112       }
4113 
4114       LeaveCriticalSection(&crit_sect);
4115     }
4116 
4117     if (!registered &&
4118         OrderAccess::load_acquire(&process_exiting) != 0 &&
4119         process_exiting != (jint)GetCurrentThreadId()) {
4120       // Some other thread is about to call exit(), so we don't let
4121       // the current unregistered thread proceed to exit() or _endthreadex()
4122       while (true) {
4123         SuspendThread(GetCurrentThread());
4124         // Avoid busy-wait loop, if SuspendThread() failed.
4125         Sleep(EXIT_TIMEOUT);
4126       }
4127     }
4128   }
4129 
4130   // We are here if either
4131   // - there's no 'race at exit' bug on this OS release;
4132   // - initialization of the critical section failed (unlikely);
4133   // - the current thread has registered itself and left the critical section;
4134   // - the process-exiting thread has raised the flag and left the critical section.
4135   if (what == EPT_THREAD) {
4136     _endthreadex((unsigned)exit_code);
4137   } else if (what == EPT_PROCESS) {
4138     ::exit(exit_code);
4139   } else {
4140     _exit(exit_code);
4141   }
4142 
4143   // Should not reach here
4144   return exit_code;
4145 }
4146 
4147 #undef EXIT_TIMEOUT
4148 
4149 void os::win32::setmode_streams() {
4150   _setmode(_fileno(stdin), _O_BINARY);
4151   _setmode(_fileno(stdout), _O_BINARY);
4152   _setmode(_fileno(stderr), _O_BINARY);
4153 }
4154 
4155 
4156 bool os::is_debugger_attached() {
4157   return IsDebuggerPresent() ? true : false;
4158 }
4159 
4160 
4161 void os::wait_for_keypress_at_exit(void) {
4162   if (PauseAtExit) {
4163     fprintf(stderr, "Press any key to continue...\n");
4164     fgetc(stdin);
4165   }
4166 }
4167 
4168 
4169 bool os::message_box(const char* title, const char* message) {
4170   int result = MessageBox(NULL, message, title,
4171                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4172   return result == IDYES;
4173 }
4174 
4175 #ifndef PRODUCT
4176 #ifndef _WIN64
4177 // Helpers to check whether NX protection is enabled
4178 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4179   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4180       pex->ExceptionRecord->NumberParameters > 0 &&
4181       pex->ExceptionRecord->ExceptionInformation[0] ==
4182       EXCEPTION_INFO_EXEC_VIOLATION) {
4183     return EXCEPTION_EXECUTE_HANDLER;
4184   }
4185   return EXCEPTION_CONTINUE_SEARCH;
4186 }
4187 
4188 void nx_check_protection() {
4189   // If NX is enabled we'll get an exception calling into code on the stack
4190   char code[] = { (char)0xC3 }; // ret
4191   void *code_ptr = (void *)code;
4192   __try {
4193     __asm call code_ptr
4194   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4195     tty->print_raw_cr("NX protection detected.");
4196   }
4197 }
4198 #endif // _WIN64
4199 #endif // PRODUCT
4200 
4201 // This is called _before_ the global arguments have been parsed
4202 void os::init(void) {
4203   _initial_pid = _getpid();
4204 
4205   init_random(1234567);
4206 
4207   win32::initialize_system_info();
4208   win32::setmode_streams();
4209   init_page_sizes((size_t) win32::vm_page_size());
4210 
4211   // This may be overridden later when argument processing is done.
4212   FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);
4213 
4214   // Initialize main_process and main_thread
4215   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4216   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4217                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4218     fatal("DuplicateHandle failed\n");
4219   }
4220   main_thread_id = (int) GetCurrentThreadId();
4221 
4222   // initialize fast thread access - only used for 32-bit
4223   win32::initialize_thread_ptr_offset();
4224 }
4225 
4226 // To install functions for atexit processing
4227 extern "C" {
4228   static void perfMemory_exit_helper() {
4229     perfMemory_exit();
4230   }
4231 }
4232 
4233 static jint initSock();
4234 
4235 // this is called _after_ the global arguments have been parsed
4236 jint os::init_2(void) {
4237   // Allocate a single page and mark it as readable for safepoint polling
4238   address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
4239   guarantee(polling_page != NULL, "Reserve Failed for polling page");
4240 
4241   address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
4242   guarantee(return_page != NULL, "Commit Failed for polling page");
4243 
4244   os::set_polling_page(polling_page);
4245   log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
4246 
4247   if (!UseMembar) {
4248     address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
4249     guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
4250 
4251     return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
4252     guarantee(return_page != NULL, "Commit Failed for memory serialize page");
4253 
4254     os::set_memory_serialize_page(mem_serialize_page);
4255     log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
4256   }
4257 
4258   // Setup Windows Exceptions
4259 
4260   // for debugging float code generation bugs
4261   if (ForceFloatExceptions) {
4262 #ifndef  _WIN64
4263     static long fp_control_word = 0;
4264     __asm { fstcw fp_control_word }
4265     // see Intel PPro Manual, Vol. 2, p 7-16
4266     const long precision = 0x20;
4267     const long underflow = 0x10;
4268     const long overflow  = 0x08;
4269     const long zero_div  = 0x04;
4270     const long denorm    = 0x02;
4271     const long invalid   = 0x01;
4272     fp_control_word |= invalid;
4273     __asm { fldcw fp_control_word }
4274 #endif
4275   }
4276 
  // If stack_commit_size is 0, Windows will reserve the default size,
4278   // but only commit a small portion of it.
4279   size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
4280   size_t default_reserve_size = os::win32::default_stack_size();
4281   size_t actual_reserve_size = stack_commit_size;
4282   if (stack_commit_size < default_reserve_size) {
4283     // If stack_commit_size == 0, we want this too
4284     actual_reserve_size = default_reserve_size;
4285   }
4286 
4287   // Check minimum allowable stack size for thread creation and to initialize
4288   // the java system classes, including StackOverflowError - depends on page
4289   // size.  Add two 4K pages for compiler2 recursion in main thread.
4290   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4291   // class initialization depending on 32 or 64 bit VM.
4292   size_t min_stack_allowed =
4293             (size_t)(JavaThread::stack_guard_zone_size() +
4294                      JavaThread::stack_shadow_zone_size() +
4295                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4296 
4297   min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
4298 
4299   if (actual_reserve_size < min_stack_allowed) {
4300     tty->print_cr("\nThe Java thread stack size specified is too small. "
4301                   "Specify at least %dk",
4302                   min_stack_allowed / K);
4303     return JNI_ERR;
4304   }
4305 
4306   JavaThread::set_stack_size_at_create(stack_commit_size);
4307 
  // Calculate theoretical max. size of Threads to guard against artificial
4309   // out-of-memory situations, where all available address-space has been
4310   // reserved by thread stacks.
4311   assert(actual_reserve_size != 0, "Must have a stack");
4312 
4313   // Calculate the thread limit when we should start doing Virtual Memory
4314   // banging. Currently this is when the thread stacks have used all but 200MB of space.
4315   //
4316   // TODO: consider performing a similar calculation for commit size instead
4317   // of reserve size, since on a 64-bit platform we'll run into that more
4318   // often than running out of virtual memory space.  We can use the
4319   // lower value of the two calculations as the os_thread_limit.
4320   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4321   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
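       // For example, on a 32-bit VM with a 1MB per-thread reserve this works out
       // to roughly (2048MB - 200MB) / 1MB, i.e. about 1848 threads.  On 64-bit
       // the reservable address space is effectively unlimited, which is why the
       // TODO above suggests basing the limit on commit size instead.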
4322 
4323   // atexit functions are called in the reverse order of their registration.
4324   // There is no limit to the number of functions registered, and atexit does
4325   // not set errno.
4326 
4327   if (PerfAllowAtExitRegistration) {
4328     // only register atexit functions if PerfAllowAtExitRegistration is set.
4329     // atexit functions can be delayed until process exit time, which
4330     // can be problematic for embedded VM situations. Embedded VMs should
4331     // call DestroyJavaVM() to assure that VM resources are released.
4332 
4333     // note: perfMemory_exit_helper atexit function may be removed in
4334     // the future if the appropriate cleanup code can be added to the
4335     // VM_Exit VMOperation's doit method.
4336     if (atexit(perfMemory_exit_helper) != 0) {
4337       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4338     }
4339   }
4340 
4341 #ifndef _WIN64
4342   // Print something if NX is enabled (win32 on AMD64)
4343   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4344 #endif
4345 
4346   // initialize thread priority policy
4347   prio_init();
4348 
4349   if (UseNUMA && !ForceNUMA) {
4350     UseNUMA = false; // We don't fully support this yet
4351   }
4352 
4353   if (UseNUMAInterleaving) {
4354     // First check whether this Windows OS supports VirtualAllocExNuma; if not, ignore this flag.
4355     bool success = numa_interleaving_init();
4356     if (!success) UseNUMAInterleaving = false;
4357   }
4358 
4359   if (initSock() != JNI_OK) {
4360     return JNI_ERR;
4361   }
4362 
4363   return JNI_OK;
4364 }
4365 
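     // Safepoint polling: compiled code and the interpreter periodically read from
     // the polling page reserved in os::init_2().  When a safepoint is requested the
     // VM flips the page to PAGE_NOACCESS (below), the polling reads fault, and the
     // VM's exception handler brings the faulting threads to the safepoint.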
4366 // Mark the polling page as unreadable
4367 void os::make_polling_page_unreadable(void) {
4368   DWORD old_status;
4369   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4370                       PAGE_NOACCESS, &old_status)) {
4371     fatal("Could not disable polling page");
4372   }
4373 }
4374 
4375 // Mark the polling page as readable
4376 void os::make_polling_page_readable(void) {
4377   DWORD old_status;
4378   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4379                       PAGE_READONLY, &old_status)) {
4380     fatal("Could not enable polling page");
4381   }
4382 }
4383 
4384 
4385 int os::stat(const char *path, struct stat *sbuf) {
4386   char pathbuf[MAX_PATH];
4387   if (strlen(path) > MAX_PATH - 1) {
4388     errno = ENAMETOOLONG;
4389     return -1;
4390   }
4391   os::native_path(strcpy(pathbuf, path));
4392   int ret = ::stat(pathbuf, sbuf);
4393   if (sbuf != NULL && UseUTCFileTimestamp) {
4394     // Fix for 6539723.  st_mtime returned from stat() is dependent on
4395     // the system timezone and so can return different values for the
4396     // same file if/when daylight savings time changes.  This adjustment
4397     // makes sure the same timestamp is returned regardless of the TZ.
4398     //
4399     // See:
4400     // http://msdn.microsoft.com/library/
4401     //   default.asp?url=/library/en-us/sysinfo/base/
4402     //   time_zone_information_str.asp
4403     // and
4404     // http://msdn.microsoft.com/library/default.asp?url=
4405     //   /library/en-us/sysinfo/base/settimezoneinformation.asp
4406     //
4407     // NOTE: there is an insidious bug here:  If the timezone is changed
4408     // after the call to stat() but before 'GetTimeZoneInformation()', then
4409     // the adjustment we do here will be wrong and we'll return the wrong
4410     // value (which will likely end up creating an invalid class data
4411     // archive).  Absent a better API for this, or some time zone locking
4412     // mechanism, we'll have to live with this risk.
4413     TIME_ZONE_INFORMATION tz;
4414     DWORD tzid = GetTimeZoneInformation(&tz);
4415     int daylightBias =
4416       (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
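         // Bias and DaylightBias/StandardBias are expressed in minutes;
         // st_mtime is in seconds, hence the multiplication by 60 below.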
4417     sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4418   }
4419   return ret;
4420 }
4421 
4422 
4423 #define FT2INT64(ft) \
4424   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
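     // A FILETIME is a 64-bit count of 100-nanosecond intervals split across two
     // 32-bit fields; FT2INT64 reassembles it.  The "* 100" in os::thread_cpu_time()
     // below converts that count to nanoseconds.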
4425 
4426 
4427 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4428 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4429 // of a thread.
4430 //
4431 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4432 // the fast estimate available on the platform.
4433 
4434 // current_thread_cpu_time() is not optimized for Windows yet
4435 jlong os::current_thread_cpu_time() {
4436   // return user + sys since the cost is the same
4437   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4438 }
4439 
4440 jlong os::thread_cpu_time(Thread* thread) {
4441   // consistent with what current_thread_cpu_time() returns.
4442   return os::thread_cpu_time(thread, true /* user+sys */);
4443 }
4444 
4445 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4446   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4447 }
4448 
4449 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4450   // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4451   // If this function changes, os::is_thread_cpu_time_supported() should too
4452   FILETIME CreationTime;
4453   FILETIME ExitTime;
4454   FILETIME KernelTime;
4455   FILETIME UserTime;
4456 
4457   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4458                       &ExitTime, &KernelTime, &UserTime) == 0) {
4459     return -1;
4460   } else if (user_sys_cpu_time) {
4461     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4462   } else {
4463     return FT2INT64(UserTime) * 100;
4464   }
4465 }
4466 
4467 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4468   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4469   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4470   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4471   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4472 }
4473 
4474 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4475   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4476   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4477   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4478   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4479 }
4480 
4481 bool os::is_thread_cpu_time_supported() {
4482   // see os::thread_cpu_time
4483   FILETIME CreationTime;
4484   FILETIME ExitTime;
4485   FILETIME KernelTime;
4486   FILETIME UserTime;
4487 
4488   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4489                       &KernelTime, &UserTime) == 0) {
4490     return false;
4491   } else {
4492     return true;
4493   }
4494 }
4495 
4496 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4497 // It does have primitives (PDH API) to get CPU usage and run queue length.
4498 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4499 // If we wanted to implement loadavg on Windows, we have a few options:
4500 //
4501 // a) Query CPU usage and run queue length and "fake" an answer by
4502 //    returning the CPU usage if it's under 100%, and the run queue
4503 //    length otherwise.  It turns out that querying is pretty slow
4504 //    on Windows, on the order of 200 microseconds on a fast machine.
4505 //    Note that on Windows the CPU usage value is the % usage
4506 //    since the last time the API was called (and the first call
4507 //    returns 100%), so we'd have to deal with that as well.
4508 //
4509 // b) Sample the "fake" answer using a sampling thread and store
4510 //    the answer in a global variable.  The call to loadavg would
4511 //    just return the value of the global, avoiding the slow query.
4512 //
4513 // c) Sample a better answer using exponential decay to smooth the
4514 //    value.  This is basically the algorithm used by UNIX kernels.
4515 //
4516 // Note that sampling thread starvation could affect both (b) and (c).
4517 int os::loadavg(double loadavg[], int nelem) {
4518   return -1;
4519 }
4520 
4521 
4522 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4523 bool os::dont_yield() {
4524   return DontYieldALot;
4525 }
4526 
4527 // This method is a slightly reworked copy of JDK's sysOpen
4528 // from src/windows/hpi/src/sys_api_md.c
4529 
4530 int os::open(const char *path, int oflag, int mode) {
4531   char pathbuf[MAX_PATH];
4532 
4533   if (strlen(path) > MAX_PATH - 1) {
4534     errno = ENAMETOOLONG;
4535     return -1;
4536   }
4537   os::native_path(strcpy(pathbuf, path));
4538   return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4539 }
4540 
4541 FILE* os::open(int fd, const char* mode) {
4542   return ::_fdopen(fd, mode);
4543 }
4544 
4545 // Is a (classpath) directory empty?
4546 bool os::dir_is_empty(const char* path) {
4547   WIN32_FIND_DATA fd;
4548   HANDLE f = FindFirstFile(path, &fd);
4549   if (f == INVALID_HANDLE_VALUE) {
4550     return true;
4551   }
4552   FindClose(f);
4553   return false;
4554 }
4555 
4556 // create binary file, rewriting existing file if required
4557 int os::create_binary_file(const char* path, bool rewrite_existing) {
4558   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4559   if (!rewrite_existing) {
4560     oflags |= _O_EXCL;
4561   }
4562   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4563 }
4564 
4565 // return current position of file pointer
4566 jlong os::current_file_offset(int fd) {
4567   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4568 }
4569 
4570 // move file pointer to the specified offset
4571 jlong os::seek_to_file_offset(int fd, jlong offset) {
4572   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4573 }
4574 
4575 
4576 jlong os::lseek(int fd, jlong offset, int whence) {
4577   return (jlong) ::_lseeki64(fd, offset, whence);
4578 }
4579 
4580 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4581   OVERLAPPED ov;
4582   DWORD nread;
4583   BOOL result;
4584 
4585   ZeroMemory(&ov, sizeof(ov));
4586   ov.Offset = (DWORD)offset;
4587   ov.OffsetHigh = (DWORD)(offset >> 32);
4588 
4589   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4590 
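       // The handle is synchronous (not opened with FILE_FLAG_OVERLAPPED), so
       // ReadFile starts reading at the offset given in the OVERLAPPED structure
       // and does not return until the read completes; no explicit seek is needed.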
4591   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4592 
4593   return result ? nread : 0;
4594 }
4595 
4596 
4597 // This method is a slightly reworked copy of JDK's sysNativePath
4598 // from src/windows/hpi/src/path_md.c
4599 
4600 // Convert a pathname to native format.  On win32, this involves forcing all
4601 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4602 // sometimes rejects '/') and removing redundant separators.  The input path is
4603 // assumed to have been converted into the character encoding used by the local
4604 // system.  Because this might be a double-byte encoding, care is taken to
4605 // treat double-byte lead characters correctly.
4606 //
4607 // This procedure modifies the given path in place, as the result is never
4608 // longer than the original.  There is no error return; this operation always
4609 // succeeds.
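     //
     // For example, "/c:/a//b/" becomes "c:\a\b", and a bare drive specifier such
     // as "d:" becomes "d:." (see the C runtime workaround at the end).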
4610 char * os::native_path(char *path) {
4611   char *src = path, *dst = path, *end = path;
4612   char *colon = NULL;  // If a drive specifier is found, this will
4613                        // point to the colon following the drive letter
4614 
4615   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4616   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4617           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4618 
4619   // Check for leading separators
4620 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4621   while (isfilesep(*src)) {
4622     src++;
4623   }
4624 
4625   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4626     // Remove leading separators if followed by drive specifier.  This
4627     // hack is necessary to support file URLs containing drive
4628     // specifiers (e.g., "file://c:/path").  As a side effect,
4629     // "/c:/path" can be used as an alternative to "c:/path".
4630     *dst++ = *src++;
4631     colon = dst;
4632     *dst++ = ':';
4633     src++;
4634   } else {
4635     src = path;
4636     if (isfilesep(src[0]) && isfilesep(src[1])) {
4637       // UNC pathname: Retain first separator; leave src pointed at
4638       // second separator so that further separators will be collapsed
4639       // into the second separator.  The result will be a pathname
4640       // beginning with "\\\\" followed (most likely) by a host name.
4641       src = dst = path + 1;
4642       path[0] = '\\';     // Force first separator to '\\'
4643     }
4644   }
4645 
4646   end = dst;
4647 
4648   // Remove redundant separators from remainder of path, forcing all
4649   // separators to be '\\' rather than '/'. Also, single byte space
4650   // characters are removed from the end of the path because those
4651   // are not legal ending characters on this operating system.
4652   //
4653   while (*src != '\0') {
4654     if (isfilesep(*src)) {
4655       *dst++ = '\\'; src++;
4656       while (isfilesep(*src)) src++;
4657       if (*src == '\0') {
4658         // Check for trailing separator
4659         end = dst;
4660         if (colon == dst - 2) break;  // "z:\\"
4661         if (dst == path + 1) break;   // "\\"
4662         if (dst == path + 2 && isfilesep(path[0])) {
4663           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4664           // beginning of a UNC pathname.  Even though it is not, by
4665           // itself, a valid UNC pathname, we leave it as is in order
4666           // to be consistent with the path canonicalizer as well
4667           // as the win32 APIs, which treat this case as an invalid
4668           // UNC pathname rather than as an alias for the root
4669           // directory of the current drive.
4670           break;
4671         }
4672         end = --dst;  // Path does not denote a root directory, so
4673                       // remove trailing separator
4674         break;
4675       }
4676       end = dst;
4677     } else {
4678       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4679         *dst++ = *src++;
4680         if (*src) *dst++ = *src++;
4681         end = dst;
4682       } else {  // Copy a single-byte character
4683         char c = *src++;
4684         *dst++ = c;
4685         // Space is not a legal ending character
4686         if (c != ' ') end = dst;
4687       }
4688     }
4689   }
4690 
4691   *end = '\0';
4692 
4693   // For "z:", add "." to work around a bug in the C runtime library
4694   if (colon == dst - 1) {
4695     path[2] = '.';
4696     path[3] = '\0';
4697   }
4698 
4699   return path;
4700 }
4701 
4702 // This code is a copy of JDK's sysSetLength
4703 // from src/windows/hpi/src/sys_api_md.c
4704 
4705 int os::ftruncate(int fd, jlong length) {
4706   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4707   long high = (long)(length >> 32);
4708   DWORD ret;
4709 
4710   if (h == (HANDLE)(-1)) {
4711     return -1;
4712   }
4713 
4714   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4715   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4716     return -1;
4717   }
4718 
4719   if (::SetEndOfFile(h) == FALSE) {
4720     return -1;
4721   }
4722 
4723   return 0;
4724 }
4725 
4726 int os::get_fileno(FILE* fp) {
4727   return _fileno(fp);
4728 }
4729 
4730 // This code is a copy of JDK's sysSync
4731 // from src/windows/hpi/src/sys_api_md.c
4732 // except for the legacy workaround for a bug in Win 98
4733 
4734 int os::fsync(int fd) {
4735   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4736 
4737   if ((!::FlushFileBuffers(handle)) &&
4738       (GetLastError() != ERROR_ACCESS_DENIED)) {
4739     // from winerror.h
4740     return -1;
4741   }
4742   return 0;
4743 }
4744 
4745 static int nonSeekAvailable(int, long *);
4746 static int stdinAvailable(int, long *);
4747 
4748 #define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
4749 #define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4750 
4751 // This code is a copy of JDK's sysAvailable
4752 // from src/windows/hpi/src/sys_api_md.c
4753 
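     // Character devices and pipes are not seekable, so their available byte count
     // comes from PeekNamedPipe / console input events; for ordinary files it is
     // the distance from the current file position to the end of the file.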
4754 int os::available(int fd, jlong *bytes) {
4755   jlong cur, end;
4756   struct _stati64 stbuf64;
4757 
4758   if (::_fstati64(fd, &stbuf64) >= 0) {
4759     int mode = stbuf64.st_mode;
4760     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4761       int ret;
4762       long lpbytes;
4763       if (fd == 0) {
4764         ret = stdinAvailable(fd, &lpbytes);
4765       } else {
4766         ret = nonSeekAvailable(fd, &lpbytes);
4767       }
4768       (*bytes) = (jlong)(lpbytes);
4769       return ret;
4770     }
4771     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4772       return FALSE;
4773     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4774       return FALSE;
4775     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4776       return FALSE;
4777     }
4778     *bytes = end - cur;
4779     return TRUE;
4780   } else {
4781     return FALSE;
4782   }
4783 }
4784 
4785 void os::flockfile(FILE* fp) {
4786   _lock_file(fp);
4787 }
4788 
4789 void os::funlockfile(FILE* fp) {
4790   _unlock_file(fp);
4791 }
4792 
4793 // This code is a copy of JDK's nonSeekAvailable
4794 // from src/windows/hpi/src/sys_api_md.c
4795 
4796 static int nonSeekAvailable(int fd, long *pbytes) {
4797   // This is used for available on non-seekable devices
4798   // (like both named and anonymous pipes, such as pipes
4799   //  connected to an exec'd process).
4800   // Standard Input is a special case.
4801   HANDLE han;
4802 
4803   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4804     return FALSE;
4805   }
4806 
4807   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4808     // PeekNamedPipe fails when at EOF.  In that case we
4809     // simply make *pbytes = 0 which is consistent with the
4810     // behavior we get on Solaris when an fd is at EOF.
4811     // The only alternative is to raise an Exception,
4812     // which isn't really warranted.
4813     //
4814     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4815       return FALSE;
4816     }
4817     *pbytes = 0;
4818   }
4819   return TRUE;
4820 }
4821 
4822 #define MAX_INPUT_EVENTS 2000
4823 
4824 // This code is a copy of JDK's stdinAvailable
4825 // from src/windows/hpi/src/sys_api_md.c
4826 
4827 static int stdinAvailable(int fd, long *pbytes) {
4828   HANDLE han;
4829   DWORD numEventsRead = 0;  // Number of events read from buffer
4830   DWORD numEvents = 0;      // Number of events in buffer
4831   DWORD i = 0;              // Loop index
4832   DWORD curLength = 0;      // Position marker
4833   DWORD actualLength = 0;   // Number of bytes readable
4834   BOOL error = FALSE;       // Error holder
4835   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4836 
4837   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4838     return FALSE;
4839   }
4840 
4841   // Construct an array of input records in the console buffer
4842   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4843   if (error == 0) {
4844     return nonSeekAvailable(fd, pbytes);
4845   }
4846 
4847   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4848   if (numEvents > MAX_INPUT_EVENTS) {
4849     numEvents = MAX_INPUT_EVENTS;
4850   }
4851 
4852   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4853   if (lpBuffer == NULL) {
4854     return FALSE;
4855   }
4856 
4857   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4858   if (error == 0) {
4859     os::free(lpBuffer);
4860     return FALSE;
4861   }
4862 
4863   // Examine input records for the number of bytes available
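       // Only key-down events are counted, and only input up to (and including) the
       // most recent '\r' is reported, matching the console's line-buffered input.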
4864   for (i=0; i<numEvents; i++) {
4865     if (lpBuffer[i].EventType == KEY_EVENT) {
4866 
4867       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4868                                       &(lpBuffer[i].Event);
4869       if (keyRecord->bKeyDown == TRUE) {
4870         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4871         curLength++;
4872         if (*keyPressed == '\r') {
4873           actualLength = curLength;
4874         }
4875       }
4876     }
4877   }
4878 
4879   if (lpBuffer != NULL) {
4880     os::free(lpBuffer);
4881   }
4882 
4883   *pbytes = (long) actualLength;
4884   return TRUE;
4885 }
4886 
4887 // Map a block of memory.
4888 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4889                         char *addr, size_t bytes, bool read_only,
4890                         bool allow_exec) {
4891   HANDLE hFile;
4892   char* base;
4893 
4894   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4895                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4896   if (hFile == INVALID_HANDLE_VALUE) {  // CreateFile reports failure with INVALID_HANDLE_VALUE, not NULL
4897     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4898     return NULL;
4899   }
4900 
4901   if (allow_exec) {
4902     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4903     // unless it comes from a PE image (which the shared archive is not.)
4904     // Even VirtualProtect refuses to give execute access to mapped memory
4905     // that was not previously executable.
4906     //
4907     // Instead, stick the executable region in anonymous memory.  Yuck.
4908     // Penalty is that ~4 pages will not be shareable - in the future
4909     // we might consider DLLizing the shared archive with a proper PE
4910     // header so that mapping executable + sharing is possible.
4911 
4912     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4913                                 PAGE_READWRITE);
4914     if (base == NULL) {
4915       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4916       CloseHandle(hFile);
4917       return NULL;
4918     }
4919 
4920     DWORD bytes_read;
4921     OVERLAPPED overlapped;
4922     overlapped.Offset = (DWORD)file_offset;
4923     overlapped.OffsetHigh = 0;
4924     overlapped.hEvent = NULL;
4925     // ReadFile guarantees that if the return value is true, the requested
4926     // number of bytes were read before returning.
4927     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4928     if (!res) {
4929       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4930       release_memory(base, bytes);
4931       CloseHandle(hFile);
4932       return NULL;
4933     }
4934   } else {
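         // The non-executable case uses a copy-on-write file mapping: FILE_MAP_READ
         // gives a read-only view, FILE_MAP_COPY a private copy-on-write view, so
         // writes are never propagated back to the underlying file.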
4935     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4936                                     NULL /* file_name */);
4937     if (hMap == NULL) {
4938       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4939       CloseHandle(hFile);
4940       return NULL;
4941     }
4942 
4943     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4944     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4945                                   (DWORD)bytes, addr);
4946     if (base == NULL) {
4947       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4948       CloseHandle(hMap);
4949       CloseHandle(hFile);
4950       return NULL;
4951     }
4952 
4953     if (CloseHandle(hMap) == 0) {
4954       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4955       CloseHandle(hFile);
4956       return base;
4957     }
4958   }
4959 
4960   if (allow_exec) {
4961     DWORD old_protect;
4962     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4963     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4964 
4965     if (!res) {
4966       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4967       // Don't consider this a hard error, on IA32 even if the
4968       // VirtualProtect fails, we should still be able to execute
4969       CloseHandle(hFile);
4970       return base;
4971     }
4972   }
4973 
4974   if (CloseHandle(hFile) == 0) {
4975     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4976     return base;
4977   }
4978 
4979   return base;
4980 }
4981 
4982 // Remap a block of memory.
4983 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4984                           char *addr, size_t bytes, bool read_only,
4985                           bool allow_exec) {
4986   // This OS does not allow existing memory maps to be remapped so we
4987   // have to unmap the memory before we remap it.
4988   if (!os::unmap_memory(addr, bytes)) {
4989     return NULL;
4990   }
4991 
4992   // There is a very small theoretical window between the unmap_memory()
4993   // call above and the map_memory() call below where a thread in native
4994   // code may be able to access an address that is no longer mapped.
4995 
4996   return os::map_memory(fd, file_name, file_offset, addr, bytes,
4997                         read_only, allow_exec);
4998 }
4999 
5000 
5001 // Unmap a block of memory.
5002 // Returns true=success, otherwise false.
5003 
5004 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5005   MEMORY_BASIC_INFORMATION mem_info;
5006   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5007     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5008     return false;
5009   }
5010 
5011   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5012   // Instead, executable region was allocated using VirtualAlloc(). See
5013   // pd_map_memory() above.
5014   //
5015   // The following flags should match the 'exec_access' flags used for
5016   // VirtualProtect() in pd_map_memory().
5017   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5018       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5019     return pd_release_memory(addr, bytes);
5020   }
5021 
5022   BOOL result = UnmapViewOfFile(addr);
5023   if (result == 0) {
5024     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
5025     return false;
5026   }
5027   return true;
5028 }
5029 
5030 void os::pause() {
5031   char filename[MAX_PATH];
5032   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5033     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);  // don't treat the file name as a format string
5034   } else {
5035     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5036   }
5037 
5038   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5039   if (fd != -1) {
5040     struct stat buf;
5041     ::close(fd);
5042     while (::stat(filename, &buf) == 0) {
5043       Sleep(100);
5044     }
5045   } else {
5046     jio_fprintf(stderr,
5047                 "Could not open pause file '%s', continuing immediately.\n", filename);
5048   }
5049 }
5050 
5051 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
5052   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
5053 }
5054 
5055 // See the caveats for this class in os_windows.hpp
5056 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5057 // into this method, which then returns false. If no OS EXCEPTION was raised,
5058 // returns true.
5059 // The callback is supposed to provide the method that should be protected.
5060 //
5061 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5062   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
5063   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
5064          "crash_protection already set?");
5065 
5066   bool success = true;
5067   __try {
5068     WatcherThread::watcher_thread()->set_crash_protection(this);
5069     cb.call();
5070   } __except(EXCEPTION_EXECUTE_HANDLER) {
5071     // only for protection, nothing to do
5072     success = false;
5073   }
5074   WatcherThread::watcher_thread()->set_crash_protection(NULL);
5075   return success;
5076 }
5077 
5078 // An Event wraps a win32 "CreateEvent" kernel handle.
5079 //
5080 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5081 //
5082 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5083 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5084 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5085 //     In addition, an unpark() operation might fetch the handle field, but the
5086 //     event could recycle between the fetch and the SetEvent() operation.
5087 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5088 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5089 //     on a stale but recycled handle would be harmless, but in practice this might
5090 //     confuse other non-Sun code, so it's not a viable approach.
5091 //
5092 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5093 //     with the Event.  The event handle is never closed.  This could be construed
5094 //     as handle leakage, but only up to the maximum # of threads that have been extant
5095 //     at any one time.  This shouldn't be an issue, as Windows platforms typically
5096 //     permit a process to have hundreds of thousands of open handles.
5097 //
5098 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5099 //     and release unused handles.
5100 //
5101 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5102 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5103 //
5104 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5105 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5106 //
5107 // We use (2).
5108 //
5109 // TODO-FIXME:
5110 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5111 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5112 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5113 // 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
5114 //     into a single win32 CreateEvent() handle.
5115 //
5116 // Assumption:
5117 //    Only one parker can exist on an event, which is why we allocate
5118 //    them per-thread. Multiple unparkers can coexist.
5119 //
5120 // _Event transitions in park()
5121 //   -1 => -1 : illegal
5122 //    1 =>  0 : pass - return immediately
5123 //    0 => -1 : block; then set _Event to 0 before returning
5124 //
5125 // _Event transitions in unpark()
5126 //    0 => 1 : just return
5127 //    1 => 1 : just return
5128 //   -1 => either 0 or 1; must signal target thread
5129 //         That is, we can safely transition _Event from -1 to either
5130 //         0 or 1.
5131 //
5132 // _Event serves as a restricted-range semaphore.
5133 //   -1 : thread is blocked, i.e. there is a waiter
5134 //    0 : neutral: thread is running or ready,
5135 //        could have been signaled after a wait started
5136 //    1 : signaled - thread is running or ready
5137 //
5138 // Another possible encoding of _Event would be with
5139 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5140 //
5141 
5142 int os::PlatformEvent::park(jlong Millis) {
5143   // Transitions for _Event:
5144   //   -1 => -1 : illegal
5145   //    1 =>  0 : pass - return immediately
5146   //    0 => -1 : block; then set _Event to 0 before returning
5147 
5148   guarantee(_ParkHandle != NULL , "Invariant");
5149   guarantee(Millis > 0          , "Invariant");
5150 
5151   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5152   // the initial park() operation.
5153   // Consider: use atomic decrement instead of CAS-loop
5154 
5155   int v;
5156   for (;;) {
5157     v = _Event;
5158     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5159   }
5160   guarantee((v == 0) || (v == 1), "invariant");
5161   if (v != 0) return OS_OK;
5162 
5163   // Do this the hard way by blocking ...
5164   // TODO: consider a brief spin here, gated on the success of recent
5165   // spin attempts by this thread.
5166   //
5167   // We decompose long timeouts into series of shorter timed waits.
5168   // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5169   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5170   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5171   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5172   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5173   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5174   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5175   // for the already waited time.  This policy does not admit any new outcomes.
5176   // In the future, however, we might want to track the accumulated wait time and
5177   // adjust Millis accordingly if we encounter a spurious wakeup.
5178 
5179   const int MAXTIMEOUT = 0x10000000;
5180   DWORD rv = WAIT_TIMEOUT;
5181   while (_Event < 0 && Millis > 0) {
5182     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5183     if (Millis > MAXTIMEOUT) {
5184       prd = MAXTIMEOUT;
5185     }
5186     rv = ::WaitForSingleObject(_ParkHandle, prd);
5187     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5188     if (rv == WAIT_TIMEOUT) {
5189       Millis -= prd;
5190     }
5191   }
5192   v = _Event;
5193   _Event = 0;
5194   // see comment at end of os::PlatformEvent::park() below:
5195   OrderAccess::fence();
5196   // If we encounter a nearly simultaneous timeout expiry and unpark()
5197   // we return OS_OK indicating we awoke via unpark().
5198   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5199   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5200 }
5201 
5202 void os::PlatformEvent::park() {
5203   // Transitions for _Event:
5204   //   -1 => -1 : illegal
5205   //    1 =>  0 : pass - return immediately
5206   //    0 => -1 : block; then set _Event to 0 before returning
5207 
5208   guarantee(_ParkHandle != NULL, "Invariant");
5209   // Invariant: Only the thread associated with the Event/PlatformEvent
5210   // may call park().
5211   // Consider: use atomic decrement instead of CAS-loop
5212   int v;
5213   for (;;) {
5214     v = _Event;
5215     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5216   }
5217   guarantee((v == 0) || (v == 1), "invariant");
5218   if (v != 0) return;
5219 
5220   // Do this the hard way by blocking ...
5221   // TODO: consider a brief spin here, gated on the success of recent
5222   // spin attempts by this thread.
5223   while (_Event < 0) {
5224     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5225     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5226   }
5227 
5228   // Usually we'll find _Event == 0 at this point, but as
5229   // an optional optimization we clear it, just in case
5230   // multiple unpark() operations drove _Event up to 1.
5231   _Event = 0;
5232   OrderAccess::fence();
5233   guarantee(_Event >= 0, "invariant");
5234 }
5235 
5236 void os::PlatformEvent::unpark() {
5237   guarantee(_ParkHandle != NULL, "Invariant");
5238 
5239   // Transitions for _Event:
5240   //    0 => 1 : just return
5241   //    1 => 1 : just return
5242   //   -1 => either 0 or 1; must signal target thread
5243   //         That is, we can safely transition _Event from -1 to either
5244   //         0 or 1.
5245   // See also: "Semaphores in Plan 9" by Mullender & Cox
5246   //
5247   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5248   // that it will take two back-to-back park() calls for the owning
5249   // thread to block. This has the benefit of forcing a spurious return
5250   // from the first park() call after an unpark() call which will help
5251   // shake out uses of park() and unpark() without condition variables.
5252 
5253   if (Atomic::xchg(1, &_Event) >= 0) return;
5254 
5255   ::SetEvent(_ParkHandle);
5256 }
5257 
5258 
5259 // JSR166
5260 // -------------------------------------------------------
5261 
5262 // The Windows implementation of Park is very straightforward: Basic
5263 // operations on Win32 Events turn out to have the right semantics to
5264 // use them directly. We opportunistically reuse the event inherited
5265 // from Monitor.
5266 
5267 void Parker::park(bool isAbsolute, jlong time) {
5268   guarantee(_ParkEvent != NULL, "invariant");
5269   // First, demultiplex/decode time arguments
5270   if (time < 0) { // don't wait
5271     return;
5272   } else if (time == 0 && !isAbsolute) {
5273     time = INFINITE;
5274   } else if (isAbsolute) {
5275     time -= os::javaTimeMillis(); // convert to relative time
5276     if (time <= 0) {  // already elapsed
5277       return;
5278     }
5279   } else { // relative
5280     time /= 1000000;  // Must coarsen from nanos to millis
5281     if (time == 0) {  // Wait for the minimal time unit if zero
5282       time = 1;
5283     }
5284   }
5285 
5286   JavaThread* thread = JavaThread::current();
5287 
5288   // Don't wait if interrupted or already triggered
5289   if (Thread::is_interrupted(thread, false) ||
5290       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5291     ResetEvent(_ParkEvent);
5292     return;
5293   } else {
5294     ThreadBlockInVM tbivm(thread);
5295     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5296     thread->set_suspend_equivalent();
5297 
5298     WaitForSingleObject(_ParkEvent, time);
5299     ResetEvent(_ParkEvent);
5300 
5301     // If externally suspended while waiting, re-suspend
5302     if (thread->handle_special_suspend_equivalent_condition()) {
5303       thread->java_suspend_self();
5304     }
5305   }
5306 }
5307 
5308 void Parker::unpark() {
5309   guarantee(_ParkEvent != NULL, "invariant");
5310   SetEvent(_ParkEvent);
5311 }
5312 
5313 // Run the specified command in a separate process. Return its exit value,
5314 // or -1 on failure (e.g. can't create a new process).
5315 int os::fork_and_exec(char* cmd) {
5316   STARTUPINFO si;
5317   PROCESS_INFORMATION pi;
5318 
5319   memset(&si, 0, sizeof(si));
5320   si.cb = sizeof(si);
5321   memset(&pi, 0, sizeof(pi));
5322   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5323                             cmd,    // command line
5324                             NULL,   // process security attribute
5325                             NULL,   // thread security attribute
5326                             TRUE,   // inherits system handles
5327                             0,      // no creation flags
5328                             NULL,   // use parent's environment block
5329                             NULL,   // use parent's starting directory
5330                             &si,    // (in) startup information
5331                             &pi);   // (out) process information
5332 
5333   if (rslt) {
5334     // Wait until child process exits.
5335     WaitForSingleObject(pi.hProcess, INFINITE);
5336 
5337     DWORD exit_code;
5338     GetExitCodeProcess(pi.hProcess, &exit_code);
5339 
5340     // Close process and thread handles.
5341     CloseHandle(pi.hProcess);
5342     CloseHandle(pi.hThread);
5343 
5344     return (int)exit_code;
5345   } else {
5346     return -1;
5347   }
5348 }
5349 
5350 bool os::find(address addr, outputStream* st) {
5351   int offset = -1;
5352   bool result = false;
5353   char buf[256];
5354   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5355     st->print(PTR_FORMAT " ", addr);
5356     if (strlen(buf) < sizeof(buf) - 1) {
5357       char* p = strrchr(buf, '\\');
5358       if (p) {
5359         st->print("%s", p + 1);
5360       } else {
5361         st->print("%s", buf);
5362       }
5363     } else {
5364         // The library name is probably truncated. Let's omit the library name.
5365         // See also JDK-8147512.
5366     }
5367     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5368       st->print("::%s + 0x%x", buf, offset);
5369     }
5370     st->cr();
5371     result = true;
5372   }
5373   return result;
5374 }
5375 
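     // Recognizes faults on the memory serialize page.  When UseMembar is off,
     // threads write to that page during state transitions while
     // os::serialize_thread_states() briefly write-protects it to force
     // serialization; continuing execution here simply retries the faulting write
     // once the page is writable again.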
5376 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5377   DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5378 
5379   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
5380     JavaThread* thread = JavaThread::current();
5381     PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5382     address addr = (address) exceptionRecord->ExceptionInformation[1];
5383 
5384     if (os::is_memory_serialize_page(thread, addr)) {
5385       return EXCEPTION_CONTINUE_EXECUTION;
5386     }
5387   }
5388 
5389   return EXCEPTION_CONTINUE_SEARCH;
5390 }
5391 
5392 // We don't build a headless jre for Windows
5393 bool os::is_headless_jre() { return false; }
5394 
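     // Initialize Winsock, requesting version 2.2.  Called once from os::init_2().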
5395 static jint initSock() {
5396   WSADATA wsadata;
5397 
5398   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5399     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5400                 ::GetLastError());
5401     return JNI_ERR;
5402   }
5403   return JNI_OK;
5404 }
5405 
5406 struct hostent* os::get_host_by_name(char* name) {
5407   return (struct hostent*)gethostbyname(name);
5408 }
5409 
5410 int os::socket_close(int fd) {
5411   return ::closesocket(fd);
5412 }
5413 
5414 int os::socket(int domain, int type, int protocol) {
5415   return ::socket(domain, type, protocol);
5416 }
5417 
5418 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5419   return ::connect(fd, him, len);
5420 }
5421 
5422 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5423   return ::recv(fd, buf, (int)nBytes, flags);
5424 }
5425 
5426 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5427   return ::send(fd, buf, (int)nBytes, flags);
5428 }
5429 
5430 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5431   return ::send(fd, buf, (int)nBytes, flags);
5432 }
5433 
5434 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5435 #if defined(IA32)
5436   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5437 #elif defined (AMD64)
5438   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5439 #endif
5440 
5441 // returns true if thread could be suspended,
5442 // false otherwise
5443 static bool do_suspend(HANDLE* h) {
5444   if (h != NULL) {
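         // SuspendThread returns the thread's previous suspend count, or (DWORD)-1
         // on failure, hence the comparison against ~0.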
5445     if (SuspendThread(*h) != ~0) {
5446       return true;
5447     }
5448   }
5449   return false;
5450 }
5451 
5452 // resume the thread
5453 // calling resume on an active thread is a no-op
5454 static void do_resume(HANDLE* h) {
5455   if (h != NULL) {
5456     ResumeThread(*h);
5457   }
5458 }
5459 
5460 // retrieve a suspend/resume context capable handle
5461 // from the tid. Caller validates handle return value.
5462 void get_thread_handle_for_extended_context(HANDLE* h,
5463                                             OSThread::thread_id_t tid) {
5464   if (h != NULL) {
5465     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5466   }
5467 }
5468 
5469 // Thread sampling implementation
5470 //
5471 void os::SuspendedThreadTask::internal_do_task() {
5472   CONTEXT    ctxt;
5473   HANDLE     h = NULL;
5474 
5475   // get context capable handle for thread
5476   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5477 
5478   // sanity
5479   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5480     return;
5481   }
5482 
5483   // suspend the thread
5484   if (do_suspend(&h)) {
5485     ctxt.ContextFlags = sampling_context_flags;
5486     // get thread context
5487     GetThreadContext(h, &ctxt);
5488     SuspendedThreadTaskContext context(_thread, &ctxt);
5489     // pass context to Thread Sampling impl
5490     do_task(context);
5491     // resume thread
5492     do_resume(&h);
5493   }
5494 
5495   // close handle
5496   CloseHandle(h);
5497 }
5498 
5499 bool os::start_debugging(char *buf, int buflen) {
5500   int len = (int)strlen(buf);
5501   char *p = &buf[len];
5502 
5503   jio_snprintf(p, buflen-len,
5504              "\n\n"
5505              "Do you want to debug the problem?\n\n"
5506              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5507              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5508              "Otherwise, select 'No' to abort...",
5509              os::current_process_id(), os::current_thread_id());
5510 
5511   bool yes = os::message_box("Unexpected Error", buf);
5512 
5513   if (yes) {
5514     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5515     // exception. If VM is running inside a debugger, the debugger will
5516     // catch the exception. Otherwise, the breakpoint exception will reach
5517     // the default windows exception handler, which can spawn a debugger and
5518     // automatically attach to the dying VM.
5519     os::breakpoint();
5520     yes = false;
5521   }
5522   return yes;
5523 }
5524 
5525 void* os::get_default_process_handle() {
5526   return (void*)GetModuleHandle(NULL);
5527 }
5528 
5529 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5530 // which is used to find statically linked in agents.
5531 // Additionally, for Windows, takes into account __stdcall names.
5532 // Parameters:
5533 //            sym_name: Symbol in library we are looking for
5534 //            lib_name: Name of library to look in, NULL for shared libs.
5535 //            is_absolute_path == true if lib_name is absolute path to agent
5536 //                                     such as "C:/a/b/L.dll"
5537 //            == false if only the base name of the library is passed in
5538 //               such as "L"
5539 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5540                                     bool is_absolute_path) {
5541   char *agent_entry_name;
5542   size_t len;
5543   size_t name_len;
5544   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5545   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5546   const char *start;
5547 
5548   if (lib_name != NULL) {
5549     len = name_len = strlen(lib_name);
5550     if (is_absolute_path) {
5551       // Need to strip path, prefix and suffix
5552       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5553         lib_name = ++start;
5554       } else {
5555         // Need to check for drive prefix
5556         if ((start = strchr(lib_name, ':')) != NULL) {
5557           lib_name = ++start;
5558         }
5559       }
5560       if (len <= (prefix_len + suffix_len)) {
5561         return NULL;
5562       }
5563       lib_name += prefix_len;
5564       name_len = strlen(lib_name) - suffix_len;
5565     }
5566   }
5567   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5568   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5569   if (agent_entry_name == NULL) {
5570     return NULL;
5571   }
5572   if (lib_name != NULL) {
5573     const char *p = strrchr(sym_name, '@');
5574     if (p != NULL && p != sym_name) {
5575       // sym_name == _Agent_OnLoad@XX
5576       strncpy(agent_entry_name, sym_name, (p - sym_name));
5577       agent_entry_name[(p-sym_name)] = '\0';
5578       // agent_entry_name == _Agent_OnLoad
5579       strcat(agent_entry_name, "_");
5580       strncat(agent_entry_name, lib_name, name_len);
5581       strcat(agent_entry_name, p);
5582       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5583     } else {
5584       strcpy(agent_entry_name, sym_name);
5585       strcat(agent_entry_name, "_");
5586       strncat(agent_entry_name, lib_name, name_len);
5587     }
5588   } else {
5589     strcpy(agent_entry_name, sym_name);
5590   }
5591   return agent_entry_name;
5592 }
5593 
5594 #ifndef PRODUCT
5595 
5596 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5597 // contiguous memory block at a particular address.
5598 // The test first tries to find a good approximate address to allocate at by using the same
5599 // method to allocate some memory at any address. The test then tries to allocate memory in
5600 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5601 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of
5602 // the previously allocated memory is available for allocation. The only actual failure
5603 // that is reported is when the test tries to allocate at a particular location but gets a
5604 // different valid one. A NULL return value at this point is not considered an error but may
5605 // be legitimate.
5606 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5607 void TestReserveMemorySpecial_test() {
5608   if (!UseLargePages) {
5609     if (VerboseInternalVMTests) {
5610       tty->print("Skipping test because large pages are disabled");
5611     }
5612     return;
5613   }
5614   // save current value of globals
5615   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5616   bool old_use_numa_interleaving = UseNUMAInterleaving;
5617 
5618   // set globals to make sure we hit the correct code path
5619   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5620 
5621   // do an allocation at an address selected by the OS to get a good one.
5622   const size_t large_allocation_size = os::large_page_size() * 4;
5623   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5624   if (result == NULL) {
5625     if (VerboseInternalVMTests) {
5626       tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5627                           large_allocation_size);
5628     }
5629   } else {
5630     os::release_memory_special(result, large_allocation_size);
5631 
5632     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5633     // we managed to get it once.
5634     const size_t expected_allocation_size = os::large_page_size();
5635     char* expected_location = result + os::large_page_size();
5636     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5637     if (actual_location == NULL) {
5638       if (VerboseInternalVMTests) {
5639         tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5640                             expected_location, expected_allocation_size);
5641       }
5642     } else {
5643       // release memory
5644       os::release_memory_special(actual_location, expected_allocation_size);
5645       // only now check, after releasing any memory to avoid any leaks.
5646       assert(actual_location == expected_location,
5647              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5648              expected_location, expected_allocation_size, actual_location);
5649     }
5650   }
5651 
5652   // restore globals
5653   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5654   UseNUMAInterleaving = old_use_numa_interleaving;
5655 }
5656 #endif // PRODUCT
5657 
5658 /*
5659   All the defined signal names for Windows.
5660 
5661   NOTE that not all of these names are accepted by FindSignal!
5662 
5663   For various reasons some of these may be rejected at runtime.
5664 
5665   Here are the names currently accepted by a user of sun.misc.Signal with
5666   1.4.1 (ignoring potential interaction with use of chaining, etc):
5667 
5668      (LIST TBD)
5669 
5670 */
5671 int os::get_signal_number(const char* name) {
5672   static const struct {
5673     const char* name;
5674     int   number;
5675   } siglabels [] =
5676     // derived from version 6.0 VC98/include/signal.h
5677   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5678   "FPE",        SIGFPE,         // floating point exception
5679   "SEGV",       SIGSEGV,        // segment violation
5680   "INT",        SIGINT,         // interrupt
5681   "TERM",       SIGTERM,        // software term signal from kill
5682   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5683   "ILL",        SIGILL};        // illegal instruction
5684   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5685     if (strcmp(name, siglabels[i].name) == 0) {
5686       return siglabels[i].number;
5687     }
5688   }
5689   return -1;
5690 }
5691 
5692 // Fast current thread access
5693 
5694 int os::win32::_thread_ptr_offset = 0;
5695 
5696 static void call_wrapper_dummy() {}
5697 
5698 // We need to call the os_exception_wrapper once so that it sets
5699 // up the offset from FS of the thread pointer.
5700 void os::win32::initialize_thread_ptr_offset() {
5701   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5702                            NULL, NULL, NULL, NULL);
5703 }