1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "classfile/classLoader.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/disassembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "jvm_windows.h"
  38 #include "logging/log.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/filemap.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "os_share_windows.hpp"
  43 #include "os_windows.inline.hpp"
  44 #include "prims/jniFastGetField.hpp"
  45 #include "prims/jvm.h"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.inline.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "semaphore_windows.hpp"
  67 #include "services/attachListener.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/growableArray.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 
  77 #ifdef _DEBUG
  78 #include <crtdbg.h>
  79 #endif
  80 
  81 
  82 #include <windows.h>
  83 #include <sys/types.h>
  84 #include <sys/stat.h>
  85 #include <sys/timeb.h>
  86 #include <objidl.h>
  87 #include <shlobj.h>
  88 
  89 #include <malloc.h>
  90 #include <signal.h>
  91 #include <direct.h>
  92 #include <errno.h>
  93 #include <fcntl.h>
  94 #include <io.h>
  95 #include <process.h>              // For _beginthreadex(), _endthreadex()
  96 #include <imagehlp.h>             // For os::dll_address_to_function_name
  97 // for enumerating dll libraries
  98 #include <vdmdbg.h>
  99 
 100 // for timer info max values which include all bits
 101 #define ALL_64_BITS CONST64(-1)
 102 
 103 // For DLL loading/load error detection
 104 // Values of PE COFF
 105 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 106 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 107 
 108 static HANDLE main_process;
 109 static HANDLE main_thread;
 110 static int    main_thread_id;
 111 
 112 static FILETIME process_creation_time;
 113 static FILETIME process_exit_time;
 114 static FILETIME process_user_time;
 115 static FILETIME process_kernel_time;
 116 
 117 #ifdef _M_IA64
 118   #define __CPU__ ia64
 119 #else
 120   #ifdef _M_AMD64
 121     #define __CPU__ amd64
 122   #else
 123     #define __CPU__ i486
 124   #endif
 125 #endif
 126 
 127 // save DLL module handle, used by GetModuleFileName
 128 
 129 HINSTANCE vm_lib_handle;
 130 
 131 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 132   switch (reason) {
 133   case DLL_PROCESS_ATTACH:
 134     vm_lib_handle = hinst;
 135     if (ForceTimeHighResolution) {
 136       timeBeginPeriod(1L);
 137     }
 138     break;
 139   case DLL_PROCESS_DETACH:
 140     if (ForceTimeHighResolution) {
 141       timeEndPeriod(1L);
 142     }
 143     break;
 144   default:
 145     break;
 146   }
 147   return true;
 148 }
 149 
 150 static inline double fileTimeAsDouble(FILETIME* time) {
 151   const double high  = (double) ((unsigned int) ~0);
 152   const double split = 10000000.0;
 153   double result = (time->dwLowDateTime / split) +
 154                    time->dwHighDateTime * (high/split);
 155   return result;
 156 }
 157 
 158 // Implementation of os
 159 
 160 bool os::unsetenv(const char* name) {
 161   assert(name != NULL, "Null pointer");
 162   return (SetEnvironmentVariable(name, NULL) == TRUE);
 163 }
 164 
 165 // No setuid programs under Windows.
 166 bool os::have_special_privileges() {
 167   return false;
 168 }
 169 
 170 
 171 // This method is  a periodic task to check for misbehaving JNI applications
 172 // under CheckJNI, we can add any periodic checks here.
 173 // For Windows at the moment does nothing
 174 void os::run_periodic_checks() {
 175   return;
 176 }
 177 
 178 // previous UnhandledExceptionFilter, if there is one
 179 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 180 
 181 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 182 
 183 void os::init_system_properties_values() {
 184   // sysclasspath, java_home, dll_dir
 185   {
 186     char *home_path;
 187     char *dll_path;
 188     char *pslash;
 189     char *bin = "\\bin";
 190     char home_dir[MAX_PATH + 1];
 191     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 192 
 193     if (alt_home_dir != NULL)  {
 194       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 195       home_dir[MAX_PATH] = '\0';
 196     } else {
 197       os::jvm_path(home_dir, sizeof(home_dir));
 198       // Found the full path to jvm.dll.
 199       // Now cut the path to <java_home>/jre if we can.
 200       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 201       pslash = strrchr(home_dir, '\\');
 202       if (pslash != NULL) {
 203         *pslash = '\0';                   // get rid of \{client|server}
 204         pslash = strrchr(home_dir, '\\');
 205         if (pslash != NULL) {
 206           *pslash = '\0';                 // get rid of \bin
 207         }
 208       }
 209     }
 210 
 211     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 212     if (home_path == NULL) {
 213       return;
 214     }
 215     strcpy(home_path, home_dir);
 216     Arguments::set_java_home(home_path);
 217     FREE_C_HEAP_ARRAY(char, home_path);
 218 
 219     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 220                                 mtInternal);
 221     if (dll_path == NULL) {
 222       return;
 223     }
 224     strcpy(dll_path, home_dir);
 225     strcat(dll_path, bin);
 226     Arguments::set_dll_dir(dll_path);
 227     FREE_C_HEAP_ARRAY(char, dll_path);
 228 
 229     if (!set_boot_path('\\', ';')) {
 230       return;
 231     }
 232   }
 233 
 234 // library_path
 235 #define EXT_DIR "\\lib\\ext"
 236 #define BIN_DIR "\\bin"
 237 #define PACKAGE_DIR "\\Sun\\Java"
 238   {
 239     // Win32 library search order (See the documentation for LoadLibrary):
 240     //
 241     // 1. The directory from which application is loaded.
 242     // 2. The system wide Java Extensions directory (Java only)
 243     // 3. System directory (GetSystemDirectory)
 244     // 4. Windows directory (GetWindowsDirectory)
 245     // 5. The PATH environment variable
 246     // 6. The current directory
 247 
 248     char *library_path;
 249     char tmp[MAX_PATH];
 250     char *path_str = ::getenv("PATH");
 251 
 252     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 253                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 254 
 255     library_path[0] = '\0';
 256 
 257     GetModuleFileName(NULL, tmp, sizeof(tmp));
 258     *(strrchr(tmp, '\\')) = '\0';
 259     strcat(library_path, tmp);
 260 
 261     GetWindowsDirectory(tmp, sizeof(tmp));
 262     strcat(library_path, ";");
 263     strcat(library_path, tmp);
 264     strcat(library_path, PACKAGE_DIR BIN_DIR);
 265 
 266     GetSystemDirectory(tmp, sizeof(tmp));
 267     strcat(library_path, ";");
 268     strcat(library_path, tmp);
 269 
 270     GetWindowsDirectory(tmp, sizeof(tmp));
 271     strcat(library_path, ";");
 272     strcat(library_path, tmp);
 273 
 274     if (path_str) {
 275       strcat(library_path, ";");
 276       strcat(library_path, path_str);
 277     }
 278 
 279     strcat(library_path, ";.");
 280 
 281     Arguments::set_library_path(library_path);
 282     FREE_C_HEAP_ARRAY(char, library_path);
 283   }
 284 
 285   // Default extensions directory
 286   {
 287     char path[MAX_PATH];
 288     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 289     GetWindowsDirectory(path, MAX_PATH);
 290     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 291             path, PACKAGE_DIR, EXT_DIR);
 292     Arguments::set_ext_dirs(buf);
 293   }
 294   #undef EXT_DIR
 295   #undef BIN_DIR
 296   #undef PACKAGE_DIR
 297 
 298 #ifndef _WIN64
 299   // set our UnhandledExceptionFilter and save any previous one
 300   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 301 #endif
 302 
 303   // Done
 304   return;
 305 }
 306 
 307 void os::breakpoint() {
 308   DebugBreak();
 309 }
 310 
 311 // Invoked from the BREAKPOINT Macro
 312 extern "C" void breakpoint() {
 313   os::breakpoint();
 314 }
 315 
 316 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 317 // So far, this method is only used by Native Memory Tracking, which is
 318 // only supported on Windows XP or later.
 319 //
 320 int os::get_native_stack(address* stack, int frames, int toSkip) {
 321   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 322   for (int index = captured; index < frames; index ++) {
 323     stack[index] = NULL;
 324   }
 325   return captured;
 326 }
 327 
 328 
 329 // os::current_stack_base()
 330 //
 331 //   Returns the base of the stack, which is the stack's
 332 //   starting address.  This function must be called
 333 //   while running on the stack of the thread being queried.
 334 
 335 address os::current_stack_base() {
 336   MEMORY_BASIC_INFORMATION minfo;
 337   address stack_bottom;
 338   size_t stack_size;
 339 
 340   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 341   stack_bottom =  (address)minfo.AllocationBase;
 342   stack_size = minfo.RegionSize;
 343 
 344   // Add up the sizes of all the regions with the same
 345   // AllocationBase.
 346   while (1) {
 347     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 348     if (stack_bottom == (address)minfo.AllocationBase) {
 349       stack_size += minfo.RegionSize;
 350     } else {
 351       break;
 352     }
 353   }
 354 
 355 #ifdef _M_IA64
 356   // IA64 has memory and register stacks
 357   //
 358   // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
 359   // at thread creation (1MB backing store growing upwards, 1MB memory stack
 360   // growing downwards, 2MB summed up)
 361   //
 362   // ...
 363   // ------- top of stack (high address) -----
 364   // |
 365   // |      1MB
 366   // |      Backing Store (Register Stack)
 367   // |
 368   // |         / \
 369   // |          |
 370   // |          |
 371   // |          |
 372   // ------------------------ stack base -----
 373   // |      1MB
 374   // |      Memory Stack
 375   // |
 376   // |          |
 377   // |          |
 378   // |          |
 379   // |         \ /
 380   // |
 381   // ----- bottom of stack (low address) -----
 382   // ...
 383 
 384   stack_size = stack_size / 2;
 385 #endif
 386   return stack_bottom + stack_size;
 387 }
 388 
 389 size_t os::current_stack_size() {
 390   size_t sz;
 391   MEMORY_BASIC_INFORMATION minfo;
 392   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 393   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 394   return sz;
 395 }
 396 
 397 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 398   const struct tm* time_struct_ptr = localtime(clock);
 399   if (time_struct_ptr != NULL) {
 400     *res = *time_struct_ptr;
 401     return res;
 402   }
 403   return NULL;
 404 }
 405 
 406 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 407   const struct tm* time_struct_ptr = gmtime(clock);
 408   if (time_struct_ptr != NULL) {
 409     *res = *time_struct_ptr;
 410     return res;
 411   }
 412   return NULL;
 413 }
 414 
 415 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 416 
 417 // Thread start routine for all newly created threads
 418 static unsigned __stdcall thread_native_entry(Thread* thread) {
 419   // Try to randomize the cache line index of hot stack frames.
 420   // This helps when threads of the same stack traces evict each other's
 421   // cache lines. The threads can be either from the same JVM instance, or
 422   // from different JVM instances. The benefit is especially true for
 423   // processors with hyperthreading technology.
 424   static int counter = 0;
 425   int pid = os::current_process_id();
 426   _alloca(((pid ^ counter++) & 7) * 128);
 427 
 428   thread->initialize_thread_current();
 429 
 430   OSThread* osthr = thread->osthread();
 431   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 432 
 433   if (UseNUMA) {
 434     int lgrp_id = os::numa_get_group_id();
 435     if (lgrp_id != -1) {
 436       thread->set_lgrp_id(lgrp_id);
 437     }
 438   }
 439 
 440   // Diagnostic code to investigate JDK-6573254
 441   int res = 30115;  // non-java thread
 442   if (thread->is_Java_thread()) {
 443     res = 20115;    // java thread
 444   }
 445 
 446   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 447 
 448   // Install a win32 structured exception handler around every thread created
 449   // by VM, so VM can generate error dump when an exception occurred in non-
 450   // Java thread (e.g. VM thread).
 451   __try {
 452     thread->run();
 453   } __except(topLevelExceptionFilter(
 454                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 455     // Nothing to do.
 456   }
 457 
 458   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 459 
 460   // One less thread is executing
 461   // When the VMThread gets here, the main thread may have already exited
 462   // which frees the CodeHeap containing the Atomic::add code
 463   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 464     Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
 465   }
 466 
 467   // If a thread has not deleted itself ("delete this") as part of its
 468   // termination sequence, we have to ensure thread-local-storage is
 469   // cleared before we actually terminate. No threads should ever be
 470   // deleted asynchronously with respect to their termination.
 471   if (Thread::current_or_null_safe() != NULL) {
 472     assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
 473     thread->clear_thread_current();
 474   }
 475 
 476   // Thread must not return from exit_process_or_thread(), but if it does,
 477   // let it proceed to exit normally
 478   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 479 }
 480 
 481 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 482                                   int thread_id) {
 483   // Allocate the OSThread object
 484   OSThread* osthread = new OSThread(NULL, NULL);
 485   if (osthread == NULL) return NULL;
 486 
 487   // Initialize support for Java interrupts
 488   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 489   if (interrupt_event == NULL) {
 490     delete osthread;
 491     return NULL;
 492   }
 493   osthread->set_interrupt_event(interrupt_event);
 494 
 495   // Store info on the Win32 thread into the OSThread
 496   osthread->set_thread_handle(thread_handle);
 497   osthread->set_thread_id(thread_id);
 498 
 499   if (UseNUMA) {
 500     int lgrp_id = os::numa_get_group_id();
 501     if (lgrp_id != -1) {
 502       thread->set_lgrp_id(lgrp_id);
 503     }
 504   }
 505 
 506   // Initial thread state is INITIALIZED, not SUSPENDED
 507   osthread->set_state(INITIALIZED);
 508 
 509   return osthread;
 510 }
 511 
 512 
 513 bool os::create_attached_thread(JavaThread* thread) {
 514 #ifdef ASSERT
 515   thread->verify_not_published();
 516 #endif
 517   HANDLE thread_h;
 518   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 519                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 520     fatal("DuplicateHandle failed\n");
 521   }
 522   OSThread* osthread = create_os_thread(thread, thread_h,
 523                                         (int)current_thread_id());
 524   if (osthread == NULL) {
 525     return false;
 526   }
 527 
 528   // Initial thread state is RUNNABLE
 529   osthread->set_state(RUNNABLE);
 530 
 531   thread->set_osthread(osthread);
 532 
 533   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 534     os::current_thread_id());
 535 
 536   return true;
 537 }
 538 
 539 bool os::create_main_thread(JavaThread* thread) {
 540 #ifdef ASSERT
 541   thread->verify_not_published();
 542 #endif
 543   if (_starting_thread == NULL) {
 544     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 545     if (_starting_thread == NULL) {
 546       return false;
 547     }
 548   }
 549 
 550   // The primordial thread is runnable from the start)
 551   _starting_thread->set_state(RUNNABLE);
 552 
 553   thread->set_osthread(_starting_thread);
 554   return true;
 555 }
 556 
 557 // Helper function to trace _beginthreadex attributes,
 558 //  similar to os::Posix::describe_pthread_attr()
 559 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 560                                                size_t stacksize, unsigned initflag) {
 561   stringStream ss(buf, buflen);
 562   if (stacksize == 0) {
 563     ss.print("stacksize: default, ");
 564   } else {
 565     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 566   }
 567   ss.print("flags: ");
 568   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 569   #define ALL(X) \
 570     X(CREATE_SUSPENDED) \
 571     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 572   ALL(PRINT_FLAG)
 573   #undef ALL
 574   #undef PRINT_FLAG
 575   return buf;
 576 }
 577 
 578 // Allocate and initialize a new OSThread
 579 bool os::create_thread(Thread* thread, ThreadType thr_type,
 580                        size_t stack_size) {
 581   unsigned thread_id;
 582 
 583   // Allocate the OSThread object
 584   OSThread* osthread = new OSThread(NULL, NULL);
 585   if (osthread == NULL) {
 586     return false;
 587   }
 588 
 589   // Initialize support for Java interrupts
 590   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 591   if (interrupt_event == NULL) {
 592     delete osthread;
 593     return NULL;
 594   }
 595   osthread->set_interrupt_event(interrupt_event);
 596   osthread->set_interrupted(false);
 597 
 598   thread->set_osthread(osthread);
 599 
 600   if (stack_size == 0) {
 601     switch (thr_type) {
 602     case os::java_thread:
 603       // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
 604       if (JavaThread::stack_size_at_create() > 0) {
 605         stack_size = JavaThread::stack_size_at_create();
 606       }
 607       break;
 608     case os::compiler_thread:
 609       if (CompilerThreadStackSize > 0) {
 610         stack_size = (size_t)(CompilerThreadStackSize * K);
 611         break;
 612       } // else fall through:
 613         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 614     case os::vm_thread:
 615     case os::pgc_thread:
 616     case os::cgc_thread:
 617     case os::watcher_thread:
 618       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 619       break;
 620     }
 621   }
 622 
 623   // Create the Win32 thread
 624   //
 625   // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
 626   // does not specify stack size. Instead, it specifies the size of
 627   // initially committed space. The stack size is determined by
 628   // PE header in the executable. If the committed "stack_size" is larger
 629   // than default value in the PE header, the stack is rounded up to the
 630   // nearest multiple of 1MB. For example if the launcher has default
 631   // stack size of 320k, specifying any size less than 320k does not
 632   // affect the actual stack size at all, it only affects the initial
 633   // commitment. On the other hand, specifying 'stack_size' larger than
 634   // default value may cause significant increase in memory usage, because
 635   // not only the stack space will be rounded up to MB, but also the
 636   // entire space is committed upfront.
 637   //
 638   // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
 639   // for CreateThread() that can treat 'stack_size' as stack size. However we
 640   // are not supposed to call CreateThread() directly according to MSDN
 641   // document because JVM uses C runtime library. The good news is that the
 642   // flag appears to work with _beginthredex() as well.
 643 
 644   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 645   HANDLE thread_handle =
 646     (HANDLE)_beginthreadex(NULL,
 647                            (unsigned)stack_size,
 648                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 649                            thread,
 650                            initflag,
 651                            &thread_id);
 652 
 653   char buf[64];
 654   if (thread_handle != NULL) {
 655     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 656       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 657   } else {
 658     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 659       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 660   }
 661 
 662   if (thread_handle == NULL) {
 663     // Need to clean up stuff we've allocated so far
 664     CloseHandle(osthread->interrupt_event());
 665     thread->set_osthread(NULL);
 666     delete osthread;
 667     return NULL;
 668   }
 669 
 670   Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
 671 
 672   // Store info on the Win32 thread into the OSThread
 673   osthread->set_thread_handle(thread_handle);
 674   osthread->set_thread_id(thread_id);
 675 
 676   // Initial thread state is INITIALIZED, not SUSPENDED
 677   osthread->set_state(INITIALIZED);
 678 
 679   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 680   return true;
 681 }
 682 
 683 
 684 // Free Win32 resources related to the OSThread
 685 void os::free_thread(OSThread* osthread) {
 686   assert(osthread != NULL, "osthread not set");
 687 
 688   // We are told to free resources of the argument thread,
 689   // but we can only really operate on the current thread.
 690   assert(Thread::current()->osthread() == osthread,
 691          "os::free_thread but not current thread");
 692 
 693   CloseHandle(osthread->thread_handle());
 694   CloseHandle(osthread->interrupt_event());
 695   delete osthread;
 696 }
 697 
 698 static jlong first_filetime;
 699 static jlong initial_performance_count;
 700 static jlong performance_frequency;
 701 
 702 
 703 jlong as_long(LARGE_INTEGER x) {
 704   jlong result = 0; // initialization to avoid warning
 705   set_high(&result, x.HighPart);
 706   set_low(&result, x.LowPart);
 707   return result;
 708 }
 709 
 710 
 711 jlong os::elapsed_counter() {
 712   LARGE_INTEGER count;
 713   QueryPerformanceCounter(&count);
 714   return as_long(count) - initial_performance_count;
 715 }
 716 
 717 
 718 jlong os::elapsed_frequency() {
 719   return performance_frequency;
 720 }
 721 
 722 
 723 julong os::available_memory() {
 724   return win32::available_memory();
 725 }
 726 
 727 julong os::win32::available_memory() {
 728   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
 729   // value if total memory is larger than 4GB
 730   MEMORYSTATUSEX ms;
 731   ms.dwLength = sizeof(ms);
 732   GlobalMemoryStatusEx(&ms);
 733 
 734   return (julong)ms.ullAvailPhys;
 735 }
 736 
 737 julong os::physical_memory() {
 738   return win32::physical_memory();
 739 }
 740 
 741 bool os::has_allocatable_memory_limit(julong* limit) {
 742   MEMORYSTATUSEX ms;
 743   ms.dwLength = sizeof(ms);
 744   GlobalMemoryStatusEx(&ms);
 745 #ifdef _LP64
 746   *limit = (julong)ms.ullAvailVirtual;
 747   return true;
 748 #else
 749   // Limit to 1400m because of the 2gb address space wall
 750   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 751   return true;
 752 #endif
 753 }
 754 
 755 int os::active_processor_count() {
 756   DWORD_PTR lpProcessAffinityMask = 0;
 757   DWORD_PTR lpSystemAffinityMask = 0;
 758   int proc_count = processor_count();
 759   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 760       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 761     // Nof active processors is number of bits in process affinity mask
 762     int bitcount = 0;
 763     while (lpProcessAffinityMask != 0) {
 764       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 765       bitcount++;
 766     }
 767     return bitcount;
 768   } else {
 769     return proc_count;
 770   }
 771 }
 772 
 773 void os::set_native_thread_name(const char *name) {
 774 
 775   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 776   //
 777   // Note that unfortunately this only works if the process
 778   // is already attached to a debugger; debugger must observe
 779   // the exception below to show the correct name.
 780 
 781   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 782   struct {
 783     DWORD dwType;     // must be 0x1000
 784     LPCSTR szName;    // pointer to name (in user addr space)
 785     DWORD dwThreadID; // thread ID (-1=caller thread)
 786     DWORD dwFlags;    // reserved for future use, must be zero
 787   } info;
 788 
 789   info.dwType = 0x1000;
 790   info.szName = name;
 791   info.dwThreadID = -1;
 792   info.dwFlags = 0;
 793 
 794   __try {
 795     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 796   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 797 }
 798 
 799 bool os::distribute_processes(uint length, uint* distribution) {
 800   // Not yet implemented.
 801   return false;
 802 }
 803 
 804 bool os::bind_to_processor(uint processor_id) {
 805   // Not yet implemented.
 806   return false;
 807 }
 808 
 809 void os::win32::initialize_performance_counter() {
 810   LARGE_INTEGER count;
 811   QueryPerformanceFrequency(&count);
 812   performance_frequency = as_long(count);
 813   QueryPerformanceCounter(&count);
 814   initial_performance_count = as_long(count);
 815 }
 816 
 817 
 818 double os::elapsedTime() {
 819   return (double) elapsed_counter() / (double) elapsed_frequency();
 820 }
 821 
 822 
 823 // Windows format:
 824 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 825 // Java format:
 826 //   Java standards require the number of milliseconds since 1/1/1970
 827 
 828 // Constant offset - calculated using offset()
 829 static jlong  _offset   = 116444736000000000;
 830 // Fake time counter for reproducible results when debugging
 831 static jlong  fake_time = 0;
 832 
 833 #ifdef ASSERT
 834 // Just to be safe, recalculate the offset in debug mode
 835 static jlong _calculated_offset = 0;
 836 static int   _has_calculated_offset = 0;
 837 
 838 jlong offset() {
 839   if (_has_calculated_offset) return _calculated_offset;
 840   SYSTEMTIME java_origin;
 841   java_origin.wYear          = 1970;
 842   java_origin.wMonth         = 1;
 843   java_origin.wDayOfWeek     = 0; // ignored
 844   java_origin.wDay           = 1;
 845   java_origin.wHour          = 0;
 846   java_origin.wMinute        = 0;
 847   java_origin.wSecond        = 0;
 848   java_origin.wMilliseconds  = 0;
 849   FILETIME jot;
 850   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 851     fatal("Error = %d\nWindows error", GetLastError());
 852   }
 853   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 854   _has_calculated_offset = 1;
 855   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 856   return _calculated_offset;
 857 }
 858 #else
 859 jlong offset() {
 860   return _offset;
 861 }
 862 #endif
 863 
 864 jlong windows_to_java_time(FILETIME wt) {
 865   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 866   return (a - offset()) / 10000;
 867 }
 868 
 869 // Returns time ticks in (10th of micro seconds)
 870 jlong windows_to_time_ticks(FILETIME wt) {
 871   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 872   return (a - offset());
 873 }
 874 
 875 FILETIME java_to_windows_time(jlong l) {
 876   jlong a = (l * 10000) + offset();
 877   FILETIME result;
 878   result.dwHighDateTime = high(a);
 879   result.dwLowDateTime  = low(a);
 880   return result;
 881 }
 882 
 883 bool os::supports_vtime() { return true; }
 884 bool os::enable_vtime() { return false; }
 885 bool os::vtime_enabled() { return false; }
 886 
 887 double os::elapsedVTime() {
 888   FILETIME created;
 889   FILETIME exited;
 890   FILETIME kernel;
 891   FILETIME user;
 892   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 893     // the resolution of windows_to_java_time() should be sufficient (ms)
 894     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 895   } else {
 896     return elapsedTime();
 897   }
 898 }
 899 
 900 jlong os::javaTimeMillis() {
 901   if (UseFakeTimers) {
 902     return fake_time++;
 903   } else {
 904     FILETIME wt;
 905     GetSystemTimeAsFileTime(&wt);
 906     return windows_to_java_time(wt);
 907   }
 908 }
 909 
 910 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 911   FILETIME wt;
 912   GetSystemTimeAsFileTime(&wt);
 913   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 914   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 915   seconds = secs;
 916   nanos = jlong(ticks - (secs*10000000)) * 100;
 917 }
 918 
 919 jlong os::javaTimeNanos() {
 920     LARGE_INTEGER current_count;
 921     QueryPerformanceCounter(&current_count);
 922     double current = as_long(current_count);
 923     double freq = performance_frequency;
 924     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 925     return time;
 926 }
 927 
 928 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 929   jlong freq = performance_frequency;
 930   if (freq < NANOSECS_PER_SEC) {
 931     // the performance counter is 64 bits and we will
 932     // be multiplying it -- so no wrap in 64 bits
 933     info_ptr->max_value = ALL_64_BITS;
 934   } else if (freq > NANOSECS_PER_SEC) {
 935     // use the max value the counter can reach to
 936     // determine the max value which could be returned
 937     julong max_counter = (julong)ALL_64_BITS;
 938     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 939   } else {
 940     // the performance counter is 64 bits and we will
 941     // be using it directly -- so no wrap in 64 bits
 942     info_ptr->max_value = ALL_64_BITS;
 943   }
 944 
 945   // using a counter, so no skipping
 946   info_ptr->may_skip_backward = false;
 947   info_ptr->may_skip_forward = false;
 948 
 949   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 950 }
 951 
 952 char* os::local_time_string(char *buf, size_t buflen) {
 953   SYSTEMTIME st;
 954   GetLocalTime(&st);
 955   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 956                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 957   return buf;
 958 }
 959 
 960 bool os::getTimesSecs(double* process_real_time,
 961                       double* process_user_time,
 962                       double* process_system_time) {
 963   HANDLE h_process = GetCurrentProcess();
 964   FILETIME create_time, exit_time, kernel_time, user_time;
 965   BOOL result = GetProcessTimes(h_process,
 966                                 &create_time,
 967                                 &exit_time,
 968                                 &kernel_time,
 969                                 &user_time);
 970   if (result != 0) {
 971     FILETIME wt;
 972     GetSystemTimeAsFileTime(&wt);
 973     jlong rtc_millis = windows_to_java_time(wt);
 974     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 975     *process_user_time =
 976       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 977     *process_system_time =
 978       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
 979     return true;
 980   } else {
 981     return false;
 982   }
 983 }
 984 
 985 void os::shutdown() {
 986   // allow PerfMemory to attempt cleanup of any persistent resources
 987   perfMemory_exit();
 988 
 989   // flush buffered output, finish log files
 990   ostream_abort();
 991 
 992   // Check for abort hook
 993   abort_hook_t abort_hook = Arguments::abort_hook();
 994   if (abort_hook != NULL) {
 995     abort_hook();
 996   }
 997 }
 998 
 999 
1000 static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1001                                          PMINIDUMP_EXCEPTION_INFORMATION,
1002                                          PMINIDUMP_USER_STREAM_INFORMATION,
1003                                          PMINIDUMP_CALLBACK_INFORMATION);
1004 
1005 static HANDLE dumpFile = NULL;
1006 
1007 // Check if dump file can be created.
1008 void os::check_dump_limit(char* buffer, size_t buffsz) {
1009   bool status = true;
1010   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1011     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1012     status = false;
1013   }
1014 
1015 #ifndef ASSERT
1016   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1017     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1018     status = false;
1019   }
1020 #endif
1021 
1022   if (status) {
1023     const char* cwd = get_current_directory(NULL, 0);
1024     int pid = current_process_id();
1025     if (cwd != NULL) {
1026       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1027     } else {
1028       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1029     }
1030 
1031     if (dumpFile == NULL &&
1032        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1033                  == INVALID_HANDLE_VALUE) {
1034       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1035       status = false;
1036     }
1037   }
1038   VMError::record_coredump_status(buffer, status);
1039 }
1040 
1041 void os::abort(bool dump_core, void* siginfo, const void* context) {
1042   HINSTANCE dbghelp;
1043   EXCEPTION_POINTERS ep;
1044   MINIDUMP_EXCEPTION_INFORMATION mei;
1045   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1046 
1047   HANDLE hProcess = GetCurrentProcess();
1048   DWORD processId = GetCurrentProcessId();
1049   MINIDUMP_TYPE dumpType;
1050 
1051   shutdown();
1052   if (!dump_core || dumpFile == NULL) {
1053     if (dumpFile != NULL) {
1054       CloseHandle(dumpFile);
1055     }
1056     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1057   }
1058 
1059   dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
1060 
1061   if (dbghelp == NULL) {
1062     jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
1063     CloseHandle(dumpFile);
1064     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1065   }
1066 
1067   _MiniDumpWriteDump =
1068       CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1069                                     PMINIDUMP_EXCEPTION_INFORMATION,
1070                                     PMINIDUMP_USER_STREAM_INFORMATION,
1071                                     PMINIDUMP_CALLBACK_INFORMATION),
1072                                     GetProcAddress(dbghelp,
1073                                     "MiniDumpWriteDump"));
1074 
1075   if (_MiniDumpWriteDump == NULL) {
1076     jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
1077     CloseHandle(dumpFile);
1078     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1079   }
1080 
1081   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1082     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1083 
1084   if (siginfo != NULL && context != NULL) {
1085     ep.ContextRecord = (PCONTEXT) context;
1086     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1087 
1088     mei.ThreadId = GetCurrentThreadId();
1089     mei.ExceptionPointers = &ep;
1090     pmei = &mei;
1091   } else {
1092     pmei = NULL;
1093   }
1094 
1095   // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
1096   // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
1097   if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
1098       _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
1099     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1100   }
1101   CloseHandle(dumpFile);
1102   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1103 }
1104 
1105 // Die immediately, no exit hook, no abort hook, no cleanup.
1106 void os::die() {
1107   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1108 }
1109 
1110 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1111 //  * dirent_md.c       1.15 00/02/02
1112 //
1113 // The declarations for DIR and struct dirent are in jvm_win32.h.
1114 
1115 // Caller must have already run dirname through JVM_NativePath, which removes
1116 // duplicate slashes and converts all instances of '/' into '\\'.
1117 
1118 DIR * os::opendir(const char *dirname) {
1119   assert(dirname != NULL, "just checking");   // hotspot change
1120   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1121   DWORD fattr;                                // hotspot change
1122   char alt_dirname[4] = { 0, 0, 0, 0 };
1123 
1124   if (dirp == 0) {
1125     errno = ENOMEM;
1126     return 0;
1127   }
1128 
1129   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1130   // as a directory in FindFirstFile().  We detect this case here and
1131   // prepend the current drive name.
1132   //
1133   if (dirname[1] == '\0' && dirname[0] == '\\') {
1134     alt_dirname[0] = _getdrive() + 'A' - 1;
1135     alt_dirname[1] = ':';
1136     alt_dirname[2] = '\\';
1137     alt_dirname[3] = '\0';
1138     dirname = alt_dirname;
1139   }
1140 
1141   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1142   if (dirp->path == 0) {
1143     free(dirp);
1144     errno = ENOMEM;
1145     return 0;
1146   }
1147   strcpy(dirp->path, dirname);
1148 
1149   fattr = GetFileAttributes(dirp->path);
1150   if (fattr == 0xffffffff) {
1151     free(dirp->path);
1152     free(dirp);
1153     errno = ENOENT;
1154     return 0;
1155   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1156     free(dirp->path);
1157     free(dirp);
1158     errno = ENOTDIR;
1159     return 0;
1160   }
1161 
1162   // Append "*.*", or possibly "\\*.*", to path
1163   if (dirp->path[1] == ':' &&
1164       (dirp->path[2] == '\0' ||
1165       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1166     // No '\\' needed for cases like "Z:" or "Z:\"
1167     strcat(dirp->path, "*.*");
1168   } else {
1169     strcat(dirp->path, "\\*.*");
1170   }
1171 
1172   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1173   if (dirp->handle == INVALID_HANDLE_VALUE) {
1174     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1175       free(dirp->path);
1176       free(dirp);
1177       errno = EACCES;
1178       return 0;
1179     }
1180   }
1181   return dirp;
1182 }
1183 
1184 // parameter dbuf unused on Windows
1185 struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
1186   assert(dirp != NULL, "just checking");      // hotspot change
1187   if (dirp->handle == INVALID_HANDLE_VALUE) {
1188     return 0;
1189   }
1190 
1191   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1192 
1193   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1194     if (GetLastError() == ERROR_INVALID_HANDLE) {
1195       errno = EBADF;
1196       return 0;
1197     }
1198     FindClose(dirp->handle);
1199     dirp->handle = INVALID_HANDLE_VALUE;
1200   }
1201 
1202   return &dirp->dirent;
1203 }
1204 
1205 int os::closedir(DIR *dirp) {
1206   assert(dirp != NULL, "just checking");      // hotspot change
1207   if (dirp->handle != INVALID_HANDLE_VALUE) {
1208     if (!FindClose(dirp->handle)) {
1209       errno = EBADF;
1210       return -1;
1211     }
1212     dirp->handle = INVALID_HANDLE_VALUE;
1213   }
1214   free(dirp->path);
1215   free(dirp);
1216   return 0;
1217 }
1218 
1219 // This must be hard coded because it's the system's temporary
1220 // directory not the java application's temp directory, ala java.io.tmpdir.
1221 const char* os::get_temp_directory() {
1222   static char path_buf[MAX_PATH];
1223   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1224     return path_buf;
1225   } else {
1226     path_buf[0] = '\0';
1227     return path_buf;
1228   }
1229 }
1230 
1231 static bool file_exists(const char* filename) {
1232   if (filename == NULL || strlen(filename) == 0) {
1233     return false;
1234   }
1235   return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1236 }
1237 
1238 bool os::dll_build_name(char *buffer, size_t buflen,
1239                         const char* pname, const char* fname) {
1240   bool retval = false;
1241   const size_t pnamelen = pname ? strlen(pname) : 0;
1242   const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1243 
1244   // Return error on buffer overflow.
1245   if (pnamelen + strlen(fname) + 10 > buflen) {
1246     return retval;
1247   }
1248 
1249   if (pnamelen == 0) {
1250     jio_snprintf(buffer, buflen, "%s.dll", fname);
1251     retval = true;
1252   } else if (c == ':' || c == '\\') {
1253     jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1254     retval = true;
1255   } else if (strchr(pname, *os::path_separator()) != NULL) {
1256     int n;
1257     char** pelements = split_path(pname, &n);
1258     if (pelements == NULL) {
1259       return false;
1260     }
1261     for (int i = 0; i < n; i++) {
1262       char* path = pelements[i];
1263       // Really shouldn't be NULL, but check can't hurt
1264       size_t plen = (path == NULL) ? 0 : strlen(path);
1265       if (plen == 0) {
1266         continue; // skip the empty path values
1267       }
1268       const char lastchar = path[plen - 1];
1269       if (lastchar == ':' || lastchar == '\\') {
1270         jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1271       } else {
1272         jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1273       }
1274       if (file_exists(buffer)) {
1275         retval = true;
1276         break;
1277       }
1278     }
1279     // release the storage
1280     for (int i = 0; i < n; i++) {
1281       if (pelements[i] != NULL) {
1282         FREE_C_HEAP_ARRAY(char, pelements[i]);
1283       }
1284     }
1285     if (pelements != NULL) {
1286       FREE_C_HEAP_ARRAY(char*, pelements);
1287     }
1288   } else {
1289     jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1290     retval = true;
1291   }
1292   return retval;
1293 }
1294 
1295 // Needs to be in os specific directory because windows requires another
1296 // header file <direct.h>
1297 const char* os::get_current_directory(char *buf, size_t buflen) {
1298   int n = static_cast<int>(buflen);
1299   if (buflen > INT_MAX)  n = INT_MAX;
1300   return _getcwd(buf, n);
1301 }
1302 
1303 //-----------------------------------------------------------
1304 // Helper functions for fatal error handler
1305 #ifdef _WIN64
1306 // Helper routine which returns true if address in
1307 // within the NTDLL address space.
1308 //
1309 static bool _addr_in_ntdll(address addr) {
1310   HMODULE hmod;
1311   MODULEINFO minfo;
1312 
1313   hmod = GetModuleHandle("NTDLL.DLL");
1314   if (hmod == NULL) return false;
1315   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1316                                           &minfo, sizeof(MODULEINFO))) {
1317     return false;
1318   }
1319 
1320   if ((addr >= minfo.lpBaseOfDll) &&
1321       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1322     return true;
1323   } else {
1324     return false;
1325   }
1326 }
1327 #endif
1328 
1329 struct _modinfo {
1330   address addr;
1331   char*   full_path;   // point to a char buffer
1332   int     buflen;      // size of the buffer
1333   address base_addr;
1334 };
1335 
1336 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1337                                   address top_address, void * param) {
1338   struct _modinfo *pmod = (struct _modinfo *)param;
1339   if (!pmod) return -1;
1340 
1341   if (base_addr   <= pmod->addr &&
1342       top_address > pmod->addr) {
1343     // if a buffer is provided, copy path name to the buffer
1344     if (pmod->full_path) {
1345       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1346     }
1347     pmod->base_addr = base_addr;
1348     return 1;
1349   }
1350   return 0;
1351 }
1352 
1353 bool os::dll_address_to_library_name(address addr, char* buf,
1354                                      int buflen, int* offset) {
1355   // buf is not optional, but offset is optional
1356   assert(buf != NULL, "sanity check");
1357 
1358 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
1359 //       return the full path to the DLL file, sometimes it returns path
1360 //       to the corresponding PDB file (debug info); sometimes it only
1361 //       returns partial path, which makes life painful.
1362 
1363   struct _modinfo mi;
1364   mi.addr      = addr;
1365   mi.full_path = buf;
1366   mi.buflen    = buflen;
1367   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1368     // buf already contains path name
1369     if (offset) *offset = addr - mi.base_addr;
1370     return true;
1371   }
1372 
1373   buf[0] = '\0';
1374   if (offset) *offset = -1;
1375   return false;
1376 }
1377 
1378 bool os::dll_address_to_function_name(address addr, char *buf,
1379                                       int buflen, int *offset,
1380                                       bool demangle) {
1381   // buf is not optional, but offset is optional
1382   assert(buf != NULL, "sanity check");
1383 
1384   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1385     return true;
1386   }
1387   if (offset != NULL)  *offset  = -1;
1388   buf[0] = '\0';
1389   return false;
1390 }
1391 
1392 // save the start and end address of jvm.dll into param[0] and param[1]
1393 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1394                            address top_address, void * param) {
1395   if (!param) return -1;
1396 
1397   if (base_addr   <= (address)_locate_jvm_dll &&
1398       top_address > (address)_locate_jvm_dll) {
1399     ((address*)param)[0] = base_addr;
1400     ((address*)param)[1] = top_address;
1401     return 1;
1402   }
1403   return 0;
1404 }
1405 
1406 address vm_lib_location[2];    // start and end address of jvm.dll
1407 
1408 // check if addr is inside jvm.dll
1409 bool os::address_is_in_vm(address addr) {
1410   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1411     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1412       assert(false, "Can't find jvm module.");
1413       return false;
1414     }
1415   }
1416 
1417   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1418 }
1419 
1420 // print module info; param is outputStream*
1421 static int _print_module(const char* fname, address base_address,
1422                          address top_address, void* param) {
1423   if (!param) return -1;
1424 
1425   outputStream* st = (outputStream*)param;
1426 
1427   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1428   return 0;
1429 }
1430 
1431 // Loads .dll/.so and
1432 // in case of error it checks if .dll/.so was built for the
1433 // same architecture as Hotspot is running on
1434 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1435   void * result = LoadLibrary(name);
1436   if (result != NULL) {
1437     return result;
1438   }
1439 
1440   DWORD errcode = GetLastError();
1441   if (errcode == ERROR_MOD_NOT_FOUND) {
1442     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1443     ebuf[ebuflen - 1] = '\0';
1444     return NULL;
1445   }
1446 
1447   // Parsing dll below
1448   // If we can read dll-info and find that dll was built
1449   // for an architecture other than Hotspot is running in
1450   // - then print to buffer "DLL was built for a different architecture"
1451   // else call os::lasterror to obtain system error message
1452 
1453   // Read system error message into ebuf
1454   // It may or may not be overwritten below (in the for loop and just above)
1455   lasterror(ebuf, (size_t) ebuflen);
1456   ebuf[ebuflen - 1] = '\0';
1457   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1458   if (fd < 0) {
1459     return NULL;
1460   }
1461 
1462   uint32_t signature_offset;
1463   uint16_t lib_arch = 0;
1464   bool failed_to_get_lib_arch =
1465     ( // Go to position 3c in the dll
1466      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1467      ||
1468      // Read location of signature
1469      (sizeof(signature_offset) !=
1470      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1471      ||
1472      // Go to COFF File Header in dll
1473      // that is located after "signature" (4 bytes long)
1474      (os::seek_to_file_offset(fd,
1475      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1476      ||
1477      // Read field that contains code of architecture
1478      // that dll was built for
1479      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1480     );
1481 
1482   ::close(fd);
1483   if (failed_to_get_lib_arch) {
1484     // file i/o error - report os::lasterror(...) msg
1485     return NULL;
1486   }
1487 
1488   typedef struct {
1489     uint16_t arch_code;
1490     char* arch_name;
1491   } arch_t;
1492 
1493   static const arch_t arch_array[] = {
1494     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1495     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1496     {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
1497   };
1498 #if   (defined _M_IA64)
1499   static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
1500 #elif (defined _M_AMD64)
1501   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1502 #elif (defined _M_IX86)
1503   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1504 #else
1505   #error Method os::dll_load requires that one of following \
1506          is defined :_M_IA64,_M_AMD64 or _M_IX86
1507 #endif
1508 
1509 
1510   // Obtain a string for printf operation
1511   // lib_arch_str shall contain string what platform this .dll was built for
1512   // running_arch_str shall string contain what platform Hotspot was built for
1513   char *running_arch_str = NULL, *lib_arch_str = NULL;
1514   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1515     if (lib_arch == arch_array[i].arch_code) {
1516       lib_arch_str = arch_array[i].arch_name;
1517     }
1518     if (running_arch == arch_array[i].arch_code) {
1519       running_arch_str = arch_array[i].arch_name;
1520     }
1521   }
1522 
1523   assert(running_arch_str,
1524          "Didn't find running architecture code in arch_array");
1525 
1526   // If the architecture is right
1527   // but some other error took place - report os::lasterror(...) msg
1528   if (lib_arch == running_arch) {
1529     return NULL;
1530   }
1531 
1532   if (lib_arch_str != NULL) {
1533     ::_snprintf(ebuf, ebuflen - 1,
1534                 "Can't load %s-bit .dll on a %s-bit platform",
1535                 lib_arch_str, running_arch_str);
1536   } else {
1537     // don't know what architecture this dll was build for
1538     ::_snprintf(ebuf, ebuflen - 1,
1539                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1540                 lib_arch, running_arch_str);
1541   }
1542 
1543   return NULL;
1544 }
1545 
1546 void os::print_dll_info(outputStream *st) {
1547   st->print_cr("Dynamic libraries:");
1548   get_loaded_modules_info(_print_module, (void *)st);
1549 }
1550 
1551 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1552   HANDLE   hProcess;
1553 
1554 # define MAX_NUM_MODULES 128
1555   HMODULE     modules[MAX_NUM_MODULES];
1556   static char filename[MAX_PATH];
1557   int         result = 0;
1558 
1559   int pid = os::current_process_id();
1560   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1561                          FALSE, pid);
1562   if (hProcess == NULL) return 0;
1563 
1564   DWORD size_needed;
1565   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1566     CloseHandle(hProcess);
1567     return 0;
1568   }
1569 
1570   // number of modules that are currently loaded
1571   int num_modules = size_needed / sizeof(HMODULE);
1572 
1573   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1574     // Get Full pathname:
1575     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1576       filename[0] = '\0';
1577     }
1578 
1579     MODULEINFO modinfo;
1580     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1581       modinfo.lpBaseOfDll = NULL;
1582       modinfo.SizeOfImage = 0;
1583     }
1584 
1585     // Invoke callback function
1586     result = callback(filename, (address)modinfo.lpBaseOfDll,
1587                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1588     if (result) break;
1589   }
1590 
1591   CloseHandle(hProcess);
1592   return result;
1593 }
1594 
1595 bool os::get_host_name(char* buf, size_t buflen) {
1596   DWORD size = (DWORD)buflen;
1597   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1598 }
1599 
1600 void os::get_summary_os_info(char* buf, size_t buflen) {
1601   stringStream sst(buf, buflen);
1602   os::win32::print_windows_version(&sst);
1603   // chop off newline character
1604   char* nl = strchr(buf, '\n');
1605   if (nl != NULL) *nl = '\0';
1606 }
1607 
1608 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1609   int ret = vsnprintf(buf, len, fmt, args);
1610   // Get the correct buffer size if buf is too small
1611   if (ret < 0) {
1612     return _vscprintf(fmt, args);
1613   }
1614   return ret;
1615 }
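// Descriptive note: the intent is C99 snprintf-like semantics; the return value
// is the length the fully formatted text needs (excluding the terminating NUL),
// so a caller seeing a value >= len knows buf was truncated and how large a
// retry buffer would have to be.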
1616 
1617 static inline time_t get_mtime(const char* filename) {
1618   struct stat st;
1619   int ret = os::stat(filename, &st);
1620   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
1621   return st.st_mtime;
1622 }
1623 
1624 int os::compare_file_modified_times(const char* file1, const char* file2) {
1625   time_t t1 = get_mtime(file1);
1626   time_t t2 = get_mtime(file2);
1627   return t1 - t2;
1628 }
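// strcmp-style usage, e.g. (illustrative only; the file names are made up):
//
//   if (os::compare_file_modified_times("classes.jsa", "rt.jar") < 0) {
//     // classes.jsa was modified before rt.jar
//   }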
1629 
1630 void os::print_os_info_brief(outputStream* st) {
1631   os::print_os_info(st);
1632 }
1633 
1634 void os::print_os_info(outputStream* st) {
1635 #ifdef ASSERT
1636   char buffer[1024];
1637   st->print("HostName: ");
1638   if (get_host_name(buffer, sizeof(buffer))) {
1639     st->print("%s ", buffer);
1640   } else {
1641     st->print("N/A ");
1642   }
1643 #endif
1644   st->print("OS:");
1645   os::win32::print_windows_version(st);
1646 }
1647 
1648 void os::win32::print_windows_version(outputStream* st) {
1649   OSVERSIONINFOEX osvi;
1650   VS_FIXEDFILEINFO *file_info;
1651   TCHAR kernel32_path[MAX_PATH];
1652   UINT len, ret;
1653 
1654   // Use the GetVersionEx information to see if we're on a server or
1655   // workstation edition of Windows. Starting with Windows 8.1 we can't
1656   // trust the OS version information returned by this API.
1657   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1658   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1659   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1660     st->print_cr("Call to GetVersionEx failed");
1661     return;
1662   }
1663   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1664 
1665   // Get the full path to \Windows\System32\kernel32.dll and use that for
1666   // determining what version of Windows we're running on.
1667   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1668   ret = GetSystemDirectory(kernel32_path, len);
1669   if (ret == 0 || ret > len) {
1670     st->print_cr("Call to GetSystemDirectory failed");
1671     return;
1672   }
1673   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1674 
1675   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1676   if (version_size == 0) {
1677     st->print_cr("Call to GetFileVersionInfoSize failed");
1678     return;
1679   }
1680 
1681   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1682   if (version_info == NULL) {
1683     st->print_cr("Failed to allocate version_info");
1684     return;
1685   }
1686 
1687   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1688     os::free(version_info);
1689     st->print_cr("Call to GetFileVersionInfo failed");
1690     return;
1691   }
1692 
1693   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1694     os::free(version_info);
1695     st->print_cr("Call to VerQueryValue failed");
1696     return;
1697   }
1698 
1699   int major_version = HIWORD(file_info->dwProductVersionMS);
1700   int minor_version = LOWORD(file_info->dwProductVersionMS);
1701   int build_number = HIWORD(file_info->dwProductVersionLS);
1702   int build_minor = LOWORD(file_info->dwProductVersionLS);
1703   int os_vers = major_version * 1000 + minor_version;
1704   os::free(version_info);
1705 
1706   st->print(" Windows ");
1707   switch (os_vers) {
1708 
1709   case 6000:
1710     if (is_workstation) {
1711       st->print("Vista");
1712     } else {
1713       st->print("Server 2008");
1714     }
1715     break;
1716 
1717   case 6001:
1718     if (is_workstation) {
1719       st->print("7");
1720     } else {
1721       st->print("Server 2008 R2");
1722     }
1723     break;
1724 
1725   case 6002:
1726     if (is_workstation) {
1727       st->print("8");
1728     } else {
1729       st->print("Server 2012");
1730     }
1731     break;
1732 
1733   case 6003:
1734     if (is_workstation) {
1735       st->print("8.1");
1736     } else {
1737       st->print("Server 2012 R2");
1738     }
1739     break;
1740 
1741   case 10000:
1742     if (is_workstation) {
1743       st->print("10");
1744     } else {
1745       st->print("Server 2016");
1746     }
1747     break;
1748 
1749   default:
1750     // Unrecognized Windows version; print out its major and minor versions
1751     st->print("%d.%d", major_version, minor_version);
1752     break;
1753   }
1754 
1755   // Retrieve SYSTEM_INFO from a GetNativeSystemInfo call so that we can
1756   // find out whether we are running on a 64 bit processor or not
1757   SYSTEM_INFO si;
1758   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1759   GetNativeSystemInfo(&si);
1760   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1761     st->print(" , 64 bit");
1762   }
1763 
1764   st->print(" Build %d", build_number);
1765   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1766   st->cr();
1767 }
1768 
1769 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1770   // Nothing to do for now.
1771 }
1772 
1773 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1774   HKEY key;
1775   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1776                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1777   if (status == ERROR_SUCCESS) {
1778     DWORD size = (DWORD)buflen;
1779     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1780     if (status != ERROR_SUCCESS) {
1781       strncpy(buf, "## __CPU__", buflen);
1782     }
1783     RegCloseKey(key);
1784   } else {
1785     // Fall back to generic CPU info
1786     strncpy(buf, "## __CPU__", buflen);
1787   }
1788 }
1789 
1790 void os::print_memory_info(outputStream* st) {
1791   st->print("Memory:");
1792   st->print(" %dk page", os::vm_page_size()>>10);
1793 
1794   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
1795   // incorrect value if total memory is larger than 4GB
1796   MEMORYSTATUSEX ms;
1797   ms.dwLength = sizeof(ms);
1798   GlobalMemoryStatusEx(&ms);
1799 
1800   st->print(", physical %uk", os::physical_memory() >> 10);
1801   st->print("(%uk free)", os::available_memory() >> 10);
1802 
1803   st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1804   st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1805   st->cr();
1806 }
1807 
1808 void os::print_siginfo(outputStream *st, const void* siginfo) {
1809   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1810   st->print("siginfo:");
1811 
1812   char tmp[64];
1813   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1814     strcpy(tmp, "EXCEPTION_??");
1815   }
1816   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1817 
1818   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1819        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1820        er->NumberParameters >= 2) {
1821     switch (er->ExceptionInformation[0]) {
1822     case 0: st->print(", reading address"); break;
1823     case 1: st->print(", writing address"); break;
1824     case 8: st->print(", data execution prevention violation at address"); break;
1825     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1826                        er->ExceptionInformation[0]);
1827     }
1828     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1829   } else {
1830     int num = er->NumberParameters;
1831     if (num > 0) {
1832       st->print(", ExceptionInformation=");
1833       for (int i = 0; i < num; i++) {
1834         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1835       }
1836     }
1837   }
1838   st->cr();
1839 }
1840 
1841 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1842   // do nothing
1843 }
1844 
1845 static char saved_jvm_path[MAX_PATH] = {0};
1846 
1847 // Find the full path to the current module, jvm.dll
1848 void os::jvm_path(char *buf, jint buflen) {
1849   // Error checking.
1850   if (buflen < MAX_PATH) {
1851     assert(false, "must use a large-enough buffer");
1852     buf[0] = '\0';
1853     return;
1854   }
1855   // Lazy resolve the path to current module.
1856   if (saved_jvm_path[0] != 0) {
1857     strcpy(buf, saved_jvm_path);
1858     return;
1859   }
1860 
1861   buf[0] = '\0';
1862   if (Arguments::sun_java_launcher_is_altjvm()) {
1863     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1864     // for a JAVA_HOME environment variable and fix up the path so it
1865     // looks like jvm.dll is installed there (append a fake suffix
1866     // hotspot/jvm.dll).
1867     char* java_home_var = ::getenv("JAVA_HOME");
1868     if (java_home_var != NULL && java_home_var[0] != 0 &&
1869         strlen(java_home_var) < (size_t)buflen) {
1870       strncpy(buf, java_home_var, buflen);
1871 
1872       // determine if this is a legacy image or a modules image;
1873       // a modules image doesn't have a "jre" subdirectory
1874       size_t len = strlen(buf);
1875       char* jrebin_p = buf + len;
1876       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1877       if (0 != _access(buf, 0)) {
1878         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1879       }
1880       len = strlen(buf);
1881       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1882     }
1883   }
1884 
1885   if (buf[0] == '\0') {
1886     GetModuleFileName(vm_lib_handle, buf, buflen);
1887   }
1888   strncpy(saved_jvm_path, buf, MAX_PATH);
1889   saved_jvm_path[MAX_PATH - 1] = '\0';
1890 }
1891 
1892 
1893 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1894 #ifndef _WIN64
1895   st->print("_");
1896 #endif
1897 }
1898 
1899 
1900 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1901 #ifndef _WIN64
1902   st->print("@%d", args_size  * sizeof(int));
1903 #endif
1904 }
1905 
1906 // This method is a copy of JDK's sysGetLastErrorString
1907 // from src/windows/hpi/src/system_md.c
1908 
1909 size_t os::lasterror(char* buf, size_t len) {
1910   DWORD errval;
1911 
1912   if ((errval = GetLastError()) != 0) {
1913     // DOS error
1914     size_t n = (size_t)FormatMessage(
1915                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1916                                      NULL,
1917                                      errval,
1918                                      0,
1919                                      buf,
1920                                      (DWORD)len,
1921                                      NULL);
1922     if (n > 3) {
1923       // Drop final '.', CR, LF
1924       if (buf[n - 1] == '\n') n--;
1925       if (buf[n - 1] == '\r') n--;
1926       if (buf[n - 1] == '.') n--;
1927       buf[n] = '\0';
1928     }
1929     return n;
1930   }
1931 
1932   if (errno != 0) {
1933     // C runtime error that has no corresponding DOS error code
1934     const char* s = os::strerror(errno);
1935     size_t n = strlen(s);
1936     if (n >= len) n = len - 1;
1937     strncpy(buf, s, n);
1938     buf[n] = '\0';
1939     return n;
1940   }
1941 
1942   return 0;
1943 }
1944 
1945 int os::get_last_error() {
1946   DWORD error = GetLastError();
1947   if (error == 0) {
1948     error = errno;
1949   }
1950   return (int)error;
1951 }
1952 
1953 WindowsSemaphore::WindowsSemaphore(uint value) {
1954   _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);
1955 
1956   guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
1957 }
1958 
1959 WindowsSemaphore::~WindowsSemaphore() {
1960   ::CloseHandle(_semaphore);
1961 }
1962 
1963 void WindowsSemaphore::signal(uint count) {
1964   if (count > 0) {
1965     BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
1966 
1967     assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
1968   }
1969 }
1970 
1971 void WindowsSemaphore::wait() {
1972   DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
1973   assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
1974   assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
1975 }
1976 
1977 // sun.misc.Signal
1978 // NOTE that this is a workaround for an apparent kernel bug where if
1979 // a signal handler for SIGBREAK is installed then that signal handler
1980 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1981 // See bug 4416763.
1982 static void (*sigbreakHandler)(int) = NULL;
1983 
1984 static void UserHandler(int sig, void *siginfo, void *context) {
1985   os::signal_notify(sig);
1986   // We need to reinstate the signal handler each time...
1987   os::signal(sig, (void*)UserHandler);
1988 }
1989 
1990 void* os::user_handler() {
1991   return (void*) UserHandler;
1992 }
1993 
1994 void* os::signal(int signal_number, void* handler) {
1995   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1996     void (*oldHandler)(int) = sigbreakHandler;
1997     sigbreakHandler = (void (*)(int)) handler;
1998     return (void*) oldHandler;
1999   } else {
2000     return (void*)::signal(signal_number, (void (*)(int))handler);
2001   }
2002 }
2003 
2004 void os::signal_raise(int signal_number) {
2005   raise(signal_number);
2006 }
2007 
2008 // The Win32 C runtime library maps all console control events other than ^C
2009 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2010 // logoff, and shutdown events.  We therefore install our own console handler
2011 // that raises SIGTERM for the latter cases.
2012 //
2013 static BOOL WINAPI consoleHandler(DWORD event) {
2014   switch (event) {
2015   case CTRL_C_EVENT:
2016     if (is_error_reported()) {
2017       // Ctrl-C was pressed during error reporting, likely because the error
2018       // handler failed to abort. Let the VM die immediately.
2019       os::die();
2020     }
2021 
2022     os::signal_raise(SIGINT);
2023     return TRUE;
2024     break;
2025   case CTRL_BREAK_EVENT:
2026     if (sigbreakHandler != NULL) {
2027       (*sigbreakHandler)(SIGBREAK);
2028     }
2029     return TRUE;
2030     break;
2031   case CTRL_LOGOFF_EVENT: {
2032     // Don't terminate JVM if it is running in a non-interactive session,
2033     // such as a service process.
2034     USEROBJECTFLAGS flags;
2035     HANDLE handle = GetProcessWindowStation();
2036     if (handle != NULL &&
2037         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2038         sizeof(USEROBJECTFLAGS), NULL)) {
2039       // If it is a non-interactive session, let the next handler deal
2040       // with it.
2041       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2042         return FALSE;
2043       }
2044     }
2045   }
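  // An interactive logoff falls through to the close/shutdown handling below,
  // which raises SIGTERM.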
2046   case CTRL_CLOSE_EVENT:
2047   case CTRL_SHUTDOWN_EVENT:
2048     os::signal_raise(SIGTERM);
2049     return TRUE;
2050     break;
2051   default:
2052     break;
2053   }
2054   return FALSE;
2055 }
2056 
2057 // The following code was moved from os.cpp to make it platform specific,
2058 // which it is by its very nature.
2059 
2060 // Return the maximum OS signal used + 1 for internal use only;
2061 // used as the exit signal for signal_thread.
2062 int os::sigexitnum_pd() {
2063   return NSIG;
2064 }
2065 
2066 // a counter for each possible signal value, including signal_thread exit signal
2067 static volatile jint pending_signals[NSIG+1] = { 0 };
2068 static HANDLE sig_sem = NULL;
2069 
2070 void os::signal_init_pd() {
2071   // Initialize signal structures
2072   memset((void*)pending_signals, 0, sizeof(pending_signals));
2073 
2074   sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
2075 
2076   // Programs embedding the VM do not want it to attempt to receive
2077   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2078   // shutdown hooks mechanism introduced in 1.3.  For example, when
2079   // the VM is run as part of a Windows NT service (i.e., a servlet
2080   // engine in a web server), the correct behavior is for any console
2081   // control handler to return FALSE, not TRUE, because the OS's
2082   // "final" handler for such events allows the process to continue if
2083   // it is a service (while terminating it if it is not a service).
2084   // To make this behavior uniform and the mechanism simpler, we
2085   // completely disable the VM's usage of these console events if -Xrs
2086   // (=ReduceSignalUsage) is specified.  This means, for example, that
2087   // the CTRL-BREAK thread dump mechanism is also disabled in this
2088   // case.  See bugs 4323062, 4345157, and related bugs.
2089 
2090   if (!ReduceSignalUsage) {
2091     // Add a CTRL-C handler
2092     SetConsoleCtrlHandler(consoleHandler, TRUE);
2093   }
2094 }
2095 
2096 void os::signal_notify(int signal_number) {
2097   BOOL ret;
2098   if (sig_sem != NULL) {
2099     Atomic::inc(&pending_signals[signal_number]);
2100     ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2101     assert(ret != 0, "ReleaseSemaphore() failed");
2102   }
2103 }
2104 
2105 static int check_pending_signals(bool wait_for_signal) {
2106   DWORD ret;
2107   while (true) {
2108     for (int i = 0; i < NSIG + 1; i++) {
2109       jint n = pending_signals[i];
2110       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2111         return i;
2112       }
2113     }
2114     if (!wait_for_signal) {
2115       return -1;
2116     }
2117 
2118     JavaThread *thread = JavaThread::current();
2119 
2120     ThreadBlockInVM tbivm(thread);
2121 
2122     bool threadIsSuspended;
2123     do {
2124       thread->set_suspend_equivalent();
2125       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2126       ret = ::WaitForSingleObject(sig_sem, INFINITE);
2127       assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
2128 
2129       // were we externally suspended while we were waiting?
2130       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2131       if (threadIsSuspended) {
2132         // The semaphore has been incremented, but while we were waiting
2133         // another thread suspended us. We don't want to continue running
2134         // while suspended because that would surprise the thread that
2135         // suspended us.
2136         ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2137         assert(ret != 0, "ReleaseSemaphore() failed");
2138 
2139         thread->java_suspend_self();
2140       }
2141     } while (threadIsSuspended);
2142   }
2143 }
2144 
2145 int os::signal_lookup() {
2146   return check_pending_signals(false);
2147 }
2148 
2149 int os::signal_wait() {
2150   return check_pending_signals(true);
2151 }
2152 
2153 // Implicit OS exception handling
2154 
2155 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2156                       address handler) {
2157   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2158   // Save pc in thread
2159 #ifdef _M_IA64
2160   // Do not blow up if no thread info available.
2161   if (thread) {
2162     // Saving PRECISE pc (with slot information) in thread.
2163     uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
2164     // Convert precise PC into "Unix" format
2165     precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
2166     thread->set_saved_exception_pc((address)precise_pc);
2167   }
2168   // Set pc to handler
2169   exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
2170   // Clear out psr.ri (= Restart Instruction) in order to continue
2171   // at the beginning of the target bundle.
2172   exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
2173   assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
2174 #else
2175   #ifdef _M_AMD64
2176   // Do not blow up if no thread info available.
2177   if (thread) {
2178     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2179   }
2180   // Set pc to handler
2181   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2182   #else
2183   // Do not blow up if no thread info available.
2184   if (thread) {
2185     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2186   }
2187   // Set pc to handler
2188   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2189   #endif
2190 #endif
2191 
2192   // Continue the execution
2193   return EXCEPTION_CONTINUE_EXECUTION;
2194 }
2195 
2196 
2197 // Used for PostMortemDump
2198 extern "C" void safepoints();
2199 extern "C" void find(int x);
2200 extern "C" void events();
2201 
2202 // According to Windows API documentation, an illegal instruction sequence should generate
2203 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2204 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2205 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2206 
2207 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2208 
2209 // From "Execution Protection in the Windows Operating System" draft 0.35
2210 // Once a system header becomes available, the "real" define should be
2211 // included or copied here.
2212 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2213 
2214 // Handle NAT Bit consumption on IA64.
2215 #ifdef _M_IA64
2216   #define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
2217 #endif
2218 
2219 // Windows Vista/2008 heap corruption check
2220 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2221 
2222 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2223 // C++ compiler contain this error code. Because this is a compiler-generated
2224 // error, the code is not listed in the Win32 API header files.
2225 // The code is actually a cryptic mnemonic device, with the initial "E"
2226 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2227 // ASCII values of "msc".
2228 
2229 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
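// As a quick check of that decomposition (illustrative only, not used by the VM):
//
//   assert(((EXCEPTION_UNCAUGHT_CXX_EXCEPTION >> 16) & 0xFF) == 'm', "second byte");
//   assert(((EXCEPTION_UNCAUGHT_CXX_EXCEPTION >>  8) & 0xFF) == 's', "third byte");
//   assert(( EXCEPTION_UNCAUGHT_CXX_EXCEPTION        & 0xFF) == 'c', "low byte");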
2230 
2231 #define def_excpt(val) { #val, (val) }
2232 
2233 static const struct { char* name; uint number; } exceptlabels[] = {
2234     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2235     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2236     def_excpt(EXCEPTION_BREAKPOINT),
2237     def_excpt(EXCEPTION_SINGLE_STEP),
2238     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2239     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2240     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2241     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2242     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2243     def_excpt(EXCEPTION_FLT_OVERFLOW),
2244     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2245     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2246     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2247     def_excpt(EXCEPTION_INT_OVERFLOW),
2248     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2249     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2250     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2251     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2252     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2253     def_excpt(EXCEPTION_STACK_OVERFLOW),
2254     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2255     def_excpt(EXCEPTION_GUARD_PAGE),
2256     def_excpt(EXCEPTION_INVALID_HANDLE),
2257     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2258     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2259 #ifdef _M_IA64
2260     , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION)
2261 #endif
2262 };
2263 
2264 #undef def_excpt
2265 
2266 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2267   uint code = static_cast<uint>(exception_code);
2268   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2269     if (exceptlabels[i].number == code) {
2270       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2271       return buf;
2272     }
2273   }
2274 
2275   return NULL;
2276 }
2277 
2278 //-----------------------------------------------------------------------------
2279 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2280   // handle exception caused by idiv; should only happen for -MinInt/-1
2281   // (division by zero is handled explicitly)
2282 #ifdef _M_IA64
2283   assert(0, "Fix Handle_IDiv_Exception");
2284 #else
2285   #ifdef  _M_AMD64
2286   PCONTEXT ctx = exceptionInfo->ContextRecord;
2287   address pc = (address)ctx->Rip;
2288   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2289   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2290   if (pc[0] == 0xF7) {
2291     // set correct result values and continue after idiv instruction
2292     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2293   } else {
2294     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2295   }
2296   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation).
2297   // This is the case because the exception only happens for -MinValue/-1, and -MinValue is always in rax because of
2298   // the idiv opcode (0xF7).
2299   ctx->Rdx = (DWORD)0;             // remainder
2300   // Continue the execution
2301   #else
2302   PCONTEXT ctx = exceptionInfo->ContextRecord;
2303   address pc = (address)ctx->Eip;
2304   assert(pc[0] == 0xF7, "not an idiv opcode");
2305   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2306   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2307   // set correct result values and continue after idiv instruction
2308   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2309   ctx->Eax = (DWORD)min_jint;      // result
2310   ctx->Edx = (DWORD)0;             // remainder
2311   // Continue the execution
2312   #endif
2313 #endif
2314   return EXCEPTION_CONTINUE_EXECUTION;
2315 }
2316 
2317 //-----------------------------------------------------------------------------
2318 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2319   PCONTEXT ctx = exceptionInfo->ContextRecord;
2320 #ifndef  _WIN64
2321   // handle exception caused by native method modifying control word
2322   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2323 
2324   switch (exception_code) {
2325   case EXCEPTION_FLT_DENORMAL_OPERAND:
2326   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2327   case EXCEPTION_FLT_INEXACT_RESULT:
2328   case EXCEPTION_FLT_INVALID_OPERATION:
2329   case EXCEPTION_FLT_OVERFLOW:
2330   case EXCEPTION_FLT_STACK_CHECK:
2331   case EXCEPTION_FLT_UNDERFLOW:
2332     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2333     if (fp_control_word != ctx->FloatSave.ControlWord) {
2334       // Restore FPCW and mask out FLT exceptions
2335       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2336       // Mask out pending FLT exceptions
2337       ctx->FloatSave.StatusWord &=  0xffffff00;
2338       return EXCEPTION_CONTINUE_EXECUTION;
2339     }
2340   }
2341 
2342   if (prev_uef_handler != NULL) {
2343     // We didn't handle this exception so pass it to the previous
2344     // UnhandledExceptionFilter.
2345     return (prev_uef_handler)(exceptionInfo);
2346   }
2347 #else // !_WIN64
2348   // On Windows, the mxcsr control bits are non-volatile across calls
2349   // See also CR 6192333
2350   //
2351   jint MxCsr = INITIAL_MXCSR;
2352   // we can't use StubRoutines::addr_mxcsr_std()
2353   // because in Win64 mxcsr is not saved there
2354   if (MxCsr != ctx->MxCsr) {
2355     ctx->MxCsr = MxCsr;
2356     return EXCEPTION_CONTINUE_EXECUTION;
2357   }
2358 #endif // !_WIN64
2359 
2360   return EXCEPTION_CONTINUE_SEARCH;
2361 }
2362 
2363 static inline void report_error(Thread* t, DWORD exception_code,
2364                                 address addr, void* siginfo, void* context) {
2365   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2366 
2367   // If UseOsErrorReporting is set, this will return here and save the error file
2368   // somewhere where we can find it in the minidump.
2369 }
2370 
2371 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2372         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2373   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2374   address addr = (address) exceptionRecord->ExceptionInformation[1];
2375   if (Interpreter::contains(pc)) {
2376     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2377     if (!fr->is_first_java_frame()) {
2378       // get_frame_at_stack_banging_point() is only called when we
2379       // have well defined stacks so java_sender() calls do not need
2380       // to assert safe_for_sender() first.
2381       *fr = fr->java_sender();
2382     }
2383   } else {
2384     // More complex case: the pc is in compiled code.
2385     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2386     CodeBlob* cb = CodeCache::find_blob(pc);
2387     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2388       // Not sure where the pc points to, fall back to the default
2389       // stack overflow handling
2390       return false;
2391     } else {
2392       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2393       // in compiled code, the stack banging is performed just after the return pc
2394       // has been pushed on the stack
2395       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2396       if (!fr->is_java_frame()) {
2397         // See java_sender() comment above.
2398         *fr = fr->java_sender();
2399       }
2400     }
2401   }
2402   assert(fr->is_java_frame(), "Safety check");
2403   return true;
2404 }
2405 
2406 //-----------------------------------------------------------------------------
2407 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2408   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2409   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2410 #ifdef _M_IA64
2411   // On Itanium, we need the "precise pc", which has the slot number coded
2412   // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
2413   address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
2414   // Convert the pc to "Unix format", which has the slot number coded
2415   // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
2416   // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
2417   // information is saved in the Unix format.
2418   address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
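  // For example, a Windows-format pc ending in binary 1000 (slot2) becomes a
  // Unix-format pc ending in binary 10: the slot bits are shifted right by two
  // while the bundle address bits are preserved.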
2419 #else
2420   #ifdef _M_AMD64
2421   address pc = (address) exceptionInfo->ContextRecord->Rip;
2422   #else
2423   address pc = (address) exceptionInfo->ContextRecord->Eip;
2424   #endif
2425 #endif
2426   Thread* t = Thread::current_or_null_safe();
2427 
2428   // Handle SafeFetch32 and SafeFetchN exceptions.
2429   if (StubRoutines::is_safefetch_fault(pc)) {
2430     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2431   }
2432 
2433 #ifndef _WIN64
2434   // Execution protection violation - win32 running on AMD64 only
2435   // Handled first to avoid misdiagnosis as a "normal" access violation;
2436   // This is safe to do because we have a new/unique ExceptionInformation
2437   // code for this condition.
2438   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2439     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2440     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2441     address addr = (address) exceptionRecord->ExceptionInformation[1];
2442 
2443     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2444       int page_size = os::vm_page_size();
2445 
2446       // Make sure the pc and the faulting address are sane.
2447       //
2448       // If an instruction spans a page boundary, and the page containing
2449       // the beginning of the instruction is executable but the following
2450       // page is not, the pc and the faulting address might be slightly
2451       // different - we still want to unguard the 2nd page in this case.
2452       //
2453       // 15 bytes seems to be a (very) safe value for max instruction size.
2454       bool pc_is_near_addr =
2455         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2456       bool instr_spans_page_boundary =
2457         (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2458                          (intptr_t) page_size) > 0);
2459 
2460       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2461         static volatile address last_addr =
2462           (address) os::non_memory_address_word();
2463 
2464         // In conservative mode, don't unguard unless the address is in the VM
2465         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2466             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2467 
2468           // Set memory to RWX and retry
2469           address page_start =
2470             (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2471           bool res = os::protect_memory((char*) page_start, page_size,
2472                                         os::MEM_PROT_RWX);
2473 
2474           log_debug(os)("Execution protection violation "
2475                         "at " INTPTR_FORMAT
2476                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2477                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2478 
2479           // Set last_addr so if we fault again at the same address, we don't
2480           // end up in an endless loop.
2481           //
2482           // There are two potential complications here.  Two threads trapping
2483           // at the same address at the same time could cause one of the
2484           // threads to think it already unguarded, and abort the VM.  Likely
2485           // very rare.
2486           //
2487           // The other race involves two threads alternately trapping at
2488           // different addresses and failing to unguard the page, resulting in
2489           // an endless loop.  This condition is probably even more unlikely
2490           // than the first.
2491           //
2492           // Although both cases could be avoided by using locks or thread
2493           // local last_addr, these solutions are unnecessary complication:
2494           // this handler is a best-effort safety net, not a complete solution.
2495           // It is disabled by default and should only be used as a workaround
2496           // in case we missed any no-execute-unsafe VM code.
2497 
2498           last_addr = addr;
2499 
2500           return EXCEPTION_CONTINUE_EXECUTION;
2501         }
2502       }
2503 
2504       // Last unguard failed or not unguarding
2505       tty->print_raw_cr("Execution protection violation");
2506       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2507                    exceptionInfo->ContextRecord);
2508       return EXCEPTION_CONTINUE_SEARCH;
2509     }
2510   }
2511 #endif // _WIN64
2512 
2513   // Check to see if we caught the safepoint code in the
2514   // process of write protecting the memory serialization page.
2515   // It write enables the page immediately after protecting it
2516   // so just return.
2517   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2518     if (t != NULL && t->is_Java_thread()) {
2519       JavaThread* thread = (JavaThread*) t;
2520       PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2521       address addr = (address) exceptionRecord->ExceptionInformation[1];
2522       if (os::is_memory_serialize_page(thread, addr)) {
2523         // Block the current thread until the memory serialize page permission is restored.
2524         os::block_on_serialize_page_trap();
2525         return EXCEPTION_CONTINUE_EXECUTION;
2526       }
2527     }
2528   }
2529 
2530   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2531       VM_Version::is_cpuinfo_segv_addr(pc)) {
2532     // Verify that the OS saves/restores AVX registers.
2533     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2534   }
2535 
2536   if (t != NULL && t->is_Java_thread()) {
2537     JavaThread* thread = (JavaThread*) t;
2538     bool in_java = thread->thread_state() == _thread_in_Java;
2539 
2540     // Handle potential stack overflows up front.
2541     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2542 #ifdef _M_IA64
2543       // Use guard page for register stack.
2544       PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2545       address addr = (address) exceptionRecord->ExceptionInformation[1];
2546       // Check for a register stack overflow on Itanium
2547       if (thread->addr_inside_register_stack_red_zone(addr)) {
2548         // Fatal red zone violation happens if the Java program
2549         // catches a StackOverflow error and does so much processing
2550         // that it runs beyond the unprotected yellow guard zone. As
2551         // a result, we are out of here.
2552         fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
2553       } else if (thread->addr_inside_register_stack(addr)) {
2554         // Disable the yellow zone which sets the state that
2555         // we've got a stack overflow problem.
2556         if (thread->stack_yellow_reserved_zone_enabled()) {
2557           thread->disable_stack_yellow_reserved_zone();
2558         }
2559         // Give us some room to process the exception.
2560         thread->disable_register_stack_guard();
2561         // Tracing with +Verbose.
2562         if (Verbose) {
2563           tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
2564           tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
2565           tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
2566           tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
2567                         thread->register_stack_base(),
2568                         thread->register_stack_base() + thread->stack_size());
2569         }
2570 
2571         // Reguard the permanent register stack red zone just to be sure.
2572         // We saw Windows silently disabling this without telling us.
2573         thread->enable_register_stack_red_zone();
2574 
2575         return Handle_Exception(exceptionInfo,
2576                                 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2577       }
2578 #endif
2579       if (thread->stack_guards_enabled()) {
2580         if (in_java) {
2581           frame fr;
2582           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2583           address addr = (address) exceptionRecord->ExceptionInformation[1];
2584           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2585             assert(fr.is_java_frame(), "Must be a Java frame");
2586             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2587           }
2588         }
2589         // Yellow zone violation.  The o/s has unprotected the first yellow
2590         // zone page for us.  Note: must call disable_stack_yellow_reserved_zone
2591         // to update the enabled status, even if the zone contains only one page.
2592         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2593         thread->disable_stack_yellow_reserved_zone();
2594         // If not in java code, return and hope for the best.
2595         return in_java
2596             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2597             :  EXCEPTION_CONTINUE_EXECUTION;
2598       } else {
2599         // Fatal red zone violation.
2600         thread->disable_stack_red_zone();
2601         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2602         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2603                       exceptionInfo->ContextRecord);
2604         return EXCEPTION_CONTINUE_SEARCH;
2605       }
2606     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2607       // Either stack overflow or null pointer exception.
2608       if (in_java) {
2609         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2610         address addr = (address) exceptionRecord->ExceptionInformation[1];
2611         address stack_end = thread->stack_end();
2612         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2613           // Stack overflow.
2614           assert(!os::uses_stack_guard_pages(),
2615                  "should be caught by red zone code above.");
2616           return Handle_Exception(exceptionInfo,
2617                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2618         }
2619         // Check for safepoint polling and implicit null pointer exceptions.
2620         // We only expect null pointers in the stubs (vtable);
2621         // the rest are checked explicitly now.
2622         CodeBlob* cb = CodeCache::find_blob(pc);
2623         if (cb != NULL) {
2624           if (os::is_poll_address(addr)) {
2625             address stub = SharedRuntime::get_poll_stub(pc);
2626             return Handle_Exception(exceptionInfo, stub);
2627           }
2628         }
2629         {
2630 #ifdef _WIN64
2631           // If it's a legal stack address, map the entire region in
2632           //
2633           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2634           address addr = (address) exceptionRecord->ExceptionInformation[1];
2635           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2636             addr = (address)((uintptr_t)addr &
2637                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2638             os::commit_memory((char *)addr, thread->stack_base() - addr,
2639                               !ExecMem);
2640             return EXCEPTION_CONTINUE_EXECUTION;
2641           } else
2642 #endif
2643           {
2644             // Null pointer exception.
2645 #ifdef _M_IA64
2646             // Process implicit null checks in compiled code. Note: Implicit null checks
2647             // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
2648             if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
2649               CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
2650               // Handle implicit null check in UEP method entry
2651               if (cb && (cb->is_frame_complete_at(pc) ||
2652                          (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
2653                 if (Verbose) {
2654                   intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
2655                   tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
2656                   tty->print_cr("      to addr " INTPTR_FORMAT, addr);
2657                   tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
2658                                 *(bundle_start + 1), *bundle_start);
2659                 }
2660                 return Handle_Exception(exceptionInfo,
2661                                         SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
2662               }
2663             }
2664 
2665             // Implicit null checks were processed above.  Hence, we should not reach
2666             // here in the usual case => die!
2667             if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
2668             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2669                          exceptionInfo->ContextRecord);
2670             return EXCEPTION_CONTINUE_SEARCH;
2671 
2672 #else // !IA64
2673 
2674             if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
2675               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2676               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2677             }
2678             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2679                          exceptionInfo->ContextRecord);
2680             return EXCEPTION_CONTINUE_SEARCH;
2681 #endif
2682           }
2683         }
2684       }
2685 
2686 #ifdef _WIN64
2687       // Special care for fast JNI field accessors.
2688       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2689       // in and the heap gets shrunk before the field access.
2690       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2691         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2692         if (addr != (address)-1) {
2693           return Handle_Exception(exceptionInfo, addr);
2694         }
2695       }
2696 #endif
2697 
2698       // Stack overflow or null pointer exception in native code.
2699       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2700                    exceptionInfo->ContextRecord);
2701       return EXCEPTION_CONTINUE_SEARCH;
2702     } // /EXCEPTION_ACCESS_VIOLATION
2703     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2704 #if defined _M_IA64
2705     else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2706               exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2707       M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
2708 
2709       // Compiled method patched to be non-entrant? The following conditions must apply:
2710       // 1. must be first instruction in bundle
2711       // 2. must be a break instruction with appropriate code
2712       if ((((uint64_t) pc & 0x0F) == 0) &&
2713           (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
2714         return Handle_Exception(exceptionInfo,
2715                                 (address)SharedRuntime::get_handle_wrong_method_stub());
2716       }
2717     } // /EXCEPTION_ILLEGAL_INSTRUCTION
2718 #endif
2719 
2720 
2721     if (in_java) {
2722       switch (exception_code) {
2723       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2724         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2725 
2726       case EXCEPTION_INT_OVERFLOW:
2727         return Handle_IDiv_Exception(exceptionInfo);
2728 
2729       } // switch
2730     }
2731     if (((thread->thread_state() == _thread_in_Java) ||
2732          (thread->thread_state() == _thread_in_native)) &&
2733          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2734       LONG result = Handle_FLT_Exception(exceptionInfo);
2735       if (result == EXCEPTION_CONTINUE_EXECUTION) return result;
2736     }
2737   }
2738 
2739   if (exception_code != EXCEPTION_BREAKPOINT) {
2740     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2741                  exceptionInfo->ContextRecord);
2742   }
2743   return EXCEPTION_CONTINUE_SEARCH;
2744 }
2745 
2746 #ifndef _WIN64
2747 // Special care for fast JNI accessors.
2748 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2749 // the heap gets shrunk before the field access.
2750 // Need to install our own structured exception handler since native code may
2751 // install its own.
2752 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2753   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2754   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2755     address pc = (address) exceptionInfo->ContextRecord->Eip;
2756     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2757     if (addr != (address)-1) {
2758       return Handle_Exception(exceptionInfo, addr);
2759     }
2760   }
2761   return EXCEPTION_CONTINUE_SEARCH;
2762 }
2763 
2764 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2765   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2766                                                      jobject obj,           \
2767                                                      jfieldID fieldID) {    \
2768     __try {                                                                 \
2769       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2770                                                                  obj,       \
2771                                                                  fieldID);  \
2772     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2773                                               _exception_info())) {         \
2774     }                                                                       \
2775     return 0;                                                               \
2776   }
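// For reference, the DEFINE_FAST_GETFIELD(jint, int, Int) instantiation below
// expands roughly to:
//
//   jint JNICALL jni_fast_GetIntField_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) {
//     __try {
//       return (*JNI_FastGetField::jni_fast_GetIntField_fp)(env, obj, fieldID);
//     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
//     }
//     return 0;
//   }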
2777 
2778 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2779 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2780 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2781 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2782 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2783 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2784 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2785 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2786 
2787 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2788   switch (type) {
2789   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2790   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2791   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2792   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2793   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2794   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2795   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2796   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2797   default:        ShouldNotReachHere();
2798   }
2799   return (address)-1;
2800 }
2801 #endif
2802 
2803 // Virtual Memory
2804 
2805 int os::vm_page_size() { return os::win32::vm_page_size(); }
2806 int os::vm_allocation_granularity() {
2807   return os::win32::vm_allocation_granularity();
2808 }
2809 
2810 // Windows large page support is available on Windows 2003. In order to use
2811 // large page memory, the administrator must first assign an additional
2812 // privilege to the user:
2813 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2814 //   + select Local Policies -> User Rights Assignment
2815 //   + double click "Lock pages in memory", add users and/or groups
2816 //   + reboot
2817 // Note the above steps are needed for administrators as well, since administrators
2818 // by default do not have the privilege to lock pages in memory.
2819 //
2820 // Note about Windows 2003: although the API supports committing large page
2821 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2822 // scenario, experimentation showed that it only uses large pages if the entire
2823 // memory region is reserved and committed in a single VirtualAlloc() call.
2824 // This makes Windows large page support more or less like Solaris ISM, in
2825 // that the entire heap must be committed upfront. This may change in the
2826 // future; if so, the code below needs to be revisited.
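// For reference, a minimal sketch of the single-call reservation described
// above (illustrative only; desired_bytes is a made-up variable, and the real
// code below also supports individual allocations):
//
//   size_t lp = GetLargePageMinimum();                       // typically 2M on x64
//   size_t size = align_size_up(desired_bytes, lp);
//   char* p = (char*) VirtualAlloc(NULL, size,
//                                  MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
//                                  PAGE_READWRITE);
//   // p is NULL if the "Lock pages in memory" privilege is missing or enough
//   // contiguous physical memory cannot be found.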
2827 
2828 #ifndef MEM_LARGE_PAGES
2829   #define MEM_LARGE_PAGES 0x20000000
2830 #endif
2831 
2832 static HANDLE    _hProcess;
2833 static HANDLE    _hToken;
2834 
2835 // Container for NUMA node list info
2836 class NUMANodeListHolder {
2837  private:
2838   int *_numa_used_node_list;  // allocated below
2839   int _numa_used_node_count;
2840 
2841   void free_node_list() {
2842     if (_numa_used_node_list != NULL) {
2843       FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2844     }
2845   }
2846 
2847  public:
2848   NUMANodeListHolder() {
2849     _numa_used_node_count = 0;
2850     _numa_used_node_list = NULL;
2851     // do rest of initialization in build routine (after function pointers are set up)
2852   }
2853 
2854   ~NUMANodeListHolder() {
2855     free_node_list();
2856   }
2857 
2858   bool build() {
2859     DWORD_PTR proc_aff_mask;
2860     DWORD_PTR sys_aff_mask;
2861     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2862     ULONG highest_node_number;
2863     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2864     free_node_list();
2865     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2866     for (unsigned int i = 0; i <= highest_node_number; i++) {
2867       ULONGLONG proc_mask_numa_node;
2868       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2869       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2870         _numa_used_node_list[_numa_used_node_count++] = i;
2871       }
2872     }
2873     return (_numa_used_node_count > 1);
2874   }
2875 
2876   int get_count() { return _numa_used_node_count; }
2877   int get_node_list_entry(int n) {
2878     // for indexes out of range, returns -1
2879     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2880   }
2881 
2882 } numa_node_list_holder;
2883 
2884 
2885 
2886 static size_t _large_page_size = 0;
2887 
2888 static bool request_lock_memory_privilege() {
2889   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2890                           os::current_process_id());
2891 
2892   LUID luid;
2893   if (_hProcess != NULL &&
2894       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2895       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2896 
2897     TOKEN_PRIVILEGES tp;
2898     tp.PrivilegeCount = 1;
2899     tp.Privileges[0].Luid = luid;
2900     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2901 
2902     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2903     // privilege. Check GetLastError() too. See the MSDN documentation.
2904     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2905         (GetLastError() == ERROR_SUCCESS)) {
2906       return true;
2907     }
2908   }
2909 
2910   return false;
2911 }
2912 
2913 static void cleanup_after_large_page_init() {
2914   if (_hProcess) CloseHandle(_hProcess);
2915   _hProcess = NULL;
2916   if (_hToken) CloseHandle(_hToken);
2917   _hToken = NULL;
2918 }
2919 
2920 static bool numa_interleaving_init() {
2921   bool success = false;
2922   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2923 
2924   // print a warning if the UseNUMAInterleaving flag is specified on the command line
2925   bool warn_on_failure = use_numa_interleaving_specified;
2926 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2927 
2928   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2929   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2930   NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
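  // For example, with 2M large pages a requested NUMAInterleaveGranularity of 3M
  // is rounded up here to 4M, the next multiple of the minimum granularity.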
2931 
2932   if (numa_node_list_holder.build()) {
2933     if (log_is_enabled(Debug, os, cpu)) {
2934       Log(os, cpu) log;
2935       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2936       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2937         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2938       }
2939     }
2940     success = true;
2941   } else {
2942     WARN("Process does not cover multiple NUMA nodes.");
2943   }
2944   if (!success) {
2945     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2946   }
2947   return success;
2948 #undef WARN
2949 }
2950 
2951 // This routine is used whenever we need to reserve a contiguous VA range
2952 // but must make separate VirtualAlloc calls for each piece of the range.
2953 // Reasons for doing this:
2954 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it may be set otherwise)
2955 //  * UseNUMAInterleaving requires a separate node for each piece
2956 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2957                                          DWORD prot,
2958                                          bool should_inject_error = false) {
2959   char * p_buf;
2960   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2961   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2962   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2963 
2964   // First reserve enough address space in advance, since we want to be
2965   // able to break a single contiguous virtual address range into multiple
2966   // large page commits, but WS2003 does not allow reserving large page space.
2967   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2968   // address space. Then we deallocate that reservation and re-allocate
2969   // using large pages.
2970   const size_t size_of_reserve = bytes + chunk_size;
2971   if (bytes > size_of_reserve) {
2972     // Overflowed.
2973     return NULL;
2974   }
2975   p_buf = (char *) VirtualAlloc(addr,
2976                                 size_of_reserve,  // size of Reserve
2977                                 MEM_RESERVE,
2978                                 PAGE_READWRITE);
2979   // If reservation failed, return NULL
2980   if (p_buf == NULL) return NULL;
2981   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2982   os::release_memory(p_buf, bytes + chunk_size);
2983 
2984   // We still need to round up to a page boundary (in case we are using large pages),
2985   // but not to a chunk boundary (in case NUMAInterleaveGranularity doesn't align with the page size);
2986   // instead we handle that in the bytes_to_rq computation below.
2987   p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2988 
2989   // now go through and allocate one chunk at a time until all bytes are
2990   // allocated
2991   size_t  bytes_remaining = bytes;
2992   // An overflow of align_size_up() would have been caught above
2993   // in the calculation of size_of_reserve.
2994   char * next_alloc_addr = p_buf;
2995   HANDLE hProc = GetCurrentProcess();
2996 
2997 #ifdef ASSERT
2998   // Variable for the failure injection
2999   long ran_num = os::random();
3000   size_t fail_after = ran_num % bytes;
3001 #endif
3002 
3003   int count = 0;
3004   while (bytes_remaining) {
3005     // select bytes_to_rq to get to the next chunk_size boundary
3006 
3007     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
3008     // Note allocate and commit
3009     char * p_new;
3010 
3011 #ifdef ASSERT
3012     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
3013 #else
3014     const bool inject_error_now = false;
3015 #endif
3016 
3017     if (inject_error_now) {
3018       p_new = NULL;
3019     } else {
3020       if (!UseNUMAInterleaving) {
3021         p_new = (char *) VirtualAlloc(next_alloc_addr,
3022                                       bytes_to_rq,
3023                                       flags,
3024                                       prot);
3025       } else {
3026         // get the next node to use from the used_node_list
3027         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3028         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3029         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3030       }
3031     }
3032 
3033     if (p_new == NULL) {
3034       // Free any allocated pages
3035       if (next_alloc_addr > p_buf) {
3036         // Some memory was committed so release it.
3037         size_t bytes_to_release = bytes - bytes_remaining;
3038       // NMT has yet to record any individual blocks, so it
3039       // needs to create a dummy 'reserve' record to match
3040       // the release.
3041         MemTracker::record_virtual_memory_reserve((address)p_buf,
3042                                                   bytes_to_release, CALLER_PC);
3043         os::release_memory(p_buf, bytes_to_release);
3044       }
3045 #ifdef ASSERT
3046       if (should_inject_error) {
3047         log_develop_debug(pagesize)("Reserving pages individually failed.");
3048       }
3049 #endif
3050       return NULL;
3051     }
3052 
3053     bytes_remaining -= bytes_to_rq;
3054     next_alloc_addr += bytes_to_rq;
3055     count++;
3056   }
3057   // Although the memory is allocated individually, it is returned as one.
3058   // NMT records it as one block.
3059   if ((flags & MEM_COMMIT) != 0) {
3060     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3061   } else {
3062     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3063   }
3064 
3065   // made it this far, success
3066   return p_buf;
3067 }
3068 
3069 
3070 
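     // Probe for large page support: requires the lock-memory privilege and a
     // non-zero GetLargePageMinimum(). UseLargePages is updated below to reflect
     // whether setup succeeded.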
3071 void os::large_page_init() {
3072   if (!UseLargePages) return;
3073 
3074   // print a warning if any large page related flag is specified on command line
3075   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3076                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3077   bool success = false;
3078 
3079 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3080   if (request_lock_memory_privilege()) {
3081     size_t s = GetLargePageMinimum();
3082     if (s) {
3083 #if defined(IA32) || defined(AMD64)
3084       if (s > 4*M || LargePageSizeInBytes > 4*M) {
3085         WARN("JVM cannot use large pages bigger than 4mb.");
3086       } else {
3087 #endif
3088         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
3089           _large_page_size = LargePageSizeInBytes;
3090         } else {
3091           _large_page_size = s;
3092         }
3093         success = true;
3094 #if defined(IA32) || defined(AMD64)
3095       }
3096 #endif
3097     } else {
3098       WARN("Large page is not supported by the processor.");
3099     }
3100   } else {
3101     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3102   }
3103 #undef WARN
3104 
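       // Publish the large page size ahead of the default page size in _page_sizes
       // (the array is null-terminated).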
3105   const size_t default_page_size = (size_t) vm_page_size();
3106   if (success && _large_page_size > default_page_size) {
3107     _page_sizes[0] = _large_page_size;
3108     _page_sizes[1] = default_page_size;
3109     _page_sizes[2] = 0;
3110   }
3111 
3112   cleanup_after_large_page_init();
3113   UseLargePages = success;
3114 }
3115 
3116 // On win32, one cannot release just a part of reserved memory; it's an
3117 // all-or-nothing deal.  When we split a reservation, we must break the
3118 // reservation into two reservations.
3119 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3120                                   bool realloc) {
3121   if (size > 0) {
3122     release_memory(base, size);
3123     if (realloc) {
3124       reserve_memory(split, base);
3125     }
3126     if (size != split) {
3127       reserve_memory(size - split, base + split);
3128     }
3129   }
3130 }
3131 
3132 // Multiple threads can race in this code, but unlike POSIX-like OSes it is not
3133 // possible to unmap small sections of virtual space to get the requested alignment.
3134 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3135 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
3136   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3137          "Alignment must be a multiple of allocation granularity (page size)");
3138   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3139 
3140   size_t extra_size = size + alignment;
3141   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3142 
3143   char* aligned_base = NULL;
3144 
3145   do {
3146     char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
3147     if (extra_base == NULL) {
3148       return NULL;
3149     }
3150     // Do manual alignment
3151     aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3152 
3153     os::release_memory(extra_base, extra_size);
3154 
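         // Re-reserve at the aligned base. Another thread may have taken the range
         // between the release above and this reserve; if so, we get NULL and retry.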
3155     aligned_base = os::reserve_memory(size, aligned_base);
3156 
3157   } while (aligned_base == NULL);
3158 
3159   return aligned_base;
3160 }
3161 
3162 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3163   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3164          "reserve alignment");
3165   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3166   char* res;
3167   // note that if UseLargePages is on, all the areas that require interleaving
3168   // will go through reserve_memory_special rather than through here.
3169   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3170   if (!use_individual) {
3171     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3172   } else {
3173     elapsedTimer reserveTimer;
3174     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3175     // in numa interleaving, we have to allocate pages individually
3176     // (well really chunks of NUMAInterleaveGranularity size)
3177     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3178     if (res == NULL) {
3179       warning("NUMA page allocation failed");
3180     }
3181     if (Verbose && PrintMiscellaneous) {
3182       reserveTimer.stop();
3183       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3184                     reserveTimer.milliseconds(), reserveTimer.ticks());
3185     }
3186   }
3187   assert(res == NULL || addr == NULL || addr == res,
3188          "Unexpected address from reserve.");
3189 
3190   return res;
3191 }
3192 
3193 // Reserve memory at an arbitrary address, only if that area is
3194 // available (and not reserved for something else).
3195 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3196   // Windows os::reserve_memory() fails if the requested address range is
3197   // not available.
3198   return reserve_memory(bytes, requested_addr);
3199 }
3200 
3201 size_t os::large_page_size() {
3202   return _large_page_size;
3203 }
3204 
3205 bool os::can_commit_large_page_memory() {
3206   // Windows only uses large page memory when the entire region is reserved
3207   // and committed in a single VirtualAlloc() call. This may change in the
3208   // future, but with Windows 2003 it's not possible to commit on demand.
3209   return false;
3210 }
3211 
3212 bool os::can_execute_large_page_memory() {
3213   return true;
3214 }
3215 
3216 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3217                                  bool exec) {
3218   assert(UseLargePages, "only for large pages");
3219 
3220   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3221     return NULL; // Fallback to small pages.
3222   }
3223 
3224   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3225   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3226 
3227   // with large pages, there are two cases where we need to use Individual Allocation
3228   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3229   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3230   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3231     log_debug(pagesize)("Reserving large pages individually.");
3232 
3233     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3234     if (p_buf == NULL) {
3235       // give an appropriate warning message
3236       if (UseNUMAInterleaving) {
3237         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3238       }
3239       if (UseLargePagesIndividualAllocation) {
3240         warning("Individually allocated large pages failed, "
3241                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3242       }
3243       return NULL;
3244     }
3245 
3246     return p_buf;
3247 
3248   } else {
3249     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3250 
3251     // Normal policy: just allocate it all at once.
3252     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3253     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3254     if (res != NULL) {
3255       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3256     }
3257 
3258     return res;
3259   }
3260 }
3261 
3262 bool os::release_memory_special(char* base, size_t bytes) {
3263   assert(base != NULL, "Sanity check");
3264   return release_memory(base, bytes);
3265 }
3266 
3267 void os::print_statistics() {
3268 }
3269 
3270 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3271   int err = os::get_last_error();
3272   char buf[256];
3273   size_t buf_len = os::lasterror(buf, sizeof(buf));
3274   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3275           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3276           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3277 }
3278 
3279 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3280   if (bytes == 0) {
3281     // Don't bother the OS with noops.
3282     return true;
3283   }
3284   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3285   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3286   // Don't attempt to print anything if the OS call fails. We're
3287   // probably low on resources, so the print itself may cause crashes.
3288 
3289   // Unless we have NUMAInterleaving enabled, the range of a commit
3290   // is always within a reserve covered by a single VirtualAlloc;
3291   // in that case we can just do a single commit for the requested size.
3292   if (!UseNUMAInterleaving) {
3293     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3294       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3295       return false;
3296     }
3297     if (exec) {
3298       DWORD oldprot;
3299       // Windows doc says to use VirtualProtect to get execute permissions
3300       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3301         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3302         return false;
3303       }
3304     }
3305     return true;
3306   } else {
3307 
3308     // when NUMAInterleaving is enabled, the commit might cover a range that
3309     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3310     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3311     // returns represents the number of bytes that can be committed in one step.
3312     size_t bytes_remaining = bytes;
3313     char * next_alloc_addr = addr;
3314     while (bytes_remaining > 0) {
3315       MEMORY_BASIC_INFORMATION alloc_info;
3316       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3317       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3318       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3319                        PAGE_READWRITE) == NULL) {
3320         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3321                                             exec);)
3322         return false;
3323       }
3324       if (exec) {
3325         DWORD oldprot;
3326         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3327                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3328           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3329                                               exec);)
3330           return false;
3331         }
3332       }
3333       bytes_remaining -= bytes_to_rq;
3334       next_alloc_addr += bytes_to_rq;
3335     }
3336   }
3337   // if we made it this far, return true
3338   return true;
3339 }
3340 
3341 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3342                           bool exec) {
3343   // alignment_hint is ignored on this OS
3344   return pd_commit_memory(addr, size, exec);
3345 }
3346 
3347 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3348                                   const char* mesg) {
3349   assert(mesg != NULL, "mesg must be specified");
3350   if (!pd_commit_memory(addr, size, exec)) {
3351     warn_fail_commit_memory(addr, size, exec);
3352     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3353   }
3354 }
3355 
3356 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3357                                   size_t alignment_hint, bool exec,
3358                                   const char* mesg) {
3359   // alignment_hint is ignored on this OS
3360   pd_commit_memory_or_exit(addr, size, exec, mesg);
3361 }
3362 
3363 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3364   if (bytes == 0) {
3365     // Don't bother the OS with noops.
3366     return true;
3367   }
3368   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3369   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3370   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3371 }
3372 
3373 bool os::pd_release_memory(char* addr, size_t bytes) {
3374   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3375 }
3376 
3377 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3378   return os::commit_memory(addr, size, !ExecMem);
3379 }
3380 
3381 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3382   return os::uncommit_memory(addr, size);
3383 }
3384 
3385 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3386   uint count = 0;
3387   bool ret = false;
3388   size_t bytes_remaining = bytes;
3389   char * next_protect_addr = addr;
3390 
3391   // Use VirtualQuery() to get the chunk size.
3392   while (bytes_remaining) {
3393     MEMORY_BASIC_INFORMATION alloc_info;
3394     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3395       return false;
3396     }
3397 
3398     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3399     // We used different APIs in allocate_pages_individually() depending on UseNUMAInterleaving,
3400     // but we don't need to distinguish the cases here, as both are protected by the same API.
3401     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3402     if (!ret) {
3403       warning("Failed protecting pages individually for chunk #%u", count);
3404       return false;
3405     }
3406 
3407     bytes_remaining -= bytes_to_protect;
3408     next_protect_addr += bytes_to_protect;
3409     count++;
3410   }
3411   return ret;
3412 }
3413 
3414 // Set protections specified
3415 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3416                         bool is_committed) {
3417   unsigned int p = 0;
3418   switch (prot) {
3419   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3420   case MEM_PROT_READ: p = PAGE_READONLY; break;
3421   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3422   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3423   default:
3424     ShouldNotReachHere();
3425   }
3426 
3427   DWORD old_status;
3428 
3429   // Strangely enough, on Win32 one can change protection only for committed
3430   // memory. Not a big deal anyway, as bytes is less than or equal to 64K.
3431   if (!is_committed) {
3432     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3433                           "cannot commit protection page");
3434   }
3435   // One cannot use os::guard_memory() here, as on Win32 guard pages
3436   // have different (one-shot) semantics. From MSDN on PAGE_GUARD:
3437   //
3438   // Pages in the region become guard pages. Any attempt to access a guard page
3439   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3440   // the guard page status. Guard pages thus act as a one-time access alarm.
3441   bool ret;
3442   if (UseNUMAInterleaving) {
3443     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3444     // so we must protect the chunks individually.
3445     ret = protect_pages_individually(addr, bytes, p, &old_status);
3446   } else {
3447     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3448   }
3449 #ifdef ASSERT
3450   if (!ret) {
3451     int err = os::get_last_error();
3452     char buf[256];
3453     size_t buf_len = os::lasterror(buf, sizeof(buf));
3454     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3455           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3456           buf_len != 0 ? buf : "<no_error_string>", err);
3457   }
3458 #endif
3459   return ret;
3460 }
3461 
3462 bool os::guard_memory(char* addr, size_t bytes) {
3463   DWORD old_status;
3464   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3465 }
3466 
3467 bool os::unguard_memory(char* addr, size_t bytes) {
3468   DWORD old_status;
3469   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3470 }
3471 
3472 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3473 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3474 void os::numa_make_global(char *addr, size_t bytes)    { }
3475 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3476 bool os::numa_topology_changed()                       { return false; }
3477 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3478 int os::numa_get_group_id()                            { return 0; }
3479 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3480   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3481     // Provide an answer for UMA systems
3482     ids[0] = 0;
3483     return 1;
3484   } else {
3485     // check for size bigger than actual groups_num
3486     size = MIN2(size, numa_get_groups_num());
3487     for (int i = 0; i < (int)size; i++) {
3488       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3489     }
3490     return size;
3491   }
3492 }
3493 
3494 bool os::get_page_info(char *start, page_info* info) {
3495   return false;
3496 }
3497 
3498 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3499                      page_info* page_found) {
3500   return end;
3501 }
3502 
3503 char* os::non_memory_address_word() {
3504   // Must never look like an address returned by reserve_memory,
3505   // even in its subfields (as defined by the CPU immediate fields,
3506   // if the CPU splits constants across multiple instructions).
3507   return (char*)-1;
3508 }
3509 
3510 #define MAX_ERROR_COUNT 100
3511 #define SYS_THREAD_ERROR 0xffffffffUL
3512 
3513 void os::pd_start_thread(Thread* thread) {
3514   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3515   // Returns previous suspend state:
3516   // 0:  Thread was not suspended
3517   // 1:  Thread is running now
3518   // >1: Thread is still suspended.
3519   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3520 }
3521 
3522 class HighResolutionInterval : public CHeapObj<mtThread> {
3523   // The default timer resolution seems to be 10 milliseconds.
3524   // (Where is this written down?)
3525   // If someone wants to sleep for only a fraction of the default,
3526   // then we set the timer resolution down to 1 millisecond for
3527   // the duration of their interval.
3528   // We carefully set the resolution back, since otherwise we
3529   // seem to incur an overhead (3%?) that we don't need.
3530   // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
3531   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3532   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3533   // timeBeginPeriod() if the relative error exceeded some threshold.
3534   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3535   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
3536   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3537   // resolution timers running.
3538  private:
3539   jlong resolution;
3540  public:
3541   HighResolutionInterval(jlong ms) {
3542     resolution = ms % 10L;
3543     if (resolution != 0) {
3544       MMRESULT result = timeBeginPeriod(1L);
3545     }
3546   }
3547   ~HighResolutionInterval() {
3548     if (resolution != 0) {
3549       MMRESULT result = timeEndPeriod(1L);
3550     }
3551     resolution = 0L;
3552   }
3553 };
3554 
3555 int os::sleep(Thread* thread, jlong ms, bool interruptable) {
3556   jlong limit = (jlong) MAXDWORD;
3557 
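       // Sleep() and WaitForMultipleObjects() take a DWORD timeout, so handle
       // requests longer than MAXDWORD ms by sleeping in chunks.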
3558   while (ms > limit) {
3559     int res;
3560     if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
3561       return res;
3562     }
3563     ms -= limit;
3564   }
3565 
3566   assert(thread == Thread::current(), "thread consistency check");
3567   OSThread* osthread = thread->osthread();
3568   OSThreadWaitState osts(osthread, false /* not Object.wait() */);
3569   int result;
3570   if (interruptable) {
3571     assert(thread->is_Java_thread(), "must be java thread");
3572     JavaThread *jt = (JavaThread *) thread;
3573     ThreadBlockInVM tbivm(jt);
3574 
3575     jt->set_suspend_equivalent();
3576     // cleared by handle_special_suspend_equivalent_condition() or
3577     // java_suspend_self() via check_and_wait_while_suspended()
3578 
3579     HANDLE events[1];
3580     events[0] = osthread->interrupt_event();
3581     HighResolutionInterval *phri = NULL;
3582     if (!ForceTimeHighResolution) {
3583       phri = new HighResolutionInterval(ms);
3584     }
3585     if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
3586       result = OS_TIMEOUT;
3587     } else {
3588       ResetEvent(osthread->interrupt_event());
3589       osthread->set_interrupted(false);
3590       result = OS_INTRPT;
3591     }
3592     delete phri; //if it is NULL, harmless
3593 
3594     // were we externally suspended while we were waiting?
3595     jt->check_and_wait_while_suspended();
3596   } else {
3597     assert(!thread->is_Java_thread(), "must not be java thread");
3598     Sleep((long) ms);
3599     result = OS_TIMEOUT;
3600   }
3601   return result;
3602 }
3603 
3604 // Short sleep, direct OS call.
3605 //
3606 // ms = 0, means allow others (if any) to run.
3607 //
3608 void os::naked_short_sleep(jlong ms) {
3609   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3610   Sleep(ms);
3611 }
3612 
3613 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3614 void os::infinite_sleep() {
3615   while (true) {    // sleep forever ...
3616     Sleep(100000);  // ... 100 seconds at a time
3617   }
3618 }
3619 
3620 typedef BOOL (WINAPI * STTSignature)(void);
3621 
3622 void os::naked_yield() {
3623   // Consider passing back the return value from SwitchToThread().
3624   SwitchToThread();
3625 }
3626 
3627 // Win32 only gives you access to seven real priorities at a time,
3628 // so we compress Java's ten down to seven.  It would be better
3629 // if we dynamically adjusted relative priorities.
3630 
3631 int os::java_to_os_priority[CriticalPriority + 1] = {
3632   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3633   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3634   THREAD_PRIORITY_LOWEST,                       // 2
3635   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3636   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3637   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3638   THREAD_PRIORITY_NORMAL,                       // 6
3639   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3640   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3641   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3642   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3643   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3644 };
3645 
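     // Alternate mapping installed by prio_init() when ThreadPriorityPolicy == 1;
     // it spreads Java priorities over a wider range of Windows priorities,
     // up to THREAD_PRIORITY_TIME_CRITICAL.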
3646 int prio_policy1[CriticalPriority + 1] = {
3647   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3648   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3649   THREAD_PRIORITY_LOWEST,                       // 2
3650   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3651   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3652   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3653   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3654   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3655   THREAD_PRIORITY_HIGHEST,                      // 8
3656   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3657   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3658   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3659 };
3660 
3661 static int prio_init() {
3662   // If ThreadPriorityPolicy is 1, switch tables
3663   if (ThreadPriorityPolicy == 1) {
3664     int i;
3665     for (i = 0; i < CriticalPriority + 1; i++) {
3666       os::java_to_os_priority[i] = prio_policy1[i];
3667     }
3668   }
3669   if (UseCriticalJavaThreadPriority) {
3670     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3671   }
3672   return 0;
3673 }
3674 
3675 OSReturn os::set_native_priority(Thread* thread, int priority) {
3676   if (!UseThreadPriorities) return OS_OK;
3677   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3678   return ret ? OS_OK : OS_ERR;
3679 }
3680 
3681 OSReturn os::get_native_priority(const Thread* const thread,
3682                                  int* priority_ptr) {
3683   if (!UseThreadPriorities) {
3684     *priority_ptr = java_to_os_priority[NormPriority];
3685     return OS_OK;
3686   }
3687   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3688   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3689     assert(false, "GetThreadPriority failed");
3690     return OS_ERR;
3691   }
3692   *priority_ptr = os_prio;
3693   return OS_OK;
3694 }
3695 
3696 
3697 // Hint to the underlying OS that a task switch would not be good.
3698 // Void return because it's a hint and can fail.
3699 void os::hint_no_preempt() {}
3700 
3701 void os::interrupt(Thread* thread) {
3702   assert(!thread->is_Java_thread() || Thread::current() == thread ||
3703          Threads_lock->owned_by_self(),
3704          "possibility of dangling Thread pointer");
3705 
3706   OSThread* osthread = thread->osthread();
3707   osthread->set_interrupted(true);
3708   // More than one thread can get here with the same value of osthread,
3709   // resulting in multiple notifications.  We do, however, want the store
3710   // to interrupted() to be visible to other threads before we post
3711   // the interrupt event.
3712   OrderAccess::release();
3713   SetEvent(osthread->interrupt_event());
3714   // For JSR166:  unpark after setting status
3715   if (thread->is_Java_thread()) {
3716     ((JavaThread*)thread)->parker()->unpark();
3717   }
3718 
3719   ParkEvent * ev = thread->_ParkEvent;
3720   if (ev != NULL) ev->unpark();
3721 }
3722 
3723 
3724 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3725   assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3726          "possibility of dangling Thread pointer");
3727 
3728   OSThread* osthread = thread->osthread();
3729   // There is no synchronization between the setting of the interrupt
3730   // and it being cleared here. It is critical - see 6535709 - that
3731   // we only clear the interrupt state, and reset the interrupt event,
3732   // if we are going to report that we were indeed interrupted - else
3733   // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3734   // depending on the timing. By checking the thread's interrupt event we see
3735   // whether it got a real interrupt, and thus prevent spurious wakeups.
3736   bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3737   if (interrupted && clear_interrupted) {
3738     osthread->set_interrupted(false);
3739     ResetEvent(osthread->interrupt_event());
3740   } // Otherwise leave the interrupted state alone
3741 
3742   return interrupted;
3743 }
3744 
3745 // Gets a pc (hint) for a running thread. Currently used only for profiling.
3746 ExtendedPC os::get_thread_pc(Thread* thread) {
3747   CONTEXT context;
3748   context.ContextFlags = CONTEXT_CONTROL;
3749   HANDLE handle = thread->osthread()->thread_handle();
3750 #ifdef _M_IA64
3751   assert(0, "Fix get_thread_pc");
3752   return ExtendedPC(NULL);
3753 #else
3754   if (GetThreadContext(handle, &context)) {
3755 #ifdef _M_AMD64
3756     return ExtendedPC((address) context.Rip);
3757 #else
3758     return ExtendedPC((address) context.Eip);
3759 #endif
3760   } else {
3761     return ExtendedPC(NULL);
3762   }
3763 #endif
3764 }
3765 
3766 // GetCurrentThreadId() returns DWORD
3767 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3768 
3769 static int _initial_pid = 0;
3770 
3771 int os::current_process_id() {
3772   return (_initial_pid ? _initial_pid : _getpid());
3773 }
3774 
3775 int    os::win32::_vm_page_size              = 0;
3776 int    os::win32::_vm_allocation_granularity = 0;
3777 int    os::win32::_processor_type            = 0;
3778 // Processor level is not available on non-NT systems, use vm_version instead
3779 int    os::win32::_processor_level           = 0;
3780 julong os::win32::_physical_memory           = 0;
3781 size_t os::win32::_default_stack_size        = 0;
3782 
3783 intx          os::win32::_os_thread_limit    = 0;
3784 volatile intx os::win32::_os_thread_count    = 0;
3785 
3786 bool   os::win32::_is_windows_server         = false;
3787 
3788 // 6573254
3789 // Currently, the bug is observed across all the supported Windows releases,
3790 // including the latest one (as of this writing - Windows Server 2012 R2)
3791 bool   os::win32::_has_exit_bug              = true;
3792 
3793 void os::win32::initialize_system_info() {
3794   SYSTEM_INFO si;
3795   GetSystemInfo(&si);
3796   _vm_page_size    = si.dwPageSize;
3797   _vm_allocation_granularity = si.dwAllocationGranularity;
3798   _processor_type  = si.dwProcessorType;
3799   _processor_level = si.wProcessorLevel;
3800   set_processor_count(si.dwNumberOfProcessors);
3801 
3802   MEMORYSTATUSEX ms;
3803   ms.dwLength = sizeof(ms);
3804 
3805   // GlobalMemoryStatusEx() also returns ullAvailPhys (free physical memory bytes),
3806   // ullTotalVirtual, ullAvailVirtual, and dwMemoryLoad (% of memory in use).
3807   GlobalMemoryStatusEx(&ms);
3808   _physical_memory = ms.ullTotalPhys;
3809 
3810   if (FLAG_IS_DEFAULT(MaxRAM)) {
3811     // Adjust MaxRAM according to the maximum virtual address space available.
3812     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3813   }
3814 
3815   OSVERSIONINFOEX oi;
3816   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3817   GetVersionEx((OSVERSIONINFO*)&oi);
3818   switch (oi.dwPlatformId) {
3819   case VER_PLATFORM_WIN32_NT:
3820     {
3821       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3822       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3823           oi.wProductType == VER_NT_SERVER) {
3824         _is_windows_server = true;
3825       }
3826     }
3827     break;
3828   default: fatal("Unknown platform");
3829   }
3830 
3831   _default_stack_size = os::current_stack_size();
3832   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3833   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3834          "stack size not a multiple of page size");
3835 
3836   initialize_performance_counter();
3837 }
3838 
3839 
3840 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3841                                       int ebuflen) {
3842   char path[MAX_PATH];
3843   DWORD size;
3844   DWORD pathLen = (DWORD)sizeof(path);
3845   HINSTANCE result = NULL;
3846 
3847   // only allow library name without path component
3848   assert(strchr(name, '\\') == NULL, "path not allowed");
3849   assert(strchr(name, ':') == NULL, "path not allowed");
3850   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3851     jio_snprintf(ebuf, ebuflen,
3852                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3853     return NULL;
3854   }
3855 
3856   // search system directory
3857   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3858     if (size >= pathLen) {
3859       return NULL; // truncated
3860     }
3861     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3862       return NULL; // truncated
3863     }
3864     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3865       return result;
3866     }
3867   }
3868 
3869   // try Windows directory
3870   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3871     if (size >= pathLen) {
3872       return NULL; // truncated
3873     }
3874     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3875       return NULL; // truncated
3876     }
3877     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3878       return result;
3879     }
3880   }
3881 
3882   jio_snprintf(ebuf, ebuflen,
3883                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3884   return NULL;
3885 }
3886 
3887 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3888 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3889 
3890 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3891   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3892   return TRUE;
3893 }
3894 
3895 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3896   // Basic approach:
3897   //  - Each exiting thread registers its intent to exit and then does so.
3898   //  - A thread trying to terminate the process must wait for all
3899   //    threads currently exiting to complete their exit.
3900 
3901   if (os::win32::has_exit_bug()) {
3902     // The array holds handles of the threads that have started exiting by calling
3903     // _endthreadex().
3904     // Should be large enough to avoid blocking the exiting thread due to lack of
3905     // a free slot.
3906     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3907     static int handle_count = 0;
3908 
3909     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3910     static CRITICAL_SECTION crit_sect;
3911     static volatile jint process_exiting = 0;
3912     int i, j;
3913     DWORD res;
3914     HANDLE hproc, hthr;
3915 
3916     // We only attempt to register threads until a process exiting
3917     // thread manages to set the process_exiting flag. Any threads
3918     // that come through here after the process_exiting flag is set
3919     // are unregistered and will be caught in the SuspendThread()
3920     // infinite loop below.
3921     bool registered = false;
3922 
3923     // The first thread that reached this point, initializes the critical section.
3924     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3925       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3926     } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3927       if (what != EPT_THREAD) {
3928         // Atomically set process_exiting before the critical section
3929         // to increase the visibility between racing threads.
3930         Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
3931       }
3932       EnterCriticalSection(&crit_sect);
3933 
3934       if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3935         // Remove from the array those handles of the threads that have completed exiting.
3936         for (i = 0, j = 0; i < handle_count; ++i) {
3937           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3938           if (res == WAIT_TIMEOUT) {
3939             handles[j++] = handles[i];
3940           } else {
3941             if (res == WAIT_FAILED) {
3942               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3943                       GetLastError(), __FILE__, __LINE__);
3944             }
3945             // Don't keep the handle, if we failed waiting for it.
3946             CloseHandle(handles[i]);
3947           }
3948         }
3949 
3950         // If there's no free slot in the array of the kept handles, we'll have to
3951         // wait until at least one thread completes exiting.
3952         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3953           // Raise the priority of the oldest exiting thread to increase its chances
3954           // to complete sooner.
3955           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3956           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3957           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3958             i = (res - WAIT_OBJECT_0);
3959             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3960             for (; i < handle_count; ++i) {
3961               handles[i] = handles[i + 1];
3962             }
3963           } else {
3964             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3965                     (res == WAIT_FAILED ? "failed" : "timed out"),
3966                     GetLastError(), __FILE__, __LINE__);
3967             // Don't keep handles, if we failed waiting for them.
3968             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3969               CloseHandle(handles[i]);
3970             }
3971             handle_count = 0;
3972           }
3973         }
3974 
3975         // Store a duplicate of the current thread handle in the array of handles.
3976         hproc = GetCurrentProcess();
3977         hthr = GetCurrentThread();
3978         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3979                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3980           warning("DuplicateHandle failed (%u) in %s: %d\n",
3981                   GetLastError(), __FILE__, __LINE__);
3982 
3983           // We can't register this thread (no more handles) so this thread
3984           // may be racing with a thread that is calling exit(). If the thread
3985           // that is calling exit() has managed to set the process_exiting
3986           // flag, then this thread will be caught in the SuspendThread()
3987           // infinite loop below which closes that race. A small timing
3988           // window remains before the process_exiting flag is set, but it
3989           // is only exposed when we are out of handles.
3990         } else {
3991           ++handle_count;
3992           registered = true;
3993 
3994           // The current exiting thread has stored its handle in the array, and now
3995           // should leave the critical section before calling _endthreadex().
3996         }
3997 
3998       } else if (what != EPT_THREAD && handle_count > 0) {
3999         jlong start_time, finish_time, timeout_left;
4000         // Before ending the process, make sure all the threads that had called
4001         // _endthreadex() completed.
4002 
4003         // Set the priority level of the current thread to the same value as
4004         // the priority level of exiting threads.
4005         // This is to ensure it will be given a fair chance to execute if
4006         // the timeout expires.
4007         hthr = GetCurrentThread();
4008         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
4009         start_time = os::javaTimeNanos();
4010         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
4011         for (i = 0; ; ) {
4012           int portion_count = handle_count - i;
4013           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
4014             portion_count = MAXIMUM_WAIT_OBJECTS;
4015           }
4016           for (j = 0; j < portion_count; ++j) {
4017             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
4018           }
4019           timeout_left = (finish_time - start_time) / 1000000L;
4020           if (timeout_left < 0) {
4021             timeout_left = 0;
4022           }
4023           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4024           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4025             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4026                     (res == WAIT_FAILED ? "failed" : "timed out"),
4027                     GetLastError(), __FILE__, __LINE__);
4028             // Reset portion_count so we close the remaining
4029             // handles due to this error.
4030             portion_count = handle_count - i;
4031           }
4032           for (j = 0; j < portion_count; ++j) {
4033             CloseHandle(handles[i + j]);
4034           }
4035           if ((i += portion_count) >= handle_count) {
4036             break;
4037           }
4038           start_time = os::javaTimeNanos();
4039         }
4040         handle_count = 0;
4041       }
4042 
4043       LeaveCriticalSection(&crit_sect);
4044     }
4045 
4046     if (!registered &&
4047         OrderAccess::load_acquire(&process_exiting) != 0 &&
4048         process_exiting != (jint)GetCurrentThreadId()) {
4049       // Some other thread is about to call exit(), so we don't let
4050       // the current unregistered thread proceed to exit() or _endthreadex()
4051       while (true) {
4052         SuspendThread(GetCurrentThread());
4053         // Avoid busy-wait loop, if SuspendThread() failed.
4054         Sleep(EXIT_TIMEOUT);
4055       }
4056     }
4057   }
4058 
4059   // We are here if either
4060   // - there's no 'race at exit' bug on this OS release;
4061   // - initialization of the critical section failed (unlikely);
4062   // - the current thread has registered itself and left the critical section;
4063   // - the process-exiting thread has raised the flag and left the critical section.
4064   if (what == EPT_THREAD) {
4065     _endthreadex((unsigned)exit_code);
4066   } else if (what == EPT_PROCESS) {
4067     ::exit(exit_code);
4068   } else {
4069     _exit(exit_code);
4070   }
4071 
4072   // Should not reach here
4073   return exit_code;
4074 }
4075 
4076 #undef EXIT_TIMEOUT
4077 
4078 void os::win32::setmode_streams() {
4079   _setmode(_fileno(stdin), _O_BINARY);
4080   _setmode(_fileno(stdout), _O_BINARY);
4081   _setmode(_fileno(stderr), _O_BINARY);
4082 }
4083 
4084 
4085 bool os::is_debugger_attached() {
4086   return IsDebuggerPresent() ? true : false;
4087 }
4088 
4089 
4090 void os::wait_for_keypress_at_exit(void) {
4091   if (PauseAtExit) {
4092     fprintf(stderr, "Press any key to continue...\n");
4093     fgetc(stdin);
4094   }
4095 }
4096 
4097 
4098 bool os::message_box(const char* title, const char* message) {
4099   int result = MessageBox(NULL, message, title,
4100                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4101   return result == IDYES;
4102 }
4103 
4104 #ifndef PRODUCT
4105 #ifndef _WIN64
4106 // Helpers to check whether NX protection is enabled
4107 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4108   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4109       pex->ExceptionRecord->NumberParameters > 0 &&
4110       pex->ExceptionRecord->ExceptionInformation[0] ==
4111       EXCEPTION_INFO_EXEC_VIOLATION) {
4112     return EXCEPTION_EXECUTE_HANDLER;
4113   }
4114   return EXCEPTION_CONTINUE_SEARCH;
4115 }
4116 
4117 void nx_check_protection() {
4118   // If NX is enabled we'll get an exception calling into code on the stack
4119   char code[] = { (char)0xC3 }; // ret
4120   void *code_ptr = (void *)code;
4121   __try {
4122     __asm call code_ptr
4123   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4124     tty->print_raw_cr("NX protection detected.");
4125   }
4126 }
4127 #endif // _WIN64
4128 #endif // PRODUCT
4129 
4130 // This is called _before_ the global arguments have been parsed
4131 void os::init(void) {
4132   _initial_pid = _getpid();
4133 
4134   init_random(1234567);
4135 
4136   win32::initialize_system_info();
4137   win32::setmode_streams();
4138   init_page_sizes((size_t) win32::vm_page_size());
4139 
4140   // This may be overridden later when argument processing is done.
4141   FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);
4142 
4143   // Initialize main_process and main_thread
4144   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4145   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4146                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4147     fatal("DuplicateHandle failed\n");
4148   }
4149   main_thread_id = (int) GetCurrentThreadId();
4150 
4151   // initialize fast thread access - only used for 32-bit
4152   win32::initialize_thread_ptr_offset();
4153 }
4154 
4155 // To install functions for atexit processing
4156 extern "C" {
4157   static void perfMemory_exit_helper() {
4158     perfMemory_exit();
4159   }
4160 }
4161 
4162 static jint initSock();
4163 
4164 // this is called _after_ the global arguments have been parsed
4165 jint os::init_2(void) {
4166   // Allocate a single page and mark it as readable for safepoint polling
4167   address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
4168   guarantee(polling_page != NULL, "Reserve Failed for polling page");
4169 
4170   address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
4171   guarantee(return_page != NULL, "Commit Failed for polling page");
4172 
4173   os::set_polling_page(polling_page);
4174   log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
4175 
4176   if (!UseMembar) {
4177     address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
4178     guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
4179 
4180     return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
4181     guarantee(return_page != NULL, "Commit Failed for memory serialize page");
4182 
4183     os::set_memory_serialize_page(mem_serialize_page);
4184     log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
4185   }
4186 
4187   // Setup Windows Exceptions
4188 
4189   // for debugging float code generation bugs
4190   if (ForceFloatExceptions) {
4191 #ifndef  _WIN64
4192     static long fp_control_word = 0;
4193     __asm { fstcw fp_control_word }
4194     // see Intel PPro Manual, Vol. 2, p 7-16
4195     const long precision = 0x20;
4196     const long underflow = 0x10;
4197     const long overflow  = 0x08;
4198     const long zero_div  = 0x04;
4199     const long denorm    = 0x02;
4200     const long invalid   = 0x01;
4201     fp_control_word |= invalid;
4202     __asm { fldcw fp_control_word }
4203 #endif
4204   }
4205 
4206   // If stack_commit_size is 0, Windows will reserve the default size,
4207   // but only commit a small portion of it.
4208   size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
4209   size_t default_reserve_size = os::win32::default_stack_size();
4210   size_t actual_reserve_size = stack_commit_size;
4211   if (stack_commit_size < default_reserve_size) {
4212     // If stack_commit_size == 0, we want this too
4213     actual_reserve_size = default_reserve_size;
4214   }
4215 
4216   // Check minimum allowable stack size for thread creation and to initialize
4217   // the java system classes, including StackOverflowError - depends on page
4218   // size.  Add two 4K pages for compiler2 recursion in main thread.
4219   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4220   // class initialization depending on 32 or 64 bit VM.
4221   size_t min_stack_allowed =
4222             (size_t)(JavaThread::stack_guard_zone_size() +
4223                      JavaThread::stack_shadow_zone_size() +
4224                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4225 
4226   min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
4227 
4228   if (actual_reserve_size < min_stack_allowed) {
4229     tty->print_cr("\nThe Java thread stack size specified is too small. "
4230                   "Specify at least %dk",
4231                   min_stack_allowed / K);
4232     return JNI_ERR;
4233   }
4234 
4235   JavaThread::set_stack_size_at_create(stack_commit_size);
4236 
4237   // Calculate the theoretical maximum number of threads, to guard against artificial
4238   // out-of-memory situations where all available address space has been
4239   // reserved by thread stacks.
4240   assert(actual_reserve_size != 0, "Must have a stack");
4241 
4242   // Calculate the thread limit at which we should start doing Virtual Memory
4243   // banging. Currently this is when the threads have used all but 200MB of space.
4244   //
4245   // TODO: consider performing a similar calculation for commit size instead
4246   // of reserve size, since on a 64-bit platform we'll run into that more
4247   // often than running out of virtual memory space.  We can use the
4248   // lower value of the two calculations as the os_thread_limit.
4249   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4250   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4251 
4252   // atexit() functions are called in the reverse order of their registration.
4253   // There is no limit to the number of functions registered. atexit() does
4254   // not set errno.
4255 
4256   if (PerfAllowAtExitRegistration) {
4257     // only register atexit functions if PerfAllowAtExitRegistration is set.
4258     // atexit functions can be delayed until process exit time, which
4259     // can be problematic for embedded VM situations. Embedded VMs should
4260     // call DestroyJavaVM() to assure that VM resources are released.
4261 
4262     // note: perfMemory_exit_helper atexit function may be removed in
4263     // the future if the appropriate cleanup code can be added to the
4264     // VM_Exit VMOperation's doit method.
4265     if (atexit(perfMemory_exit_helper) != 0) {
4266       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4267     }
4268   }
4269 
4270 #ifndef _WIN64
4271   // Print something if NX is enabled (win32 on AMD64)
4272   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4273 #endif
4274 
4275   // initialize thread priority policy
4276   prio_init();
4277 
4278   if (UseNUMA && !ForceNUMA) {
4279     UseNUMA = false; // We don't fully support this yet
4280   }
4281 
4282   if (UseNUMAInterleaving) {
4283     // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4284     bool success = numa_interleaving_init();
4285     if (!success) UseNUMAInterleaving = false;
4286   }
4287 
4288   if (initSock() != JNI_OK) {
4289     return JNI_ERR;
4290   }
4291 
4292   return JNI_OK;
4293 }
4294 
4295 // Mark the polling page as unreadable
4296 void os::make_polling_page_unreadable(void) {
4297   DWORD old_status;
4298   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4299                       PAGE_NOACCESS, &old_status)) {
4300     fatal("Could not disable polling page");
4301   }
4302 }
4303 
4304 // Mark the polling page as readable
4305 void os::make_polling_page_readable(void) {
4306   DWORD old_status;
4307   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4308                       PAGE_READONLY, &old_status)) {
4309     fatal("Could not enable polling page");
4310   }
4311 }
4312 
4313 
4314 int os::stat(const char *path, struct stat *sbuf) {
4315   char pathbuf[MAX_PATH];
4316   if (strlen(path) > MAX_PATH - 1) {
4317     errno = ENAMETOOLONG;
4318     return -1;
4319   }
4320   os::native_path(strcpy(pathbuf, path));
4321   int ret = ::stat(pathbuf, sbuf);
4322   if (sbuf != NULL && UseUTCFileTimestamp) {
4323     // Fix for 6539723.  st_mtime returned from stat() is dependent on
4324     // the system timezone and so can return different values for the
4325     // same file if/when daylight savings time changes.  This adjustment
4326     // makes sure the same timestamp is returned regardless of the TZ.
4327     //
4328     // See:
4329     // http://msdn.microsoft.com/library/
4330     //   default.asp?url=/library/en-us/sysinfo/base/
4331     //   time_zone_information_str.asp
4332     // and
4333     // http://msdn.microsoft.com/library/default.asp?url=
4334     //   /library/en-us/sysinfo/base/settimezoneinformation.asp
4335     //
4336     // NOTE: there is an insidious bug here:  If the timezone is changed
4337     // after the call to stat() but before 'GetTimeZoneInformation()', then
4338     // the adjustment we do here will be wrong and we'll return the wrong
4339     // value (which will likely end up creating an invalid class data
4340     // archive).  Absent a better API for this, or some time zone locking
4341     // mechanism, we'll have to live with this risk.
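         // For example (illustrative): with a US Eastern zone, Bias = 300 and
         // DaylightBias = -60, so during daylight time the adjustment below is
         // (300 - 60) * 60 = 14400 seconds.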
4342     TIME_ZONE_INFORMATION tz;
4343     DWORD tzid = GetTimeZoneInformation(&tz);
4344     int daylightBias =
4345       (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
4346     sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4347   }
4348   return ret;
4349 }
4350 
4351 
4352 #define FT2INT64(ft) \
4353   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
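     // FT2INT64 joins the two DWORD halves of a FILETIME into one 64-bit value.
     // A FILETIME counts 100-nanosecond intervals, which is why the thread CPU
     // time functions below multiply the result by 100 to report nanoseconds.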
4354 
4355 
4356 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4357 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4358 // of a thread.
4359 //
4360 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4361 // the fast estimate available on the platform.
4362 
4363 // current_thread_cpu_time() is not optimized for Windows yet
4364 jlong os::current_thread_cpu_time() {
4365   // return user + sys since the cost is the same
4366   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4367 }
4368 
4369 jlong os::thread_cpu_time(Thread* thread) {
4370   // consistent with what current_thread_cpu_time() returns.
4371   return os::thread_cpu_time(thread, true /* user+sys */);
4372 }
4373 
4374 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4375   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4376 }
4377 
4378 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4379   // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4380   // If this function changes, os::is_thread_cpu_time_supported() should too
4381   FILETIME CreationTime;
4382   FILETIME ExitTime;
4383   FILETIME KernelTime;
4384   FILETIME UserTime;
4385 
4386   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4387                       &ExitTime, &KernelTime, &UserTime) == 0) {
4388     return -1;
4389   } else if (user_sys_cpu_time) {
4390     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4391   } else {
4392     return FT2INT64(UserTime) * 100;
4393   }
4394 }
4395 
4396 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4397   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4398   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4399   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4400   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4401 }
4402 
4403 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4404   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4405   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4406   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4407   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4408 }
4409 
4410 bool os::is_thread_cpu_time_supported() {
4411   // see os::thread_cpu_time
4412   FILETIME CreationTime;
4413   FILETIME ExitTime;
4414   FILETIME KernelTime;
4415   FILETIME UserTime;
4416 
4417   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4418                       &KernelTime, &UserTime) == 0) {
4419     return false;
4420   } else {
4421     return true;
4422   }
4423 }
4424 
4425 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4426 // It does have primitives (PDH API) to get CPU usage and run queue length.
4427 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4428 // If we wanted to implement loadavg on Windows, we have a few options:
4429 //
4430 // a) Query CPU usage and run queue length and "fake" an answer by
4431 //    returning the CPU usage if it's under 100%, and the run queue
4432 //    length otherwise.  It turns out that querying is pretty slow
4433 //    on Windows, on the order of 200 microseconds on a fast machine.
4434 //    Note that on Windows the CPU usage value is the % usage
4435 //    since the last time the API was called (and the first call
4436 //    returns 100%), so we'd have to deal with that as well.
4437 //
4438 // b) Sample the "fake" answer using a sampling thread and store
4439 //    the answer in a global variable.  The call to loadavg would
4440 //    just return the value of the global, avoiding the slow query.
4441 //
4442 // c) Sample a better answer using exponential decay to smooth the
4443 //    value.  This is basically the algorithm used by UNIX kernels.
4444 //
4445 // Note that sampling thread starvation could affect both (b) and (c).
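     //
     // A minimal sketch of (c), assuming a hypothetical sampling thread that wakes
     // every interval_sec seconds and reads the run queue length into 'load'
     // (names are illustrative, not existing VM code):
     //
     //   static double _loadavg_1min = 0.0;
     //   void sample_load(double load, double interval_sec) {
     //     const double decay = exp(-interval_sec / 60.0);   // 1-minute window
     //     _loadavg_1min = _loadavg_1min * decay + load * (1.0 - decay);
     //   }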
4446 int os::loadavg(double loadavg[], int nelem) {
4447   return -1;
4448 }
4449 
4450 
4451 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4452 bool os::dont_yield() {
4453   return DontYieldALot;
4454 }
4455 
4456 // This method is a slightly reworked copy of JDK's sysOpen
4457 // from src/windows/hpi/src/sys_api_md.c
4458 
4459 int os::open(const char *path, int oflag, int mode) {
4460   char pathbuf[MAX_PATH];
4461 
4462   if (strlen(path) > MAX_PATH - 1) {
4463     errno = ENAMETOOLONG;
4464     return -1;
4465   }
4466   os::native_path(strcpy(pathbuf, path));
4467   return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4468 }
4469 
4470 FILE* os::open(int fd, const char* mode) {
4471   return ::_fdopen(fd, mode);
4472 }
4473 
4474 // Is a (classpath) directory empty?
4475 bool os::dir_is_empty(const char* path) {
4476   WIN32_FIND_DATA fd;
4477   HANDLE f = FindFirstFile(path, &fd);
4478   if (f == INVALID_HANDLE_VALUE) {
4479     return true;
4480   }
4481   FindClose(f);
4482   return false;
4483 }
4484 
4485 // create binary file, rewriting existing file if required
4486 int os::create_binary_file(const char* path, bool rewrite_existing) {
4487   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4488   if (!rewrite_existing) {
4489     oflags |= _O_EXCL;
4490   }
4491   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4492 }
4493 
4494 // return current position of file pointer
4495 jlong os::current_file_offset(int fd) {
4496   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4497 }
4498 
4499 // move file pointer to the specified offset
4500 jlong os::seek_to_file_offset(int fd, jlong offset) {
4501   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4502 }
4503 
4504 
4505 jlong os::lseek(int fd, jlong offset, int whence) {
4506   return (jlong) ::_lseeki64(fd, offset, whence);
4507 }
4508 
4509 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4510   OVERLAPPED ov;
4511   DWORD nread;
4512   BOOL result;
4513 
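       // Split the 64-bit file offset into the low/high DWORD fields of the
       // OVERLAPPED structure so ReadFile starts at the requested position.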
4514   ZeroMemory(&ov, sizeof(ov));
4515   ov.Offset = (DWORD)offset;
4516   ov.OffsetHigh = (DWORD)(offset >> 32);
4517 
4518   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4519 
4520   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4521 
4522   return result ? nread : 0;
4523 }
4524 
4525 
4526 // This method is a slightly reworked copy of JDK's sysNativePath
4527 // from src/windows/hpi/src/path_md.c
4528 
4529 // Convert a pathname to native format.  On win32, this involves forcing all
4530 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4531 // sometimes rejects '/') and removing redundant separators.  The input path is
4532 // assumed to have been converted into the character encoding used by the local
4533 // system.  Because this might be a double-byte encoding, care is taken to
4534 // treat double-byte lead characters correctly.
4535 //
4536 // This procedure modifies the given path in place, as the result is never
4537 // longer than the original.  There is no error return; this operation always
4538 // succeeds.
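     //
     // Illustrative conversions performed by this routine:
     //   "a//b/"    -> "a\b"      (separators forced to '\\', trailing one dropped)
     //   "/c:/x//y" -> "c:\x\y"   (leading separator before a drive specifier removed)
     //   "z:"       -> "z:."      (see the C runtime workaround at the end)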
4539 char * os::native_path(char *path) {
4540   char *src = path, *dst = path, *end = path;
4541   char *colon = NULL;  // If a drive specifier is found, this will
4542                        // point to the colon following the drive letter
4543 
4544   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4545   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4546           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4547 
4548   // Check for leading separators
4549 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4550   while (isfilesep(*src)) {
4551     src++;
4552   }
4553 
4554   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4555     // Remove leading separators if followed by drive specifier.  This
4556     // hack is necessary to support file URLs containing drive
4557     // specifiers (e.g., "file://c:/path").  As a side effect,
4558     // "/c:/path" can be used as an alternative to "c:/path".
4559     *dst++ = *src++;
4560     colon = dst;
4561     *dst++ = ':';
4562     src++;
4563   } else {
4564     src = path;
4565     if (isfilesep(src[0]) && isfilesep(src[1])) {
4566       // UNC pathname: Retain first separator; leave src pointed at
4567       // second separator so that further separators will be collapsed
4568       // into the second separator.  The result will be a pathname
4569       // beginning with "\\\\" followed (most likely) by a host name.
4570       src = dst = path + 1;
4571       path[0] = '\\';     // Force first separator to '\\'
4572     }
4573   }
4574 
4575   end = dst;
4576 
4577   // Remove redundant separators from remainder of path, forcing all
4578   // separators to be '\\' rather than '/'. Also, single byte space
4579   // characters are removed from the end of the path because those
4580   // are not legal ending characters on this operating system.
4581   //
4582   while (*src != '\0') {
4583     if (isfilesep(*src)) {
4584       *dst++ = '\\'; src++;
4585       while (isfilesep(*src)) src++;
4586       if (*src == '\0') {
4587         // Check for trailing separator
4588         end = dst;
4589         if (colon == dst - 2) break;  // "z:\\"
4590         if (dst == path + 1) break;   // "\\"
4591         if (dst == path + 2 && isfilesep(path[0])) {
4592           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4593           // beginning of a UNC pathname.  Even though it is not, by
4594           // itself, a valid UNC pathname, we leave it as is in order
4595           // to be consistent with the path canonicalizer as well
4596           // as the win32 APIs, which treat this case as an invalid
4597           // UNC pathname rather than as an alias for the root
4598           // directory of the current drive.
4599           break;
4600         }
4601         end = --dst;  // Path does not denote a root directory, so
4602                       // remove trailing separator
4603         break;
4604       }
4605       end = dst;
4606     } else {
4607       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4608         *dst++ = *src++;
4609         if (*src) *dst++ = *src++;
4610         end = dst;
4611       } else {  // Copy a single-byte character
4612         char c = *src++;
4613         *dst++ = c;
4614         // Space is not a legal ending character
4615         if (c != ' ') end = dst;
4616       }
4617     }
4618   }
4619 
4620   *end = '\0';
4621 
4622   // For "z:", add "." to work around a bug in the C runtime library
4623   if (colon == dst - 1) {
4624     path[2] = '.';
4625     path[3] = '\0';
4626   }
4627 
4628   return path;
4629 }
4630 
4631 // This code is a copy of JDK's sysSetLength
4632 // from src/windows/hpi/src/sys_api_md.c
4633 
4634 int os::ftruncate(int fd, jlong length) {
4635   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4636   long high = (long)(length >> 32);
4637   DWORD ret;
4638 
4639   if (h == (HANDLE)(-1)) {
4640     return -1;
4641   }
4642 
4643   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4644   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4645     return -1;
4646   }
4647 
4648   if (::SetEndOfFile(h) == FALSE) {
4649     return -1;
4650   }
4651 
4652   return 0;
4653 }
4654 
4655 int os::get_fileno(FILE* fp) {
4656   return _fileno(fp);
4657 }
4658 
4659 // This code is a copy of JDK's sysSync
4660 // from src/windows/hpi/src/sys_api_md.c
4661 // except for the legacy workaround for a bug in Win 98
4662 
4663 int os::fsync(int fd) {
4664   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4665 
4666   if ((!::FlushFileBuffers(handle)) &&
4667       (GetLastError() != ERROR_ACCESS_DENIED)) {
4668     // from winerror.h
4669     return -1;
4670   }
4671   return 0;
4672 }
4673 
4674 static int nonSeekAvailable(int, long *);
4675 static int stdinAvailable(int, long *);
4676 
4677 #define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
4678 #define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4679 
4680 // This code is a copy of JDK's sysAvailable
4681 // from src/windows/hpi/src/sys_api_md.c
4682 
4683 int os::available(int fd, jlong *bytes) {
4684   jlong cur, end;
4685   struct _stati64 stbuf64;
4686 
4687   if (::_fstati64(fd, &stbuf64) >= 0) {
4688     int mode = stbuf64.st_mode;
4689     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4690       int ret;
4691       long lpbytes;
4692       if (fd == 0) {
4693         ret = stdinAvailable(fd, &lpbytes);
4694       } else {
4695         ret = nonSeekAvailable(fd, &lpbytes);
4696       }
4697       (*bytes) = (jlong)(lpbytes);
4698       return ret;
4699     }
4700     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4701       return FALSE;
4702     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4703       return FALSE;
4704     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4705       return FALSE;
4706     }
4707     *bytes = end - cur;
4708     return TRUE;
4709   } else {
4710     return FALSE;
4711   }
4712 }
4713 
4714 void os::flockfile(FILE* fp) {
4715   _lock_file(fp);
4716 }
4717 
4718 void os::funlockfile(FILE* fp) {
4719   _unlock_file(fp);
4720 }
4721 
4722 // This code is a copy of JDK's nonSeekAvailable
4723 // from src/windows/hpi/src/sys_api_md.c
4724 
4725 static int nonSeekAvailable(int fd, long *pbytes) {
4726   // This is used for available on non-seekable devices
4727   // (like both named and anonymous pipes, such as pipes
4728   //  connected to an exec'd process).
4729   // Standard Input is a special case.
4730   HANDLE han;
4731 
4732   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4733     return FALSE;
4734   }
4735 
4736   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4737     // PeekNamedPipe fails when at EOF.  In that case we
4738     // simply make *pbytes = 0 which is consistent with the
4739     // behavior we get on Solaris when an fd is at EOF.
4740     // The only alternative is to raise an Exception,
4741     // which isn't really warranted.
4742     //
4743     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4744       return FALSE;
4745     }
4746     *pbytes = 0;
4747   }
4748   return TRUE;
4749 }
4750 
4751 #define MAX_INPUT_EVENTS 2000
4752 
4753 // This code is a copy of JDK's stdinAvailable
4754 // from src/windows/hpi/src/sys_api_md.c
4755 
4756 static int stdinAvailable(int fd, long *pbytes) {
4757   HANDLE han;
4758   DWORD numEventsRead = 0;  // Number of events read from buffer
4759   DWORD numEvents = 0;      // Number of events in buffer
4760   DWORD i = 0;              // Loop index
4761   DWORD curLength = 0;      // Position marker
4762   DWORD actualLength = 0;   // Number of bytes readable
4763   BOOL error = FALSE;       // Error holder
4764   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4765 
4766   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4767     return FALSE;
4768   }
4769 
4770   // Construct an array of input records in the console buffer
4771   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4772   if (error == 0) {
4773     return nonSeekAvailable(fd, pbytes);
4774   }
4775 
4776   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4777   if (numEvents > MAX_INPUT_EVENTS) {
4778     numEvents = MAX_INPUT_EVENTS;
4779   }
4780 
4781   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4782   if (lpBuffer == NULL) {
4783     return FALSE;
4784   }
4785 
4786   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4787   if (error == 0) {
4788     os::free(lpBuffer);
4789     return FALSE;
4790   }
4791 
4792   // Examine input records for the number of bytes available
4793   for (i=0; i<numEvents; i++) {
4794     if (lpBuffer[i].EventType == KEY_EVENT) {
4795 
4796       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4797                                       &(lpBuffer[i].Event);
4798       if (keyRecord->bKeyDown == TRUE) {
4799         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4800         curLength++;
4801         if (*keyPressed == '\r') {
4802           actualLength = curLength;
4803         }
4804       }
4805     }
4806   }
4807 
4808   if (lpBuffer != NULL) {
4809     os::free(lpBuffer);
4810   }
4811 
4812   *pbytes = (long) actualLength;
4813   return TRUE;
4814 }
4815 
4816 // Map a block of memory.
4817 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4818                         char *addr, size_t bytes, bool read_only,
4819                         bool allow_exec) {
4820   HANDLE hFile;
4821   char* base;
4822 
4823   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4824                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4825   if (hFile == INVALID_HANDLE_VALUE) {
4826     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4827     return NULL;
4828   }
4829 
4830   if (allow_exec) {
4831     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4832     // unless it comes from a PE image (which the shared archive is not).
4833     // Even VirtualProtect refuses to give execute access to mapped memory
4834     // that was not previously executable.
4835     //
4836     // Instead, stick the executable region in anonymous memory.  Yuck.
4837     // Penalty is that ~4 pages will not be shareable - in the future
4838     // we might consider DLLizing the shared archive with a proper PE
4839     // header so that mapping executable + sharing is possible.
4840 
4841     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4842                                 PAGE_READWRITE);
4843     if (base == NULL) {
4844       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4845       CloseHandle(hFile);
4846       return NULL;
4847     }
4848 
4849     DWORD bytes_read;
4850     OVERLAPPED overlapped;
4851     overlapped.Offset = (DWORD)file_offset;
4852     overlapped.OffsetHigh = 0;
4853     overlapped.hEvent = NULL;
4854     // ReadFile guarantees that if the return value is true, the requested
4855     // number of bytes were read before returning.
4856     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4857     if (!res) {
4858       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4859       release_memory(base, bytes);
4860       CloseHandle(hFile);
4861       return NULL;
4862     }
4863   } else {
4864     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4865                                     NULL /* file_name */);
4866     if (hMap == NULL) {
4867       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4868       CloseHandle(hFile);
4869       return NULL;
4870     }
4871 
4872     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4873     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4874                                   (DWORD)bytes, addr);
4875     if (base == NULL) {
4876       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4877       CloseHandle(hMap);
4878       CloseHandle(hFile);
4879       return NULL;
4880     }
4881 
4882     if (CloseHandle(hMap) == 0) {
4883       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4884       CloseHandle(hFile);
4885       return base;
4886     }
4887   }
4888 
4889   if (allow_exec) {
4890     DWORD old_protect;
4891     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4892     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4893 
4894     if (!res) {
4895       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4896       // Don't consider this a hard error, on IA32 even if the
4897       // VirtualProtect fails, we should still be able to execute
4898       CloseHandle(hFile);
4899       return base;
4900     }
4901   }
4902 
4903   if (CloseHandle(hFile) == 0) {
4904     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4905     return base;
4906   }
4907 
4908   return base;
4909 }
4910 
4911 
4912 // Remap a block of memory.
4913 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4914                           char *addr, size_t bytes, bool read_only,
4915                           bool allow_exec) {
4916   // This OS does not allow existing memory maps to be remapped so we
4917   // have to unmap the memory before we remap it.
4918   if (!os::unmap_memory(addr, bytes)) {
4919     return NULL;
4920   }
4921 
4922   // There is a very small theoretical window between the unmap_memory()
4923   // call above and the map_memory() call below where a thread in native
4924   // code may be able to access an address that is no longer mapped.
4925 
4926   return os::map_memory(fd, file_name, file_offset, addr, bytes,
4927                         read_only, allow_exec);
4928 }
4929 
4930 
4931 // Unmap a block of memory.
4932 // Returns true=success, otherwise false.
4933 
4934 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4935   MEMORY_BASIC_INFORMATION mem_info;
4936   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4937     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
4938     return false;
4939   }
4940 
4941   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4942   // Instead, executable region was allocated using VirtualAlloc(). See
4943   // pd_map_memory() above.
4944   //
4945   // The following flags should match the 'exec_access' flags used for
4946   // VirtualProtect() in pd_map_memory().
4947   if (mem_info.Protect == PAGE_EXECUTE_READ ||
4948       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4949     return pd_release_memory(addr, bytes);
4950   }
4951 
4952   BOOL result = UnmapViewOfFile(addr);
4953   if (result == 0) {
4954     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
4955     return false;
4956   }
4957   return true;
4958 }
4959 
4960 void os::pause() {
4961   char filename[MAX_PATH];
4962   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4963     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4964   } else {
4965     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4966   }
4967 
4968   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4969   if (fd != -1) {
4970     struct stat buf;
4971     ::close(fd);
4972     while (::stat(filename, &buf) == 0) {
4973       Sleep(100);
4974     }
4975   } else {
4976     jio_fprintf(stderr,
4977                 "Could not open pause file '%s', continuing immediately.\n", filename);
4978   }
4979 }
4980 
4981 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
4982   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
4983 }
4984 
4985 // See the caveats for this class in os_windows.hpp
4986 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back
4987 // into this method and returns false. If no OS EXCEPTION was raised, returns
4988 // true.
4989 // The callback is supposed to provide the method that should be protected.
4990 //
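     // Illustrative usage (callback name is hypothetical):
     //
     //   class SampleTask : public os::CrashProtectionCallback {
     //     virtual void call() { /* work that may fault */ }
     //   };
     //   SampleTask task;
     //   bool ok = os::WatcherThreadCrashProtection().call(task);
     //   // ok == false means an OS exception was raised and swallowed.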
4991 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
4992   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
4993   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
4994          "crash_protection already set?");
4995 
4996   bool success = true;
4997   __try {
4998     WatcherThread::watcher_thread()->set_crash_protection(this);
4999     cb.call();
5000   } __except(EXCEPTION_EXECUTE_HANDLER) {
5001     // only for protection, nothing to do
5002     success = false;
5003   }
5004   WatcherThread::watcher_thread()->set_crash_protection(NULL);
5005   return success;
5006 }
5007 
5008 // An Event wraps a win32 "CreateEvent" kernel handle.
5009 //
5010 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5011 //
5012 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5013 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5014 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5015 //     In addition, an unpark() operation might fetch the handle field, but the
5016 //     event could recycle between the fetch and the SetEvent() operation.
5017 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5018 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5019 //     on a stale but recycled handle would be harmless, but in practice this might
5020 //     confuse other non-Sun code, so it's not a viable approach.
5021 //
5022 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5023 //     with the Event.  The event handle is never closed.  This could be construed
5024 //     as handle leakage, but only up to the maximum # of threads that have been extant
5025 //     at any one time.  This shouldn't be an issue, as windows platforms typically
5026 //     permit a process to have hundreds of thousands of open handles.
5027 //
5028 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5029 //     and release unused handles.
5030 //
5031 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5032 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5033 //
5034 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5035 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5036 //
5037 // We use (2).
5038 //
5039 // TODO-FIXME:
5040 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5041 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5042 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5043 // 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
5044 //     into a single win32 CreateEvent() handle.
5045 //
5046 // Assumption:
5047 //    Only one parker can exist on an event, which is why we allocate
5048 //    them per-thread. Multiple unparkers can coexist.
5049 //
5050 // _Event transitions in park()
5051 //   -1 => -1 : illegal
5052 //    1 =>  0 : pass - return immediately
5053 //    0 => -1 : block; then set _Event to 0 before returning
5054 //
5055 // _Event transitions in unpark()
5056 //    0 => 1 : just return
5057 //    1 => 1 : just return
5058 //   -1 => either 0 or 1; must signal target thread
5059 //         That is, we can safely transition _Event from -1 to either
5060 //         0 or 1.
5061 //
5062 // _Event serves as a restricted-range semaphore.
5063 //   -1 : thread is blocked, i.e. there is a waiter
5064 //    0 : neutral: thread is running or ready,
5065 //        could have been signaled after a wait started
5066 //    1 : signaled - thread is running or ready
5067 //
5068 // Another possible encoding of _Event would be with
5069 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5070 //
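     // Worked example (illustrative): with _Event == 0, an unpark() moves it to 1,
     // so the next park() observes 1, CASes it back to 0 and returns immediately.
     // If park() runs first it moves _Event from 0 to -1 and blocks on _ParkHandle;
     // a later unpark() xchgs _Event to 1 and calls SetEvent() to wake the waiter.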
5071 
5072 int os::PlatformEvent::park(jlong Millis) {
5073   // Transitions for _Event:
5074   //   -1 => -1 : illegal
5075   //    1 =>  0 : pass - return immediately
5076   //    0 => -1 : block; then set _Event to 0 before returning
5077 
5078   guarantee(_ParkHandle != NULL , "Invariant");
5079   guarantee(Millis > 0          , "Invariant");
5080 
5081   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5082   // the initial park() operation.
5083   // Consider: use atomic decrement instead of CAS-loop
5084 
5085   int v;
5086   for (;;) {
5087     v = _Event;
5088     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5089   }
5090   guarantee((v == 0) || (v == 1), "invariant");
5091   if (v != 0) return OS_OK;
5092 
5093   // Do this the hard way by blocking ...
5094   // TODO: consider a brief spin here, gated on the success of recent
5095   // spin attempts by this thread.
5096   //
5097   // We decompose long timeouts into series of shorter timed waits.
5098   // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5099   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5100   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5101   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5102   // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend
5103   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5104   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5105   // for the already waited time.  This policy does not admit any new outcomes.
5106   // In the future, however, we might want to track the accumulated wait time and
5107   // adjust Millis accordingly if we encounter a spurious wakeup.
5108 
5109   const int MAXTIMEOUT = 0x10000000;
5110   DWORD rv = WAIT_TIMEOUT;
5111   while (_Event < 0 && Millis > 0) {
5112     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5113     if (Millis > MAXTIMEOUT) {
5114       prd = MAXTIMEOUT;
5115     }
5116     rv = ::WaitForSingleObject(_ParkHandle, prd);
5117     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5118     if (rv == WAIT_TIMEOUT) {
5119       Millis -= prd;
5120     }
5121   }
5122   v = _Event;
5123   _Event = 0;
5124   // see comment at end of os::PlatformEvent::park() below:
5125   OrderAccess::fence();
5126   // If we encounter a nearly simultaneous timeout expiry and unpark()
5127   // we return OS_OK indicating we awoke via unpark().
5128   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5129   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5130 }
5131 
5132 void os::PlatformEvent::park() {
5133   // Transitions for _Event:
5134   //   -1 => -1 : illegal
5135   //    1 =>  0 : pass - return immediately
5136   //    0 => -1 : block; then set _Event to 0 before returning
5137 
5138   guarantee(_ParkHandle != NULL, "Invariant");
5139   // Invariant: Only the thread associated with the Event/PlatformEvent
5140   // may call park().
5141   // Consider: use atomic decrement instead of CAS-loop
5142   int v;
5143   for (;;) {
5144     v = _Event;
5145     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5146   }
5147   guarantee((v == 0) || (v == 1), "invariant");
5148   if (v != 0) return;
5149 
5150   // Do this the hard way by blocking ...
5151   // TODO: consider a brief spin here, gated on the success of recent
5152   // spin attempts by this thread.
5153   while (_Event < 0) {
5154     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5155     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5156   }
5157 
5158   // Usually we'll find _Event == 0 at this point, but as
5159   // an optional optimization we clear it, in case multiple
5160   // unpark() operations drove _Event up to 1.
5161   _Event = 0;
5162   OrderAccess::fence();
5163   guarantee(_Event >= 0, "invariant");
5164 }
5165 
5166 void os::PlatformEvent::unpark() {
5167   guarantee(_ParkHandle != NULL, "Invariant");
5168 
5169   // Transitions for _Event:
5170   //    0 => 1 : just return
5171   //    1 => 1 : just return
5172   //   -1 => either 0 or 1; must signal target thread
5173   //         That is, we can safely transition _Event from -1 to either
5174   //         0 or 1.
5175   // See also: "Semaphores in Plan 9" by Mullender & Cox
5176   //
5177   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5178   // that it will take two back-to-back park() calls for the owning
5179   // thread to block. This has the benefit of forcing a spurious return
5180   // from the first park() call after an unpark() call which will help
5181   // shake out uses of park() and unpark() without condition variables.
5182 
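       // A previous value of 0 or 1 (>= 0) means no thread is blocked in park(),
       // so the win32 event does not need to be signalled.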
5183   if (Atomic::xchg(1, &_Event) >= 0) return;
5184 
5185   ::SetEvent(_ParkHandle);
5186 }
5187 
5188 
5189 // JSR166
5190 // -------------------------------------------------------
5191 
5192 // The Windows implementation of Park is very straightforward: Basic
5193 // operations on Win32 Events turn out to have the right semantics to
5194 // use them directly. We opportunistically reuse the event inherited
5195 // from Monitor.
5196 
5197 void Parker::park(bool isAbsolute, jlong time) {
5198   guarantee(_ParkEvent != NULL, "invariant");
5199   // First, demultiplex/decode time arguments
5200   if (time < 0) { // don't wait
5201     return;
5202   } else if (time == 0 && !isAbsolute) {
5203     time = INFINITE;
5204   } else if (isAbsolute) {
5205     time -= os::javaTimeMillis(); // convert to relative time
5206     if (time <= 0) {  // already elapsed
5207       return;
5208     }
5209   } else { // relative
5210     time /= 1000000;  // Must coarsen from nanos to millis
5211     if (time == 0) {  // Wait for the minimal time unit if zero
5212       time = 1;
5213     }
5214   }
5215 
5216   JavaThread* thread = JavaThread::current();
5217 
5218   // Don't wait if interrupted or already triggered
5219   if (Thread::is_interrupted(thread, false) ||
5220       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5221     ResetEvent(_ParkEvent);
5222     return;
5223   } else {
5224     ThreadBlockInVM tbivm(thread);
5225     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5226     thread->set_suspend_equivalent();
5227 
5228     WaitForSingleObject(_ParkEvent, time);
5229     ResetEvent(_ParkEvent);
5230 
5231     // If externally suspended while waiting, re-suspend
5232     if (thread->handle_special_suspend_equivalent_condition()) {
5233       thread->java_suspend_self();
5234     }
5235   }
5236 }
5237 
5238 void Parker::unpark() {
5239   guarantee(_ParkEvent != NULL, "invariant");
5240   SetEvent(_ParkEvent);
5241 }
5242 
5243 // Run the specified command in a separate process. Return its exit value,
5244 // or -1 on failure (e.g. can't create a new process).
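     //
     // Illustrative usage (assuming a writable, caller-owned buffer, since
     // CreateProcess may modify the command line it is given):
     //
     //   char cmd[] = "cmd.exe /c echo hello";
     //   int exit_code = os::fork_and_exec(cmd);  // -1 if CreateProcess fails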
5245 int os::fork_and_exec(char* cmd) {
5246   STARTUPINFO si;
5247   PROCESS_INFORMATION pi;
5248 
5249   memset(&si, 0, sizeof(si));
5250   si.cb = sizeof(si);
5251   memset(&pi, 0, sizeof(pi));
5252   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5253                             cmd,    // command line
5254                             NULL,   // process security attribute
5255                             NULL,   // thread security attribute
5256                             TRUE,   // inherits system handles
5257                             0,      // no creation flags
5258                             NULL,   // use parent's environment block
5259                             NULL,   // use parent's starting directory
5260                             &si,    // (in) startup information
5261                             &pi);   // (out) process information
5262 
5263   if (rslt) {
5264     // Wait until child process exits.
5265     WaitForSingleObject(pi.hProcess, INFINITE);
5266 
5267     DWORD exit_code;
5268     GetExitCodeProcess(pi.hProcess, &exit_code);
5269 
5270     // Close process and thread handles.
5271     CloseHandle(pi.hProcess);
5272     CloseHandle(pi.hThread);
5273 
5274     return (int)exit_code;
5275   } else {
5276     return -1;
5277   }
5278 }
5279 
5280 bool os::find(address addr, outputStream* st) {
5281   int offset = -1;
5282   bool result = false;
5283   char buf[256];
5284   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5285     st->print(PTR_FORMAT " ", addr);
5286     if (strlen(buf) < sizeof(buf) - 1) {
5287       char* p = strrchr(buf, '\\');
5288       if (p) {
5289         st->print("%s", p + 1);
5290       } else {
5291         st->print("%s", buf);
5292       }
5293     } else {
5294         // The library name is probably truncated. Let's omit the library name.
5295         // See also JDK-8147512.
5296     }
5297     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5298       st->print("::%s + 0x%x", buf, offset);
5299     }
5300     st->cr();
5301     result = true;
5302   }
5303   return result;
5304 }
5305 
5306 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5307   DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5308 
5309   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
5310     JavaThread* thread = JavaThread::current();
5311     PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5312     address addr = (address) exceptionRecord->ExceptionInformation[1];
5313 
5314     if (os::is_memory_serialize_page(thread, addr)) {
5315       return EXCEPTION_CONTINUE_EXECUTION;
5316     }
5317   }
5318 
5319   return EXCEPTION_CONTINUE_SEARCH;
5320 }
5321 
5322 // We don't build a headless jre for Windows
5323 bool os::is_headless_jre() { return false; }
5324 
5325 static jint initSock() {
5326   WSADATA wsadata;
5327 
5328   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5329     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5330                 ::GetLastError());
5331     return JNI_ERR;
5332   }
5333   return JNI_OK;
5334 }
5335 
5336 struct hostent* os::get_host_by_name(char* name) {
5337   return (struct hostent*)gethostbyname(name);
5338 }
5339 
5340 int os::socket_close(int fd) {
5341   return ::closesocket(fd);
5342 }
5343 
5344 int os::socket(int domain, int type, int protocol) {
5345   return ::socket(domain, type, protocol);
5346 }
5347 
5348 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5349   return ::connect(fd, him, len);
5350 }
5351 
5352 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5353   return ::recv(fd, buf, (int)nBytes, flags);
5354 }
5355 
5356 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5357   return ::send(fd, buf, (int)nBytes, flags);
5358 }
5359 
5360 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5361   return ::send(fd, buf, (int)nBytes, flags);
5362 }
5363 
5364 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5365 #if defined(IA32)
5366   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5367 #elif defined (AMD64)
5368   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5369 #endif
5370 
5371 // returns true if thread could be suspended,
5372 // false otherwise
5373 static bool do_suspend(HANDLE* h) {
5374   if (h != NULL) {
5375     if (SuspendThread(*h) != ~0) {
5376       return true;
5377     }
5378   }
5379   return false;
5380 }
5381 
5382 // resume the thread
5383 // calling resume on an active thread is a no-op
5384 static void do_resume(HANDLE* h) {
5385   if (h != NULL) {
5386     ResumeThread(*h);
5387   }
5388 }
5389 
5390 // retrieve a suspend/resume context capable handle
5391 // from the tid. Caller validates handle return value.
5392 void get_thread_handle_for_extended_context(HANDLE* h,
5393                                             OSThread::thread_id_t tid) {
5394   if (h != NULL) {
5395     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5396   }
5397 }
5398 
5399 // Thread sampling implementation
5400 //
5401 void os::SuspendedThreadTask::internal_do_task() {
5402   CONTEXT    ctxt;
5403   HANDLE     h = NULL;
5404 
5405   // get context capable handle for thread
5406   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5407 
5408   // sanity
5409   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5410     return;
5411   }
5412 
5413   // suspend the thread
5414   if (do_suspend(&h)) {
5415     ctxt.ContextFlags = sampling_context_flags;
5416     // get thread context
5417     GetThreadContext(h, &ctxt);
5418     SuspendedThreadTaskContext context(_thread, &ctxt);
5419     // pass context to Thread Sampling impl
5420     do_task(context);
5421     // resume thread
5422     do_resume(&h);
5423   }
5424 
5425   // close handle
5426   CloseHandle(h);
5427 }
5428 
5429 bool os::start_debugging(char *buf, int buflen) {
5430   int len = (int)strlen(buf);
5431   char *p = &buf[len];
5432 
5433   jio_snprintf(p, buflen-len,
5434              "\n\n"
5435              "Do you want to debug the problem?\n\n"
5436              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5437              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5438              "Otherwise, select 'No' to abort...",
5439              os::current_process_id(), os::current_thread_id());
5440 
5441   bool yes = os::message_box("Unexpected Error", buf);
5442 
5443   if (yes) {
5444     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5445     // exception. If VM is running inside a debugger, the debugger will
5446     // catch the exception. Otherwise, the breakpoint exception will reach
5447     // the default windows exception handler, which can spawn a debugger and
5448     // automatically attach to the dying VM.
5449     os::breakpoint();
5450     yes = false;
5451   }
5452   return yes;
5453 }
5454 
5455 void* os::get_default_process_handle() {
5456   return (void*)GetModuleHandle(NULL);
5457 }
5458 
5459 // Builds a platform-dependent Agent_OnLoad_<lib_name> function name
5460 // which is used to find statically linked-in agents.
5461 // Additionally for Windows, takes into account __stdcall names.
5462 // Parameters:
5463 //            sym_name: Symbol in library we are looking for
5464 //            lib_name: Name of library to look in, NULL for shared libs.
5465 //            is_absolute_path == true if lib_name is absolute path to agent
5466 //                                     such as "C:/a/b/L.dll"
5467 //            == false if only the base name of the library is passed in
5468 //               such as "L"
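     // For example (illustrative): sym_name "Agent_OnLoad" with lib_name "L"
     // yields "Agent_OnLoad_L", while the __stdcall-decorated "_Agent_OnLoad@16"
     // yields "_Agent_OnLoad_L@16".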
5469 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5470                                     bool is_absolute_path) {
5471   char *agent_entry_name;
5472   size_t len;
5473   size_t name_len;
5474   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5475   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5476   const char *start;
5477 
5478   if (lib_name != NULL) {
5479     len = name_len = strlen(lib_name);
5480     if (is_absolute_path) {
5481       // Need to strip path, prefix and suffix
5482       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5483         lib_name = ++start;
5484       } else {
5485         // Need to check for drive prefix
5486         if ((start = strchr(lib_name, ':')) != NULL) {
5487           lib_name = ++start;
5488         }
5489       }
5490       if (len <= (prefix_len + suffix_len)) {
5491         return NULL;
5492       }
5493       lib_name += prefix_len;
5494       name_len = strlen(lib_name) - suffix_len;
5495     }
5496   }
5497   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5498   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5499   if (agent_entry_name == NULL) {
5500     return NULL;
5501   }
5502   if (lib_name != NULL) {
5503     const char *p = strrchr(sym_name, '@');
5504     if (p != NULL && p != sym_name) {
5505       // sym_name == _Agent_OnLoad@XX
5506       strncpy(agent_entry_name, sym_name, (p - sym_name));
5507       agent_entry_name[(p-sym_name)] = '\0';
5508       // agent_entry_name == _Agent_OnLoad
5509       strcat(agent_entry_name, "_");
5510       strncat(agent_entry_name, lib_name, name_len);
5511       strcat(agent_entry_name, p);
5512       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5513     } else {
5514       strcpy(agent_entry_name, sym_name);
5515       strcat(agent_entry_name, "_");
5516       strncat(agent_entry_name, lib_name, name_len);
5517     }
5518   } else {
5519     strcpy(agent_entry_name, sym_name);
5520   }
5521   return agent_entry_name;
5522 }
5523 
5524 #ifndef PRODUCT
5525 
5526 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5527 // contiguous memory block at a particular address.
5528 // The test first tries to find a good approximate address to allocate at by using the same
5529 // method to allocate some memory at any address. The test then tries to allocate memory in
5530 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5531 // This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5532 // the previously allocated memory is available for allocation. The only actual failure
5533 // that is reported is when the test tries to allocate at a particular location but gets a
5534 // different valid one. A NULL return value at this point is not considered an error but may
5535 // be legitimate.
5536 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5537 void TestReserveMemorySpecial_test() {
5538   if (!UseLargePages) {
5539     if (VerboseInternalVMTests) {
5540       tty->print("Skipping test because large pages are disabled");
5541     }
5542     return;
5543   }
5544   // save current value of globals
5545   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5546   bool old_use_numa_interleaving = UseNUMAInterleaving;
5547 
5548   // set globals to make sure we hit the correct code path
5549   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5550 
5551   // do an allocation at an address selected by the OS to get a good one.
5552   const size_t large_allocation_size = os::large_page_size() * 4;
5553   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5554   if (result == NULL) {
5555     if (VerboseInternalVMTests) {
5556       tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5557                           large_allocation_size);
5558     }
5559   } else {
5560     os::release_memory_special(result, large_allocation_size);
5561 
5562     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5563     // we managed to get it once.
5564     const size_t expected_allocation_size = os::large_page_size();
5565     char* expected_location = result + os::large_page_size();
5566     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5567     if (actual_location == NULL) {
5568       if (VerboseInternalVMTests) {
5569         tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5570                             expected_location, large_allocation_size);
5571       }
5572     } else {
5573       // release memory
5574       os::release_memory_special(actual_location, expected_allocation_size);
5575       // only now check, after releasing any memory to avoid any leaks.
5576       assert(actual_location == expected_location,
5577              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5578              expected_location, expected_allocation_size, actual_location);
5579     }
5580   }
5581 
5582   // restore globals
5583   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5584   UseNUMAInterleaving = old_use_numa_interleaving;
5585 }
5586 #endif // PRODUCT
5587 
5588 /*
5589   All the defined signal names for Windows.
5590 
5591   NOTE that not all of these names are accepted by FindSignal!
5592 
5593   For various reasons some of these may be rejected at runtime.
5594 
5595   Here are the names currently accepted by a user of sun.misc.Signal with
5596   1.4.1 (ignoring potential interaction with use of chaining, etc):
5597 
5598      (LIST TBD)
5599 
5600 */
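     // For example, os::get_signal_number("TERM") returns SIGTERM, while a name
     // not in the table below (e.g. "HUP") returns -1 on this platform.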
5601 int os::get_signal_number(const char* name) {
5602   static const struct {
5603     const char* name;
5604     int   number;
5605   } siglabels [] =
5606     // derived from version 6.0 VC98/include/signal.h
5607   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5608   "FPE",        SIGFPE,         // floating point exception
5609   "SEGV",       SIGSEGV,        // segment violation
5610   "INT",        SIGINT,         // interrupt
5611   "TERM",       SIGTERM,        // software term signal from kill
5612   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5613   "ILL",        SIGILL};        // illegal instruction
5614   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5615     if (strcmp(name, siglabels[i].name) == 0) {
5616       return siglabels[i].number;
5617     }
5618   }
5619   return -1;
5620 }
5621 
5622 // Fast current thread access
5623 
5624 int os::win32::_thread_ptr_offset = 0;
5625 
5626 static void call_wrapper_dummy() {}
5627 
5628 // We need to call the os_exception_wrapper once so that it sets
5629 // up the offset from FS of the thread pointer.
5630 void os::win32::initialize_thread_ptr_offset() {
5631   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5632                            NULL, NULL, NULL, NULL);
5633 }