1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "classfile/classLoader.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/disassembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "jvm_windows.h"
  38 #include "logging/log.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/filemap.hpp"
  41 #include "mutex_windows.inline.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm.h"
  47 #include "prims/jvm_misc.hpp"
  48 #include "runtime/arguments.hpp"
  49 #include "runtime/atomic.inline.hpp"
  50 #include "runtime/extendedPC.hpp"
  51 #include "runtime/globals.hpp"
  52 #include "runtime/interfaceSupport.hpp"
  53 #include "runtime/java.hpp"
  54 #include "runtime/javaCalls.hpp"
  55 #include "runtime/mutexLocker.hpp"
  56 #include "runtime/objectMonitor.hpp"
  57 #include "runtime/orderAccess.inline.hpp"
  58 #include "runtime/osThread.hpp"
  59 #include "runtime/perfMemory.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/statSampler.hpp"
  62 #include "runtime/stubRoutines.hpp"
  63 #include "runtime/thread.inline.hpp"
  64 #include "runtime/threadCritical.hpp"
  65 #include "runtime/timer.hpp"
  66 #include "runtime/vm_version.hpp"
  67 #include "semaphore_windows.hpp"
  68 #include "services/attachListener.hpp"
  69 #include "services/memTracker.hpp"
  70 #include "services/runtimeService.hpp"
  71 #include "utilities/decoder.hpp"
  72 #include "utilities/defaultStream.hpp"
  73 #include "utilities/events.hpp"
  74 #include "utilities/growableArray.hpp"
  75 #include "utilities/macros.hpp"
  76 #include "utilities/vmError.hpp"
  77 
  78 #ifdef _DEBUG
  79 #include <crtdbg.h>
  80 #endif
  81 
  82 
  83 #include <windows.h>
  84 #include <sys/types.h>
  85 #include <sys/stat.h>
  86 #include <sys/timeb.h>
  87 #include <objidl.h>
  88 #include <shlobj.h>
  89 
  90 #include <malloc.h>
  91 #include <signal.h>
  92 #include <direct.h>
  93 #include <errno.h>
  94 #include <fcntl.h>
  95 #include <io.h>
  96 #include <process.h>              // For _beginthreadex(), _endthreadex()
  97 #include <imagehlp.h>             // For os::dll_address_to_function_name
  98 // for enumerating dll libraries
  99 #include <vdmdbg.h>
 100 
 101 // for timer info max values which include all bits
 102 #define ALL_64_BITS CONST64(-1)
 103 
 104 // For DLL loading/load error detection
 105 // Values of PE COFF
 106 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 107 #define IMAGE_FILE_SIGNATURE_LENGTH 4
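     // In a PE image, the 4-byte value at file offset 0x3c holds the offset of
     // the "PE\0\0" signature; the COFF file header (whose first field is the
     // machine type) follows immediately after that 4-byte signature.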
 108 
 109 static HANDLE main_process;
 110 static HANDLE main_thread;
 111 static int    main_thread_id;
 112 
 113 static FILETIME process_creation_time;
 114 static FILETIME process_exit_time;
 115 static FILETIME process_user_time;
 116 static FILETIME process_kernel_time;
 117 
 118 #ifdef _M_IA64
 119   #define __CPU__ ia64
 120 #else
 121   #ifdef _M_AMD64
 122     #define __CPU__ amd64
 123   #else
 124     #define __CPU__ i486
 125   #endif
 126 #endif
 127 
 128 // save DLL module handle, used by GetModuleFileName
 129 
 130 HINSTANCE vm_lib_handle;
 131 
 132 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 133   switch (reason) {
 134   case DLL_PROCESS_ATTACH:
 135     vm_lib_handle = hinst;
 136     if (ForceTimeHighResolution) {
 137       timeBeginPeriod(1L);
 138     }
 139     break;
 140   case DLL_PROCESS_DETACH:
 141     if (ForceTimeHighResolution) {
 142       timeEndPeriod(1L);
 143     }
 144     break;
 145   default:
 146     break;
 147   }
 148   return true;
 149 }
 150 
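     // Convert a FILETIME (a 64-bit count of 100-ns units, split into two
     // 32-bit halves) into an approximate number of seconds, as a double.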
 151 static inline double fileTimeAsDouble(FILETIME* time) {
 152   const double high  = (double) ((unsigned int) ~0);
 153   const double split = 10000000.0;
 154   double result = (time->dwLowDateTime / split) +
 155                    time->dwHighDateTime * (high/split);
 156   return result;
 157 }
 158 
 159 // Implementation of os
 160 
 161 bool os::unsetenv(const char* name) {
 162   assert(name != NULL, "Null pointer");
 163   return (SetEnvironmentVariable(name, NULL) == TRUE);
 164 }
 165 
 166 // No setuid programs under Windows.
 167 bool os::have_special_privileges() {
 168   return false;
 169 }
 170 
 171 
 172 // This method is a periodic task to check for misbehaving JNI applications
 173 // under CheckJNI; we can add any periodic checks here.
 174 // On Windows it currently does nothing.
 175 void os::run_periodic_checks() {
 176   return;
 177 }
 178 
 179 // previous UnhandledExceptionFilter, if there is one
 180 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 181 
 182 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 183 
 184 void os::init_system_properties_values() {
 185   // sysclasspath, java_home, dll_dir
 186   {
 187     char *home_path;
 188     char *dll_path;
 189     char *pslash;
 190     char *bin = "\\bin";
 191     char home_dir[MAX_PATH + 1];
 192     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 193 
 194     if (alt_home_dir != NULL)  {
 195       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 196       home_dir[MAX_PATH] = '\0';
 197     } else {
 198       os::jvm_path(home_dir, sizeof(home_dir));
 199       // Found the full path to jvm.dll.
 200       // Now cut the path to <java_home>/jre if we can.
 201       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 202       pslash = strrchr(home_dir, '\\');
 203       if (pslash != NULL) {
 204         *pslash = '\0';                   // get rid of \{client|server}
 205         pslash = strrchr(home_dir, '\\');
 206         if (pslash != NULL) {
 207           *pslash = '\0';                 // get rid of \bin
 208         }
 209       }
 210     }
 211 
 212     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 213     if (home_path == NULL) {
 214       return;
 215     }
 216     strcpy(home_path, home_dir);
 217     Arguments::set_java_home(home_path);
 218     FREE_C_HEAP_ARRAY(char, home_path);
 219 
 220     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 221                                 mtInternal);
 222     if (dll_path == NULL) {
 223       return;
 224     }
 225     strcpy(dll_path, home_dir);
 226     strcat(dll_path, bin);
 227     Arguments::set_dll_dir(dll_path);
 228     FREE_C_HEAP_ARRAY(char, dll_path);
 229 
 230     if (!set_boot_path('\\', ';')) {
 231       return;
 232     }
 233   }
 234 
 235 // library_path
 236 #define EXT_DIR "\\lib\\ext"
 237 #define BIN_DIR "\\bin"
 238 #define PACKAGE_DIR "\\Sun\\Java"
 239   {
 240     // Win32 library search order (See the documentation for LoadLibrary):
 241     //
 242     // 1. The directory from which the application is loaded.
 243     // 2. The system wide Java Extensions directory (Java only)
 244     // 3. System directory (GetSystemDirectory)
 245     // 4. Windows directory (GetWindowsDirectory)
 246     // 5. The PATH environment variable
 247     // 6. The current directory
 248 
 249     char *library_path;
 250     char tmp[MAX_PATH];
 251     char *path_str = ::getenv("PATH");
 252 
 253     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 254                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 255 
 256     library_path[0] = '\0';
 257 
 258     GetModuleFileName(NULL, tmp, sizeof(tmp));
 259     *(strrchr(tmp, '\\')) = '\0';
 260     strcat(library_path, tmp);
 261 
 262     GetWindowsDirectory(tmp, sizeof(tmp));
 263     strcat(library_path, ";");
 264     strcat(library_path, tmp);
 265     strcat(library_path, PACKAGE_DIR BIN_DIR);
 266 
 267     GetSystemDirectory(tmp, sizeof(tmp));
 268     strcat(library_path, ";");
 269     strcat(library_path, tmp);
 270 
 271     GetWindowsDirectory(tmp, sizeof(tmp));
 272     strcat(library_path, ";");
 273     strcat(library_path, tmp);
 274 
 275     if (path_str) {
 276       strcat(library_path, ";");
 277       strcat(library_path, path_str);
 278     }
 279 
 280     strcat(library_path, ";.");
 281 
 282     Arguments::set_library_path(library_path);
 283     FREE_C_HEAP_ARRAY(char, library_path);
 284   }
 285 
 286   // Default extensions directory
 287   {
 288     char path[MAX_PATH];
 289     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 290     GetWindowsDirectory(path, MAX_PATH);
 291     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 292             path, PACKAGE_DIR, EXT_DIR);
 293     Arguments::set_ext_dirs(buf);
 294   }
 295   #undef EXT_DIR
 296   #undef BIN_DIR
 297   #undef PACKAGE_DIR
 298 
 299 #ifndef _WIN64
 300   // set our UnhandledExceptionFilter and save any previous one
 301   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 302 #endif
 303 
 304   // Done
 305   return;
 306 }
 307 
 308 void os::breakpoint() {
 309   DebugBreak();
 310 }
 311 
 312 // Invoked from the BREAKPOINT Macro
 313 extern "C" void breakpoint() {
 314   os::breakpoint();
 315 }
 316 
 317 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 318 // So far, this method is only used by Native Memory Tracking, which is
 319 // only supported on Windows XP or later.
 320 //
 321 int os::get_native_stack(address* stack, int frames, int toSkip) {
 322 #ifdef _NMT_NOINLINE_
 323   toSkip++;
 324 #endif
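       // The "+ 1" below also skips this os::get_native_stack() frame itself.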
 325   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 326   for (int index = captured; index < frames; index ++) {
 327     stack[index] = NULL;
 328   }
 329   return captured;
 330 }
 331 
 332 
 333 // os::current_stack_base()
 334 //
 335 //   Returns the base of the stack, which is the stack's
 336 //   starting address.  This function must be called
 337 //   while running on the stack of the thread being queried.
 338 
 339 address os::current_stack_base() {
 340   MEMORY_BASIC_INFORMATION minfo;
 341   address stack_bottom;
 342   size_t stack_size;
 343 
 344   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 345   stack_bottom =  (address)minfo.AllocationBase;
 346   stack_size = minfo.RegionSize;
 347 
 348   // Add up the sizes of all the regions with the same
 349   // AllocationBase.
 350   while (1) {
 351     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 352     if (stack_bottom == (address)minfo.AllocationBase) {
 353       stack_size += minfo.RegionSize;
 354     } else {
 355       break;
 356     }
 357   }
 358 
 359 #ifdef _M_IA64
 360   // IA64 has memory and register stacks
 361   //
 362   // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
 363   // at thread creation (1MB backing store growing upwards, 1MB memory stack
 364   // growing downwards, 2MB summed up)
 365   //
 366   // ...
 367   // ------- top of stack (high address) -----
 368   // |
 369   // |      1MB
 370   // |      Backing Store (Register Stack)
 371   // |
 372   // |         / \
 373   // |          |
 374   // |          |
 375   // |          |
 376   // ------------------------ stack base -----
 377   // |      1MB
 378   // |      Memory Stack
 379   // |
 380   // |          |
 381   // |          |
 382   // |          |
 383   // |         \ /
 384   // |
 385   // ----- bottom of stack (low address) -----
 386   // ...
 387 
 388   stack_size = stack_size / 2;
 389 #endif
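       // The stack grows downwards, so the base (highest address) is the
       // bottom of the allocation plus the accumulated region sizes.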
 390   return stack_bottom + stack_size;
 391 }
 392 
 393 size_t os::current_stack_size() {
 394   size_t sz;
 395   MEMORY_BASIC_INFORMATION minfo;
 396   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 397   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 398   return sz;
 399 }
 400 
 401 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 402   const struct tm* time_struct_ptr = localtime(clock);
 403   if (time_struct_ptr != NULL) {
 404     *res = *time_struct_ptr;
 405     return res;
 406   }
 407   return NULL;
 408 }
 409 
 410 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 411 
 412 // Thread start routine for all newly created threads
 413 static unsigned __stdcall thread_native_entry(Thread* thread) {
 414   // Try to randomize the cache line index of hot stack frames.
 415   // This helps when threads with the same stack traces evict each other's
 416   // cache lines. The threads can be either from the same JVM instance, or
 417   // from different JVM instances. The benefit is especially noticeable on
 418   // processors with hyperthreading technology.
 419   static int counter = 0;
 420   int pid = os::current_process_id();
 421   _alloca(((pid ^ counter++) & 7) * 128);
 422 
 423   thread->initialize_thread_current();
 424 
 425   OSThread* osthr = thread->osthread();
 426   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 427 
 428   if (UseNUMA) {
 429     int lgrp_id = os::numa_get_group_id();
 430     if (lgrp_id != -1) {
 431       thread->set_lgrp_id(lgrp_id);
 432     }
 433   }
 434 
 435   // Diagnostic code to investigate JDK-6573254
 436   int res = 30115;  // non-java thread
 437   if (thread->is_Java_thread()) {
 438     res = 20115;    // java thread
 439   }
 440 
 441   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 442 
 443   // Install a win32 structured exception handler around every thread created
 444   // by the VM, so the VM can generate an error dump when an exception occurs
 445   // in a non-Java thread (e.g. the VM thread).
 446   __try {
 447     thread->run();
 448   } __except(topLevelExceptionFilter(
 449                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 450     // Nothing to do.
 451   }
 452 
 453   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 454 
 455   // One less thread is executing
 456   // When the VMThread gets here, the main thread may have already exited
 457   // which frees the CodeHeap containing the Atomic::add code
 458   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 459     Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
 460   }
 461 
 462   // If a thread has not deleted itself ("delete this") as part of its
 463   // termination sequence, we have to ensure thread-local-storage is
 464   // cleared before we actually terminate. No threads should ever be
 465   // deleted asynchronously with respect to their termination.
 466   if (Thread::current_or_null_safe() != NULL) {
 467     assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
 468     thread->clear_thread_current();
 469   }
 470 
 471   // Thread must not return from exit_process_or_thread(), but if it does,
 472   // let it proceed to exit normally
 473   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 474 }
 475 
 476 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 477                                   int thread_id) {
 478   // Allocate the OSThread object
 479   OSThread* osthread = new OSThread(NULL, NULL);
 480   if (osthread == NULL) return NULL;
 481 
 482   // Initialize support for Java interrupts
 483   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 484   if (interrupt_event == NULL) {
 485     delete osthread;
 486     return NULL;
 487   }
 488   osthread->set_interrupt_event(interrupt_event);
 489 
 490   // Store info on the Win32 thread into the OSThread
 491   osthread->set_thread_handle(thread_handle);
 492   osthread->set_thread_id(thread_id);
 493 
 494   if (UseNUMA) {
 495     int lgrp_id = os::numa_get_group_id();
 496     if (lgrp_id != -1) {
 497       thread->set_lgrp_id(lgrp_id);
 498     }
 499   }
 500 
 501   // Initial thread state is INITIALIZED, not SUSPENDED
 502   osthread->set_state(INITIALIZED);
 503 
 504   return osthread;
 505 }
 506 
 507 
 508 bool os::create_attached_thread(JavaThread* thread) {
 509 #ifdef ASSERT
 510   thread->verify_not_published();
 511 #endif
 512   HANDLE thread_h;
 513   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 514                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 515     fatal("DuplicateHandle failed\n");
 516   }
 517   OSThread* osthread = create_os_thread(thread, thread_h,
 518                                         (int)current_thread_id());
 519   if (osthread == NULL) {
 520     return false;
 521   }
 522 
 523   // Initial thread state is RUNNABLE
 524   osthread->set_state(RUNNABLE);
 525 
 526   thread->set_osthread(osthread);
 527 
 528   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 529     os::current_thread_id());
 530 
 531   return true;
 532 }
 533 
 534 bool os::create_main_thread(JavaThread* thread) {
 535 #ifdef ASSERT
 536   thread->verify_not_published();
 537 #endif
 538   if (_starting_thread == NULL) {
 539     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 540     if (_starting_thread == NULL) {
 541       return false;
 542     }
 543   }
 544 
 545   // The primordial thread is runnable from the start
 546   _starting_thread->set_state(RUNNABLE);
 547 
 548   thread->set_osthread(_starting_thread);
 549   return true;
 550 }
 551 
 552 // Helper function to trace _beginthreadex attributes,
 553 //  similar to os::Posix::describe_pthread_attr()
 554 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 555                                                size_t stacksize, unsigned initflag) {
 556   stringStream ss(buf, buflen);
 557   if (stacksize == 0) {
 558     ss.print("stacksize: default, ");
 559   } else {
 560     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 561   }
 562   ss.print("flags: ");
 563   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 564   #define ALL(X) \
 565     X(CREATE_SUSPENDED) \
 566     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 567   ALL(PRINT_FLAG)
 568   #undef ALL
 569   #undef PRINT_FLAG
 570   return buf;
 571 }
 572 
 573 // Allocate and initialize a new OSThread
 574 bool os::create_thread(Thread* thread, ThreadType thr_type,
 575                        size_t stack_size) {
 576   unsigned thread_id;
 577 
 578   // Allocate the OSThread object
 579   OSThread* osthread = new OSThread(NULL, NULL);
 580   if (osthread == NULL) {
 581     return false;
 582   }
 583 
 584   // Initialize support for Java interrupts
 585   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 586   if (interrupt_event == NULL) {
 587     delete osthread;
 588     return false;
 589   }
 590   osthread->set_interrupt_event(interrupt_event);
 591   osthread->set_interrupted(false);
 592 
 593   thread->set_osthread(osthread);
 594 
 595   if (stack_size == 0) {
 596     switch (thr_type) {
 597     case os::java_thread:
 598       // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
 599       if (JavaThread::stack_size_at_create() > 0) {
 600         stack_size = JavaThread::stack_size_at_create();
 601       }
 602       break;
 603     case os::compiler_thread:
 604       if (CompilerThreadStackSize > 0) {
 605         stack_size = (size_t)(CompilerThreadStackSize * K);
 606         break;
 607       } // else fall through:
 608         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 609     case os::vm_thread:
 610     case os::pgc_thread:
 611     case os::cgc_thread:
 612     case os::watcher_thread:
 613       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 614       break;
 615     }
 616   }
 617 
 618   // Create the Win32 thread
 619   //
 620   // Contrary to what the MSDN documentation says, "stack_size" in
 621   // _beginthreadex() does not specify the stack size. Instead, it specifies
 622   // the size of the initially committed space. The stack size is determined
 623   // by the PE header in the executable. If the committed "stack_size" is
 624   // larger than the default value in the PE header, the stack is rounded up
 625   // to the nearest multiple of 1MB. For example, if the launcher has a
 626   // default stack size of 320k, specifying any size less than 320k does not
 627   // affect the actual stack size at all; it only affects the initial
 628   // commitment. On the other hand, specifying a 'stack_size' larger than the
 629   // default value may cause a significant increase in memory usage, because
 630   // not only is the stack space rounded up to a multiple of 1MB, but the
 631   // entire space is also committed upfront.
 632   //
 633   // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
 634   // for CreateThread() that makes it treat 'stack_size' as the stack size.
 635   // However, we are not supposed to call CreateThread() directly according to
 636   // the MSDN documentation, because the JVM uses the C runtime library. The
 637   // good news is that the flag appears to work with _beginthreadex() as well.
 638 
 639   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 640   HANDLE thread_handle =
 641     (HANDLE)_beginthreadex(NULL,
 642                            (unsigned)stack_size,
 643                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 644                            thread,
 645                            initflag,
 646                            &thread_id);
 647 
 648   char buf[64];
 649   if (thread_handle != NULL) {
 650     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 651       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 652   } else {
 653     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 654       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 655   }
 656 
 657   if (thread_handle == NULL) {
 658     // Need to clean up stuff we've allocated so far
 659     CloseHandle(osthread->interrupt_event());
 660     thread->set_osthread(NULL);
 661     delete osthread;
 662     return false;
 663   }
 664 
 665   Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
 666 
 667   // Store info on the Win32 thread into the OSThread
 668   osthread->set_thread_handle(thread_handle);
 669   osthread->set_thread_id(thread_id);
 670 
 671   // Initial thread state is INITIALIZED, not SUSPENDED
 672   osthread->set_state(INITIALIZED);
 673 
 674   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 675   return true;
 676 }
 677 
 678 
 679 // Free Win32 resources related to the OSThread
 680 void os::free_thread(OSThread* osthread) {
 681   assert(osthread != NULL, "osthread not set");
 682 
 683   // We are told to free resources of the argument thread,
 684   // but we can only really operate on the current thread.
 685   assert(Thread::current()->osthread() == osthread,
 686          "os::free_thread but not current thread");
 687 
 688   CloseHandle(osthread->thread_handle());
 689   CloseHandle(osthread->interrupt_event());
 690   delete osthread;
 691 }
 692 
 693 static jlong first_filetime;
 694 static jlong initial_performance_count;
 695 static jlong performance_frequency;
 696 
 697 
 698 jlong as_long(LARGE_INTEGER x) {
 699   jlong result = 0; // initialization to avoid warning
 700   set_high(&result, x.HighPart);
 701   set_low(&result, x.LowPart);
 702   return result;
 703 }
 704 
 705 
 706 jlong os::elapsed_counter() {
 707   LARGE_INTEGER count;
 708   QueryPerformanceCounter(&count);
 709   return as_long(count) - initial_performance_count;
 710 }
 711 
 712 
 713 jlong os::elapsed_frequency() {
 714   return performance_frequency;
 715 }
 716 
 717 
 718 julong os::available_memory() {
 719   return win32::available_memory();
 720 }
 721 
 722 julong os::win32::available_memory() {
 723   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
 724   // value if total memory is larger than 4GB
 725   MEMORYSTATUSEX ms;
 726   ms.dwLength = sizeof(ms);
 727   GlobalMemoryStatusEx(&ms);
 728 
 729   return (julong)ms.ullAvailPhys;
 730 }
 731 
 732 julong os::physical_memory() {
 733   return win32::physical_memory();
 734 }
 735 
 736 bool os::has_allocatable_memory_limit(julong* limit) {
 737   MEMORYSTATUSEX ms;
 738   ms.dwLength = sizeof(ms);
 739   GlobalMemoryStatusEx(&ms);
 740 #ifdef _LP64
 741   *limit = (julong)ms.ullAvailVirtual;
 742   return true;
 743 #else
 744   // Limit to 1400m because of the 2gb address space wall
 745   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 746   return true;
 747 #endif
 748 }
 749 
 750 int os::active_processor_count() {
 751   DWORD_PTR lpProcessAffinityMask = 0;
 752   DWORD_PTR lpSystemAffinityMask = 0;
 753   int proc_count = processor_count();
 754   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 755       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 756     // The number of active processors is the number of set bits in the process affinity mask
 757     int bitcount = 0;
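         // Each iteration clears the lowest set bit, so the loop counts the set bits.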
 758     while (lpProcessAffinityMask != 0) {
 759       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 760       bitcount++;
 761     }
 762     return bitcount;
 763   } else {
 764     return proc_count;
 765   }
 766 }
 767 
 768 void os::set_native_thread_name(const char *name) {
 769 
 770   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 771   //
 772   // Note that unfortunately this only works if the process
 773   // is already attached to a debugger; debugger must observe
 774   // the exception below to show the correct name.
 775 
 776   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 777   struct {
 778     DWORD dwType;     // must be 0x1000
 779     LPCSTR szName;    // pointer to name (in user addr space)
 780     DWORD dwThreadID; // thread ID (-1=caller thread)
 781     DWORD dwFlags;    // reserved for future use, must be zero
 782   } info;
 783 
 784   info.dwType = 0x1000;
 785   info.szName = name;
 786   info.dwThreadID = -1;
 787   info.dwFlags = 0;
 788 
 789   __try {
 790     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 791   } __except(EXCEPTION_CONTINUE_EXECUTION) {}
 792 }
 793 
 794 bool os::distribute_processes(uint length, uint* distribution) {
 795   // Not yet implemented.
 796   return false;
 797 }
 798 
 799 bool os::bind_to_processor(uint processor_id) {
 800   // Not yet implemented.
 801   return false;
 802 }
 803 
 804 void os::win32::initialize_performance_counter() {
 805   LARGE_INTEGER count;
 806   QueryPerformanceFrequency(&count);
 807   performance_frequency = as_long(count);
 808   QueryPerformanceCounter(&count);
 809   initial_performance_count = as_long(count);
 810 }
 811 
 812 
 813 double os::elapsedTime() {
 814   return (double) elapsed_counter() / (double) elapsed_frequency();
 815 }
 816 
 817 
 818 // Windows format:
 819 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 820 // Java format:
 821 //   Java standards require the number of milliseconds since 1/1/1970
 822 
 823 // Constant offset - calculated using offset()
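     // 11,644,473,600 seconds between 1601-01-01 and 1970-01-01, in 100-ns units.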
 824 static jlong  _offset   = 116444736000000000;
 825 // Fake time counter for reproducible results when debugging
 826 static jlong  fake_time = 0;
 827 
 828 #ifdef ASSERT
 829 // Just to be safe, recalculate the offset in debug mode
 830 static jlong _calculated_offset = 0;
 831 static int   _has_calculated_offset = 0;
 832 
 833 jlong offset() {
 834   if (_has_calculated_offset) return _calculated_offset;
 835   SYSTEMTIME java_origin;
 836   java_origin.wYear          = 1970;
 837   java_origin.wMonth         = 1;
 838   java_origin.wDayOfWeek     = 0; // ignored
 839   java_origin.wDay           = 1;
 840   java_origin.wHour          = 0;
 841   java_origin.wMinute        = 0;
 842   java_origin.wSecond        = 0;
 843   java_origin.wMilliseconds  = 0;
 844   FILETIME jot;
 845   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 846     fatal("Error = %d\nWindows error", GetLastError());
 847   }
 848   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 849   _has_calculated_offset = 1;
 850   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 851   return _calculated_offset;
 852 }
 853 #else
 854 jlong offset() {
 855   return _offset;
 856 }
 857 #endif
 858 
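     // Convert a Windows FILETIME (100-ns units since 1601-01-01) to Java time
     // (milliseconds since 1970-01-01).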
 859 jlong windows_to_java_time(FILETIME wt) {
 860   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 861   return (a - offset()) / 10000;
 862 }
 863 
 864 // Returns time ticks in tenths of microseconds (100-ns units)
 865 jlong windows_to_time_ticks(FILETIME wt) {
 866   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 867   return (a - offset());
 868 }
 869 
 870 FILETIME java_to_windows_time(jlong l) {
 871   jlong a = (l * 10000) + offset();
 872   FILETIME result;
 873   result.dwHighDateTime = high(a);
 874   result.dwLowDateTime  = low(a);
 875   return result;
 876 }
 877 
 878 bool os::supports_vtime() { return true; }
 879 bool os::enable_vtime() { return false; }
 880 bool os::vtime_enabled() { return false; }
 881 
 882 double os::elapsedVTime() {
 883   FILETIME created;
 884   FILETIME exited;
 885   FILETIME kernel;
 886   FILETIME user;
 887   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 888     // the resolution of windows_to_java_time() should be sufficient (ms)
 889     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 890   } else {
 891     return elapsedTime();
 892   }
 893 }
 894 
 895 jlong os::javaTimeMillis() {
 896   if (UseFakeTimers) {
 897     return fake_time++;
 898   } else {
 899     FILETIME wt;
 900     GetSystemTimeAsFileTime(&wt);
 901     return windows_to_java_time(wt);
 902   }
 903 }
 904 
 905 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 906   FILETIME wt;
 907   GetSystemTimeAsFileTime(&wt);
 908   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 909   jlong secs = jlong(ticks / 10000000); // 10^7 100-ns ticks per second
 910   seconds = secs;
 911   nanos = jlong(ticks - (secs*10000000)) * 100;
 912 }
 913 
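     // Elapsed time in nanoseconds, derived from the performance counter
     // (QueryPerformanceCounter); see os::javaTimeNanos_info() below for the
     // characteristics reported for this timer.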
 914 jlong os::javaTimeNanos() {
 915   LARGE_INTEGER current_count;
 916   QueryPerformanceCounter(&current_count);
 917   double current = as_long(current_count);
 918   double freq = performance_frequency;
 919   jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 920   return time;
 921 }
 922 
 923 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 924   jlong freq = performance_frequency;
 925   if (freq < NANOSECS_PER_SEC) {
 926     // the performance counter is 64 bits and we will
 927     // be multiplying it -- so no wrap in 64 bits
 928     info_ptr->max_value = ALL_64_BITS;
 929   } else if (freq > NANOSECS_PER_SEC) {
 930     // use the max value the counter can reach to
 931     // determine the max value which could be returned
 932     julong max_counter = (julong)ALL_64_BITS;
 933     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 934   } else {
 935     // the performance counter is 64 bits and we will
 936     // be using it directly -- so no wrap in 64 bits
 937     info_ptr->max_value = ALL_64_BITS;
 938   }
 939 
 940   // using a counter, so no skipping
 941   info_ptr->may_skip_backward = false;
 942   info_ptr->may_skip_forward = false;
 943 
 944   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 945 }
 946 
 947 char* os::local_time_string(char *buf, size_t buflen) {
 948   SYSTEMTIME st;
 949   GetLocalTime(&st);
 950   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 951                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 952   return buf;
 953 }
 954 
 955 bool os::getTimesSecs(double* process_real_time,
 956                       double* process_user_time,
 957                       double* process_system_time) {
 958   HANDLE h_process = GetCurrentProcess();
 959   FILETIME create_time, exit_time, kernel_time, user_time;
 960   BOOL result = GetProcessTimes(h_process,
 961                                 &create_time,
 962                                 &exit_time,
 963                                 &kernel_time,
 964                                 &user_time);
 965   if (result != 0) {
 966     FILETIME wt;
 967     GetSystemTimeAsFileTime(&wt);
 968     jlong rtc_millis = windows_to_java_time(wt);
 969     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 970     *process_user_time =
 971       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 972     *process_system_time =
 973       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
 974     return true;
 975   } else {
 976     return false;
 977   }
 978 }
 979 
 980 void os::shutdown() {
 981   // allow PerfMemory to attempt cleanup of any persistent resources
 982   perfMemory_exit();
 983 
 984   // flush buffered output, finish log files
 985   ostream_abort();
 986 
 987   // Check for abort hook
 988   abort_hook_t abort_hook = Arguments::abort_hook();
 989   if (abort_hook != NULL) {
 990     abort_hook();
 991   }
 992 }
 993 
 994 
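     // Pointer to dbghelp.dll's MiniDumpWriteDump(), resolved dynamically in
     // os::abort() below.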
 995 static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
 996                                          PMINIDUMP_EXCEPTION_INFORMATION,
 997                                          PMINIDUMP_USER_STREAM_INFORMATION,
 998                                          PMINIDUMP_CALLBACK_INFORMATION);
 999 
1000 static HANDLE dumpFile = NULL;
1001 
1002 // Check if dump file can be created.
1003 void os::check_dump_limit(char* buffer, size_t buffsz) {
1004   bool status = true;
1005   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1006     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1007     status = false;
1008   }
1009 
1010 #ifndef ASSERT
1011   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1012     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1013     status = false;
1014   }
1015 #endif
1016 
1017   if (status) {
1018     const char* cwd = get_current_directory(NULL, 0);
1019     int pid = current_process_id();
1020     if (cwd != NULL) {
1021       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1022     } else {
1023       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1024     }
1025 
1026     if (dumpFile == NULL &&
1027        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1028                  == INVALID_HANDLE_VALUE) {
1029       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1030       status = false;
1031     }
1032   }
1033   VMError::record_coredump_status(buffer, status);
1034 }
1035 
1036 void os::abort(bool dump_core, void* siginfo, const void* context) {
1037   HINSTANCE dbghelp;
1038   EXCEPTION_POINTERS ep;
1039   MINIDUMP_EXCEPTION_INFORMATION mei;
1040   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1041 
1042   HANDLE hProcess = GetCurrentProcess();
1043   DWORD processId = GetCurrentProcessId();
1044   MINIDUMP_TYPE dumpType;
1045 
1046   shutdown();
1047   if (!dump_core || dumpFile == NULL) {
1048     if (dumpFile != NULL) {
1049       CloseHandle(dumpFile);
1050     }
1051     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1052   }
1053 
1054   dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
1055 
1056   if (dbghelp == NULL) {
1057     jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
1058     CloseHandle(dumpFile);
1059     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1060   }
1061 
1062   _MiniDumpWriteDump =
1063       CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1064                                     PMINIDUMP_EXCEPTION_INFORMATION,
1065                                     PMINIDUMP_USER_STREAM_INFORMATION,
1066                                     PMINIDUMP_CALLBACK_INFORMATION),
1067                                     GetProcAddress(dbghelp,
1068                                     "MiniDumpWriteDump"));
1069 
1070   if (_MiniDumpWriteDump == NULL) {
1071     jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
1072     CloseHandle(dumpFile);
1073     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1074   }
1075 
1076   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1077     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1078 
1079   if (siginfo != NULL && context != NULL) {
1080     ep.ContextRecord = (PCONTEXT) context;
1081     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1082 
1083     mei.ThreadId = GetCurrentThreadId();
1084     mei.ExceptionPointers = &ep;
1085     pmei = &mei;
1086   } else {
1087     pmei = NULL;
1088   }
1089 
1090   // Older versions of dbghelp.dll (the one shipped with Win2003, for example) may not support all
1091   // the dump types we really want. If the first call fails, fall back to just using MiniDumpWithFullMemory.
1092   if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
1093       _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
1094     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1095   }
1096   CloseHandle(dumpFile);
1097   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1098 }
1099 
1100 // Die immediately, no exit hook, no abort hook, no cleanup.
1101 void os::die() {
1102   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1103 }
1104 
1105 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1106 //  * dirent_md.c       1.15 00/02/02
1107 //
1108 // The declarations for DIR and struct dirent are in jvm_win32.h.
1109 
1110 // Caller must have already run dirname through JVM_NativePath, which removes
1111 // duplicate slashes and converts all instances of '/' into '\\'.
1112 
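     // Typical usage (a sketch only; the dbuf argument is unused on Windows,
     // see os::readdir below):
     //   DIR* dir = os::opendir(path);
     //   if (dir != NULL) {
     //     struct dirent dbuf;
     //     struct dirent* entry;
     //     while ((entry = os::readdir(dir, &dbuf)) != NULL) {
     //       // ... use entry->d_name ...
     //     }
     //     os::closedir(dir);
     //   }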
1113 DIR * os::opendir(const char *dirname) {
1114   assert(dirname != NULL, "just checking");   // hotspot change
1115   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1116   DWORD fattr;                                // hotspot change
1117   char alt_dirname[4] = { 0, 0, 0, 0 };
1118 
1119   if (dirp == 0) {
1120     errno = ENOMEM;
1121     return 0;
1122   }
1123 
1124   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1125   // as a directory in FindFirstFile().  We detect this case here and
1126   // prepend the current drive name.
1127   //
1128   if (dirname[1] == '\0' && dirname[0] == '\\') {
1129     alt_dirname[0] = _getdrive() + 'A' - 1;
1130     alt_dirname[1] = ':';
1131     alt_dirname[2] = '\\';
1132     alt_dirname[3] = '\0';
1133     dirname = alt_dirname;
1134   }
1135 
1136   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1137   if (dirp->path == 0) {
1138     free(dirp);
1139     errno = ENOMEM;
1140     return 0;
1141   }
1142   strcpy(dirp->path, dirname);
1143 
1144   fattr = GetFileAttributes(dirp->path);
1145   if (fattr == 0xffffffff) {
1146     free(dirp->path);
1147     free(dirp);
1148     errno = ENOENT;
1149     return 0;
1150   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1151     free(dirp->path);
1152     free(dirp);
1153     errno = ENOTDIR;
1154     return 0;
1155   }
1156 
1157   // Append "*.*", or possibly "\\*.*", to path
1158   if (dirp->path[1] == ':' &&
1159       (dirp->path[2] == '\0' ||
1160       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1161     // No '\\' needed for cases like "Z:" or "Z:\"
1162     strcat(dirp->path, "*.*");
1163   } else {
1164     strcat(dirp->path, "\\*.*");
1165   }
1166 
1167   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1168   if (dirp->handle == INVALID_HANDLE_VALUE) {
1169     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1170       free(dirp->path);
1171       free(dirp);
1172       errno = EACCES;
1173       return 0;
1174     }
1175   }
1176   return dirp;
1177 }
1178 
1179 // parameter dbuf unused on Windows
1180 struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
1181   assert(dirp != NULL, "just checking");      // hotspot change
1182   if (dirp->handle == INVALID_HANDLE_VALUE) {
1183     return 0;
1184   }
1185 
1186   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1187 
1188   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1189     if (GetLastError() == ERROR_INVALID_HANDLE) {
1190       errno = EBADF;
1191       return 0;
1192     }
1193     FindClose(dirp->handle);
1194     dirp->handle = INVALID_HANDLE_VALUE;
1195   }
1196 
1197   return &dirp->dirent;
1198 }
1199 
1200 int os::closedir(DIR *dirp) {
1201   assert(dirp != NULL, "just checking");      // hotspot change
1202   if (dirp->handle != INVALID_HANDLE_VALUE) {
1203     if (!FindClose(dirp->handle)) {
1204       errno = EBADF;
1205       return -1;
1206     }
1207     dirp->handle = INVALID_HANDLE_VALUE;
1208   }
1209   free(dirp->path);
1210   free(dirp);
1211   return 0;
1212 }
1213 
1214 // This must be hard coded because it's the system's temporary
1215 // directory, not the Java application's temp directory (a la java.io.tmpdir).
1216 const char* os::get_temp_directory() {
1217   static char path_buf[MAX_PATH];
1218   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1219     return path_buf;
1220   } else {
1221     path_buf[0] = '\0';
1222     return path_buf;
1223   }
1224 }
1225 
1226 static bool file_exists(const char* filename) {
1227   if (filename == NULL || strlen(filename) == 0) {
1228     return false;
1229   }
1230   return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1231 }
1232 
1233 bool os::dll_build_name(char *buffer, size_t buflen,
1234                         const char* pname, const char* fname) {
1235   bool retval = false;
1236   const size_t pnamelen = pname ? strlen(pname) : 0;
1237   const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1238 
1239   // Return error on buffer overflow.
1240   if (pnamelen + strlen(fname) + 10 > buflen) {
1241     return retval;
1242   }
1243 
1244   if (pnamelen == 0) {
1245     jio_snprintf(buffer, buflen, "%s.dll", fname);
1246     retval = true;
1247   } else if (c == ':' || c == '\\') {
1248     jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1249     retval = true;
1250   } else if (strchr(pname, *os::path_separator()) != NULL) {
1251     int n;
1252     char** pelements = split_path(pname, &n);
1253     if (pelements == NULL) {
1254       return false;
1255     }
1256     for (int i = 0; i < n; i++) {
1257       char* path = pelements[i];
1258       // Really shouldn't be NULL, but check can't hurt
1259       size_t plen = (path == NULL) ? 0 : strlen(path);
1260       if (plen == 0) {
1261         continue; // skip the empty path values
1262       }
1263       const char lastchar = path[plen - 1];
1264       if (lastchar == ':' || lastchar == '\\') {
1265         jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1266       } else {
1267         jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1268       }
1269       if (file_exists(buffer)) {
1270         retval = true;
1271         break;
1272       }
1273     }
1274     // release the storage
1275     for (int i = 0; i < n; i++) {
1276       if (pelements[i] != NULL) {
1277         FREE_C_HEAP_ARRAY(char, pelements[i]);
1278       }
1279     }
1280     if (pelements != NULL) {
1281       FREE_C_HEAP_ARRAY(char*, pelements);
1282     }
1283   } else {
1284     jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1285     retval = true;
1286   }
1287   return retval;
1288 }
1289 
1290 // Needs to be in os specific directory because windows requires another
1291 // header file <direct.h>
1292 const char* os::get_current_directory(char *buf, size_t buflen) {
1293   int n = static_cast<int>(buflen);
1294   if (buflen > INT_MAX)  n = INT_MAX;
1295   return _getcwd(buf, n);
1296 }
1297 
1298 //-----------------------------------------------------------
1299 // Helper functions for fatal error handler
1300 #ifdef _WIN64
1301 // Helper routine which returns true if the address is
1302 // within the NTDLL address space.
1303 //
1304 static bool _addr_in_ntdll(address addr) {
1305   HMODULE hmod;
1306   MODULEINFO minfo;
1307 
1308   hmod = GetModuleHandle("NTDLL.DLL");
1309   if (hmod == NULL) return false;
1310   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1311                                           &minfo, sizeof(MODULEINFO))) {
1312     return false;
1313   }
1314 
1315   if ((addr >= minfo.lpBaseOfDll) &&
1316       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1317     return true;
1318   } else {
1319     return false;
1320   }
1321 }
1322 #endif
1323 
1324 struct _modinfo {
1325   address addr;
1326   char*   full_path;   // point to a char buffer
1327   int     buflen;      // size of the buffer
1328   address base_addr;
1329 };
1330 
1331 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1332                                   address top_address, void * param) {
1333   struct _modinfo *pmod = (struct _modinfo *)param;
1334   if (!pmod) return -1;
1335 
1336   if (base_addr   <= pmod->addr &&
1337       top_address > pmod->addr) {
1338     // if a buffer is provided, copy path name to the buffer
1339     if (pmod->full_path) {
1340       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1341     }
1342     pmod->base_addr = base_addr;
1343     return 1;
1344   }
1345   return 0;
1346 }
1347 
1348 bool os::dll_address_to_library_name(address addr, char* buf,
1349                                      int buflen, int* offset) {
1350   // buf is not optional, but offset is optional
1351   assert(buf != NULL, "sanity check");
1352 
1353 // NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
1354 //       return the full path to the DLL file; sometimes it returns the path
1355 //       to the corresponding PDB file (debug info), and sometimes it only
1356 //       returns a partial path, which makes life painful.
1357 
1358   struct _modinfo mi;
1359   mi.addr      = addr;
1360   mi.full_path = buf;
1361   mi.buflen    = buflen;
1362   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1363     // buf already contains path name
1364     if (offset) *offset = addr - mi.base_addr;
1365     return true;
1366   }
1367 
1368   buf[0] = '\0';
1369   if (offset) *offset = -1;
1370   return false;
1371 }
1372 
1373 bool os::dll_address_to_function_name(address addr, char *buf,
1374                                       int buflen, int *offset,
1375                                       bool demangle) {
1376   // buf is not optional, but offset is optional
1377   assert(buf != NULL, "sanity check");
1378 
1379   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1380     return true;
1381   }
1382   if (offset != NULL)  *offset  = -1;
1383   buf[0] = '\0';
1384   return false;
1385 }
1386 
1387 // save the start and end address of jvm.dll into param[0] and param[1]
1388 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1389                            address top_address, void * param) {
1390   if (!param) return -1;
1391 
1392   if (base_addr   <= (address)_locate_jvm_dll &&
1393       top_address > (address)_locate_jvm_dll) {
1394     ((address*)param)[0] = base_addr;
1395     ((address*)param)[1] = top_address;
1396     return 1;
1397   }
1398   return 0;
1399 }
1400 
1401 address vm_lib_location[2];    // start and end address of jvm.dll
1402 
1403 // check if addr is inside jvm.dll
1404 bool os::address_is_in_vm(address addr) {
1405   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1406     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1407       assert(false, "Can't find jvm module.");
1408       return false;
1409     }
1410   }
1411 
1412   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1413 }
1414 
1415 // print module info; param is outputStream*
1416 static int _print_module(const char* fname, address base_address,
1417                          address top_address, void* param) {
1418   if (!param) return -1;
1419 
1420   outputStream* st = (outputStream*)param;
1421 
1422   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1423   return 0;
1424 }
1425 
1426 // Loads a .dll/.so and,
1427 // in case of error, checks whether the .dll/.so was built for the
1428 // same architecture that Hotspot is running on.
1429 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1430   void * result = LoadLibrary(name);
1431   if (result != NULL) {
1432     return result;
1433   }
1434 
1435   DWORD errcode = GetLastError();
1436   if (errcode == ERROR_MOD_NOT_FOUND) {
1437     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1438     ebuf[ebuflen - 1] = '\0';
1439     return NULL;
1440   }
1441 
1442   // Parse the dll below.
1443   // If we can read the dll-info and find that the dll was built
1444   // for an architecture other than the one Hotspot is running on,
1445   // then print "DLL was built for a different architecture" to the buffer;
1446   // otherwise call os::lasterror to obtain the system error message.
1447 
1448   // Read system error message into ebuf
1449   // It may or may not be overwritten below (in the for loop and just above)
1450   lasterror(ebuf, (size_t) ebuflen);
1451   ebuf[ebuflen - 1] = '\0';
1452   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1453   if (fd < 0) {
1454     return NULL;
1455   }
1456 
1457   uint32_t signature_offset;
1458   uint16_t lib_arch = 0;
1459   bool failed_to_get_lib_arch =
1460     ( // Go to position 3c in the dll
1461      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1462      ||
1463      // Read location of signature
1464      (sizeof(signature_offset) !=
1465      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1466      ||
1467      // Go to COFF File Header in dll
1468      // that is located after "signature" (4 bytes long)
1469      (os::seek_to_file_offset(fd,
1470      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1471      ||
1472      // Read field that contains code of architecture
1473      // that dll was built for
1474      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1475     );
1476 
1477   ::close(fd);
1478   if (failed_to_get_lib_arch) {
1479     // file i/o error - report os::lasterror(...) msg
1480     return NULL;
1481   }
1482 
1483   typedef struct {
1484     uint16_t arch_code;
1485     char* arch_name;
1486   } arch_t;
1487 
1488   static const arch_t arch_array[] = {
1489     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1490     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1491     {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
1492   };
1493 #if   (defined _M_IA64)
1494   static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
1495 #elif (defined _M_AMD64)
1496   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1497 #elif (defined _M_IX86)
1498   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1499 #else
1500   #error Method os::dll_load requires that one of the following \
1501          is defined: _M_IA64, _M_AMD64 or _M_IX86
1502 #endif
1503 
1504 
1505   // Obtain strings for the printf operation:
1506   // lib_arch_str shall contain the platform this .dll was built for,
1507   // running_arch_str shall contain the platform Hotspot was built for.
1508   char *running_arch_str = NULL, *lib_arch_str = NULL;
1509   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1510     if (lib_arch == arch_array[i].arch_code) {
1511       lib_arch_str = arch_array[i].arch_name;
1512     }
1513     if (running_arch == arch_array[i].arch_code) {
1514       running_arch_str = arch_array[i].arch_name;
1515     }
1516   }
1517 
1518   assert(running_arch_str,
1519          "Didn't find running architecture code in arch_array");
1520 
1521   // If the architecture is right
1522   // but some other error took place - report os::lasterror(...) msg
1523   if (lib_arch == running_arch) {
1524     return NULL;
1525   }
1526 
1527   if (lib_arch_str != NULL) {
1528     ::_snprintf(ebuf, ebuflen - 1,
1529                 "Can't load %s-bit .dll on a %s-bit platform",
1530                 lib_arch_str, running_arch_str);
1531   } else {
1532     // don't know what architecture this dll was built for
1533     ::_snprintf(ebuf, ebuflen - 1,
1534                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1535                 lib_arch, running_arch_str);
1536   }
1537 
1538   return NULL;
1539 }
1540 
1541 void os::print_dll_info(outputStream *st) {
1542   st->print_cr("Dynamic libraries:");
1543   get_loaded_modules_info(_print_module, (void *)st);
1544 }
1545 
1546 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1547   HANDLE   hProcess;
1548 
1549 # define MAX_NUM_MODULES 128
1550   HMODULE     modules[MAX_NUM_MODULES];
1551   static char filename[MAX_PATH];
1552   int         result = 0;
1553 
1554   int pid = os::current_process_id();
1555   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1556                          FALSE, pid);
1557   if (hProcess == NULL) return 0;
1558 
1559   DWORD size_needed;
1560   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1561     CloseHandle(hProcess);
1562     return 0;
1563   }
1564 
1565   // number of modules that are currently loaded
1566   int num_modules = size_needed / sizeof(HMODULE);
1567 
1568   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1569     // Get Full pathname:
1570     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1571       filename[0] = '\0';
1572     }
1573 
1574     MODULEINFO modinfo;
1575     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1576       modinfo.lpBaseOfDll = NULL;
1577       modinfo.SizeOfImage = 0;
1578     }
1579 
1580     // Invoke callback function
1581     result = callback(filename, (address)modinfo.lpBaseOfDll,
1582                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1583     if (result) break;
1584   }
1585 
1586   CloseHandle(hProcess);
1587   return result;
1588 }
1589 
1590 bool os::get_host_name(char* buf, size_t buflen) {
1591   DWORD size = (DWORD)buflen;
1592   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1593 }
1594 
1595 void os::get_summary_os_info(char* buf, size_t buflen) {
1596   stringStream sst(buf, buflen);
1597   os::win32::print_windows_version(&sst);
1598   // chop off newline character
1599   char* nl = strchr(buf, '\n');
1600   if (nl != NULL) *nl = '\0';
1601 }
1602 
1603 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1604   int ret = vsnprintf(buf, len, fmt, args);
1605   // Get the correct buffer size if buf is too small
1606   if (ret < 0) {
1607     return _vscprintf(fmt, args);
1608   }
1609   return ret;
1610 }
1611 
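     // Return the last-modification time of 'filename'; asserts (in debug
     // builds) that the underlying os::stat() call succeeds.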
1612 static inline time_t get_mtime(const char* filename) {
1613   struct stat st;
1614   int ret = os::stat(filename, &st);
1615   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
1616   return st.st_mtime;
1617 }
1618 
1619 int os::compare_file_modified_times(const char* file1, const char* file2) {
1620   time_t t1 = get_mtime(file1);
1621   time_t t2 = get_mtime(file2);
1622   return t1 - t2;
1623 }
1624 
1625 void os::print_os_info_brief(outputStream* st) {
1626   os::print_os_info(st);
1627 }
1628 
1629 void os::print_os_info(outputStream* st) {
1630 #ifdef ASSERT
1631   char buffer[1024];
1632   st->print("HostName: ");
1633   if (get_host_name(buffer, sizeof(buffer))) {
1634     st->print("%s ", buffer);
1635   } else {
1636     st->print("N/A ");
1637   }
1638 #endif
1639   st->print("OS:");
1640   os::win32::print_windows_version(st);
1641 }
1642 
1643 void os::win32::print_windows_version(outputStream* st) {
1644   OSVERSIONINFOEX osvi;
1645   VS_FIXEDFILEINFO *file_info;
1646   TCHAR kernel32_path[MAX_PATH];
1647   UINT len, ret;
1648 
1649   // Use the GetVersionEx information to see if we're on a server or
1650   // workstation edition of Windows. Starting with Windows 8.1 we can't
1651   // trust the OS version information returned by this API.
1652   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1653   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1654   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1655     st->print_cr("Call to GetVersionEx failed");
1656     return;
1657   }
1658   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1659 
1660   // Get the full path to \Windows\System32\kernel32.dll and use that for
1661   // determining what version of Windows we're running on.
1662   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1663   ret = GetSystemDirectory(kernel32_path, len);
1664   if (ret == 0 || ret > len) {
1665     st->print_cr("Call to GetSystemDirectory failed");
1666     return;
1667   }
1668   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1669 
1670   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1671   if (version_size == 0) {
1672     st->print_cr("Call to GetFileVersionInfoSize failed");
1673     return;
1674   }
1675 
1676   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1677   if (version_info == NULL) {
1678     st->print_cr("Failed to allocate version_info");
1679     return;
1680   }
1681 
1682   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1683     os::free(version_info);
1684     st->print_cr("Call to GetFileVersionInfo failed");
1685     return;
1686   }
1687 
1688   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1689     os::free(version_info);
1690     st->print_cr("Call to VerQueryValue failed");
1691     return;
1692   }
1693 
1694   int major_version = HIWORD(file_info->dwProductVersionMS);
1695   int minor_version = LOWORD(file_info->dwProductVersionMS);
1696   int build_number = HIWORD(file_info->dwProductVersionLS);
1697   int build_minor = LOWORD(file_info->dwProductVersionLS);
1698   int os_vers = major_version * 1000 + minor_version;
1699   os::free(version_info);
1700 
1701   st->print(" Windows ");
1702   switch (os_vers) {
1703 
1704   case 6000:
1705     if (is_workstation) {
1706       st->print("Vista");
1707     } else {
1708       st->print("Server 2008");
1709     }
1710     break;
1711 
1712   case 6001:
1713     if (is_workstation) {
1714       st->print("7");
1715     } else {
1716       st->print("Server 2008 R2");
1717     }
1718     break;
1719 
1720   case 6002:
1721     if (is_workstation) {
1722       st->print("8");
1723     } else {
1724       st->print("Server 2012");
1725     }
1726     break;
1727 
1728   case 6003:
1729     if (is_workstation) {
1730       st->print("8.1");
1731     } else {
1732       st->print("Server 2012 R2");
1733     }
1734     break;
1735 
1736   case 10000:
1737     if (is_workstation) {
1738       st->print("10");
1739     } else {
1740       st->print("Server 2016");
1741     }
1742     break;
1743 
1744   default:
1745     // Unrecognized Windows version; print its major and minor versions
1746     st->print("%d.%d", major_version, minor_version);
1747     break;
1748   }
1749 
1750   // Retrieve SYSTEM_INFO via GetNativeSystemInfo() so that we can
1751   // find out whether we are running on a 64-bit processor or not
1752   SYSTEM_INFO si;
1753   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1754   GetNativeSystemInfo(&si);
1755   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1756     st->print(" , 64 bit");
1757   }
1758 
1759   st->print(" Build %d", build_number);
1760   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1761   st->cr();
1762 }
1763 
1764 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1765   // Nothing to do for now.
1766 }
1767 
1768 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1769   HKEY key;
1770   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1771                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1772   if (status == ERROR_SUCCESS) {
1773     DWORD size = (DWORD)buflen;
1774     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1775     if (status != ERROR_SUCCESS) {
1776         strncpy(buf, "## __CPU__", buflen);
1777     }
1778     RegCloseKey(key);
1779   } else {
1780     // Fall back to generic CPU info
1781     strncpy(buf, "## __CPU__", buflen);
1782   }
1783 }
1784 
1785 void os::print_memory_info(outputStream* st) {
1786   st->print("Memory:");
1787   st->print(" %dk page", os::vm_page_size()>>10);
1788 
1789   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
1790   // incorrect value if total memory is larger than 4GB
1791   MEMORYSTATUSEX ms;
1792   ms.dwLength = sizeof(ms);
1793   GlobalMemoryStatusEx(&ms);
1794 
1795   st->print(", physical %uk", os::physical_memory() >> 10);
1796   st->print("(%uk free)", os::available_memory() >> 10);
1797 
1798   st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1799   st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1800   st->cr();
1801 }
1802 
1803 void os::print_siginfo(outputStream *st, const void* siginfo) {
1804   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1805   st->print("siginfo:");
1806 
1807   char tmp[64];
1808   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1809     strcpy(tmp, "EXCEPTION_??");
1810   }
1811   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1812 
1813   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1814        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1815        er->NumberParameters >= 2) {
1816     switch (er->ExceptionInformation[0]) {
1817     case 0: st->print(", reading address"); break;
1818     case 1: st->print(", writing address"); break;
1819     case 8: st->print(", data execution prevention violation at address"); break;
1820     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1821                        er->ExceptionInformation[0]);
1822     }
1823     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1824   } else {
1825     int num = er->NumberParameters;
1826     if (num > 0) {
1827       st->print(", ExceptionInformation=");
1828       for (int i = 0; i < num; i++) {
1829         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1830       }
1831     }
1832   }
1833   st->cr();
1834 }
1835 
1836 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1837   // do nothing
1838 }
1839 
1840 static char saved_jvm_path[MAX_PATH] = {0};
1841 
1842 // Find the full path to the current module, jvm.dll
1843 void os::jvm_path(char *buf, jint buflen) {
1844   // Error checking.
1845   if (buflen < MAX_PATH) {
1846     assert(false, "must use a large-enough buffer");
1847     buf[0] = '\0';
1848     return;
1849   }
1850   // Lazily resolve the path to the current module.
1851   if (saved_jvm_path[0] != 0) {
1852     strcpy(buf, saved_jvm_path);
1853     return;
1854   }
1855 
1856   buf[0] = '\0';
1857   if (Arguments::sun_java_launcher_is_altjvm()) {
1858     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1859     // for a JAVA_HOME environment variable and fix up the path so it
1860     // looks like jvm.dll is installed there (append a fake suffix
1861     // hotspot/jvm.dll).
1862     char* java_home_var = ::getenv("JAVA_HOME");
1863     if (java_home_var != NULL && java_home_var[0] != 0 &&
1864         strlen(java_home_var) < (size_t)buflen) {
1865       strncpy(buf, java_home_var, buflen);
1866 
1867       // determine whether this is a legacy image or a modules image;
1868       // a modules image doesn't have a "jre" subdirectory
1869       size_t len = strlen(buf);
1870       char* jrebin_p = buf + len;
1871       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1872       if (0 != _access(buf, 0)) {
1873         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1874       }
1875       len = strlen(buf);
1876       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1877     }
1878   }
1879 
1880   if (buf[0] == '\0') {
1881     GetModuleFileName(vm_lib_handle, buf, buflen);
1882   }
1883   strncpy(saved_jvm_path, buf, MAX_PATH);
1884   saved_jvm_path[MAX_PATH - 1] = '\0';
1885 }
1886 
1887 
1888 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1889 #ifndef _WIN64
1890   st->print("_");
1891 #endif
1892 }
1893 
1894 
1895 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1896 #ifndef _WIN64
1897   st->print("@%d", args_size  * sizeof(int));
1898 #endif
1899 }
1900 
1901 // This method is a copy of JDK's sysGetLastErrorString
1902 // from src/windows/hpi/src/system_md.c
1903 
1904 size_t os::lasterror(char* buf, size_t len) {
1905   DWORD errval;
1906 
1907   if ((errval = GetLastError()) != 0) {
1908     // DOS error
1909     size_t n = (size_t)FormatMessage(
1910                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1911                                      NULL,
1912                                      errval,
1913                                      0,
1914                                      buf,
1915                                      (DWORD)len,
1916                                      NULL);
1917     if (n > 3) {
1918       // Drop final '.', CR, LF
1919       if (buf[n - 1] == '\n') n--;
1920       if (buf[n - 1] == '\r') n--;
1921       if (buf[n - 1] == '.') n--;
1922       buf[n] = '\0';
1923     }
1924     return n;
1925   }
1926 
1927   if (errno != 0) {
1928     // C runtime error that has no corresponding DOS error code
1929     const char* s = os::strerror(errno);
1930     size_t n = strlen(s);
1931     if (n >= len) n = len - 1;
1932     strncpy(buf, s, n);
1933     buf[n] = '\0';
1934     return n;
1935   }
1936 
1937   return 0;
1938 }
1939 
1940 int os::get_last_error() {
1941   DWORD error = GetLastError();
1942   if (error == 0) {
1943     error = errno;
1944   }
1945   return (int)error;
1946 }
1947 
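     // WindowsSemaphore is a thin wrapper around a native Win32 semaphore
     // object (CreateSemaphore / ReleaseSemaphore / WaitForSingleObject).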
1948 WindowsSemaphore::WindowsSemaphore(uint value) {
1949   _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);
1950 
1951   guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
1952 }
1953 
1954 WindowsSemaphore::~WindowsSemaphore() {
1955   ::CloseHandle(_semaphore);
1956 }
1957 
1958 void WindowsSemaphore::signal(uint count) {
1959   if (count > 0) {
1960     BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
1961 
1962     assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
1963   }
1964 }
1965 
1966 void WindowsSemaphore::wait() {
1967   DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
1968   assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
1969   assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
1970 }
1971 
1972 // sun.misc.Signal
1973 // NOTE that this is a workaround for an apparent kernel bug where if
1974 // a signal handler for SIGBREAK is installed then that signal handler
1975 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1976 // See bug 4416763.
1977 static void (*sigbreakHandler)(int) = NULL;
1978 
1979 static void UserHandler(int sig, void *siginfo, void *context) {
1980   os::signal_notify(sig);
1981   // We need to reinstate the signal handler each time...
1982   os::signal(sig, (void*)UserHandler);
1983 }
1984 
1985 void* os::user_handler() {
1986   return (void*) UserHandler;
1987 }
1988 
1989 void* os::signal(int signal_number, void* handler) {
1990   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1991     void (*oldHandler)(int) = sigbreakHandler;
1992     sigbreakHandler = (void (*)(int)) handler;
1993     return (void*) oldHandler;
1994   } else {
1995     return (void*)::signal(signal_number, (void (*)(int))handler);
1996   }
1997 }
1998 
1999 void os::signal_raise(int signal_number) {
2000   raise(signal_number);
2001 }
2002 
2003 // The Win32 C runtime library maps all console control events other than ^C
2004 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2005 // logoff, and shutdown events.  We therefore install our own console handler
2006 // that raises SIGTERM for the latter cases.
2007 //
2008 static BOOL WINAPI consoleHandler(DWORD event) {
2009   switch (event) {
2010   case CTRL_C_EVENT:
2011     if (is_error_reported()) {
2012       // Ctrl-C is pressed during error reporting, likely because the error
2013       // handler fails to abort. Let the VM die immediately.
2014       os::die();
2015     }
2016 
2017     os::signal_raise(SIGINT);
2018     return TRUE;
2019     break;
2020   case CTRL_BREAK_EVENT:
2021     if (sigbreakHandler != NULL) {
2022       (*sigbreakHandler)(SIGBREAK);
2023     }
2024     return TRUE;
2025     break;
2026   case CTRL_LOGOFF_EVENT: {
2027     // Don't terminate JVM if it is running in a non-interactive session,
2028     // such as a service process.
2029     USEROBJECTFLAGS flags;
2030     HANDLE handle = GetProcessWindowStation();
2031     if (handle != NULL &&
2032         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2033         sizeof(USEROBJECTFLAGS), NULL)) {
2034       // If it is a non-interactive session, let the next handler
2035       // deal with it.
2036       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2037         return FALSE;
2038       }
2039     }
2040   }
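       // Interactive logoff falls through and is treated like close/shutdown.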
2041   case CTRL_CLOSE_EVENT:
2042   case CTRL_SHUTDOWN_EVENT:
2043     os::signal_raise(SIGTERM);
2044     return TRUE;
2045     break;
2046   default:
2047     break;
2048   }
2049   return FALSE;
2050 }
2051 
2052 // The following code was moved from os.cpp to make this
2053 // code platform specific, which it is by its very nature.
2054 
2055 // Return the maximum OS signal used + 1, for internal use only;
2056 // used as the exit signal for signal_thread
2057 int os::sigexitnum_pd() {
2058   return NSIG;
2059 }
2060 
2061 // a counter for each possible signal value, including signal_thread exit signal
2062 static volatile jint pending_signals[NSIG+1] = { 0 };
2063 static HANDLE sig_sem = NULL;
2064 
2065 void os::signal_init_pd() {
2066   // Initialize signal structures
2067   memset((void*)pending_signals, 0, sizeof(pending_signals));
2068 
2069   sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
2070 
2071   // Programs embedding the VM do not want it to attempt to receive
2072   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2073   // shutdown hooks mechanism introduced in 1.3.  For example, when
2074   // the VM is run as part of a Windows NT service (i.e., a servlet
2075   // engine in a web server), the correct behavior is for any console
2076   // control handler to return FALSE, not TRUE, because the OS's
2077   // "final" handler for such events allows the process to continue if
2078   // it is a service (while terminating it if it is not a service).
2079   // To make this behavior uniform and the mechanism simpler, we
2080   // completely disable the VM's usage of these console events if -Xrs
2081   // (=ReduceSignalUsage) is specified.  This means, for example, that
2082   // the CTRL-BREAK thread dump mechanism is also disabled in this
2083   // case.  See bugs 4323062, 4345157, and related bugs.
2084 
2085   if (!ReduceSignalUsage) {
2086     // Add a CTRL-C handler
2087     SetConsoleCtrlHandler(consoleHandler, TRUE);
2088   }
2089 }
2090 
2091 void os::signal_notify(int signal_number) {
2092   BOOL ret;
2093   if (sig_sem != NULL) {
2094     Atomic::inc(&pending_signals[signal_number]);
2095     ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2096     assert(ret != 0, "ReleaseSemaphore() failed");
2097   }
2098 }
2099 
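     // Scan the pending_signals counters and atomically claim one pending
     // signal, returning its number. If wait_for_signal is false, return -1
     // when nothing is pending; otherwise block on sig_sem (cooperating with
     // external thread suspension) until os::signal_notify() posts a signal.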
2100 static int check_pending_signals(bool wait_for_signal) {
2101   DWORD ret;
2102   while (true) {
2103     for (int i = 0; i < NSIG + 1; i++) {
2104       jint n = pending_signals[i];
2105       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2106         return i;
2107       }
2108     }
2109     if (!wait_for_signal) {
2110       return -1;
2111     }
2112 
2113     JavaThread *thread = JavaThread::current();
2114 
2115     ThreadBlockInVM tbivm(thread);
2116 
2117     bool threadIsSuspended;
2118     do {
2119       thread->set_suspend_equivalent();
2120       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2121       ret = ::WaitForSingleObject(sig_sem, INFINITE);
2122       assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
2123 
2124       // were we externally suspended while we were waiting?
2125       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2126       if (threadIsSuspended) {
2127         // The semaphore has been incremented, but while we were waiting
2128         // another thread suspended us. We don't want to continue running
2129         // while suspended because that would surprise the thread that
2130         // suspended us.
2131         ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2132         assert(ret != 0, "ReleaseSemaphore() failed");
2133 
2134         thread->java_suspend_self();
2135       }
2136     } while (threadIsSuspended);
2137   }
2138 }
2139 
2140 int os::signal_lookup() {
2141   return check_pending_signals(false);
2142 }
2143 
2144 int os::signal_wait() {
2145   return check_pending_signals(true);
2146 }
2147 
2148 // Implicit OS exception handling
2149 
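     // Resume execution at 'handler' by saving the faulting pc in the current
     // thread (when one is available) and rewriting the instruction pointer
     // in the exception context.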
2150 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2151                       address handler) {
2152   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2153   // Save pc in thread
2154 #ifdef _M_IA64
2155   // Do not blow up if no thread info available.
2156   if (thread) {
2157     // Saving PRECISE pc (with slot information) in thread.
2158     uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
2159     // Convert precise PC into "Unix" format
2160     precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
2161     thread->set_saved_exception_pc((address)precise_pc);
2162   }
2163   // Set pc to handler
2164   exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
2165   // Clear out psr.ri (= Restart Instruction) in order to continue
2166   // at the beginning of the target bundle.
2167   exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
2168   assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
2169 #else
2170   #ifdef _M_AMD64
2171   // Do not blow up if no thread info available.
2172   if (thread) {
2173     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2174   }
2175   // Set pc to handler
2176   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2177   #else
2178   // Do not blow up if no thread info available.
2179   if (thread) {
2180     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2181   }
2182   // Set pc to handler
2183   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2184   #endif
2185 #endif
2186 
2187   // Continue the execution
2188   return EXCEPTION_CONTINUE_EXECUTION;
2189 }
2190 
2191 
2192 // Used for PostMortemDump
2193 extern "C" void safepoints();
2194 extern "C" void find(int x);
2195 extern "C" void events();
2196 
2197 // According to Windows API documentation, an illegal instruction sequence should generate
2198 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2199 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2200 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2201 
2202 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2203 
2204 // From "Execution Protection in the Windows Operating System" draft 0.35
2205 // Once a system header becomes available, the "real" define should be
2206 // included or copied here.
2207 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2208 
2209 // Handle NAT Bit consumption on IA64.
2210 #ifdef _M_IA64
2211   #define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
2212 #endif
2213 
2214 // Windows Vista/2008 heap corruption check
2215 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2216 
2217 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2218 // C++ compiler contain this error code. Because this is a compiler-generated
2219 // error, the code is not listed in the Win32 API header files.
2220 // The code is actually a cryptic mnemonic device, with the initial "E"
2221 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2222 // ASCII values of "msc".
2223 
2224 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2225 
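     // Table mapping Win32 exception codes to their symbolic names, used by
     // os::exception_name() below.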
2226 #define def_excpt(val) { #val, (val) }
2227 
2228 static const struct { char* name; uint number; } exceptlabels[] = {
2229     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2230     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2231     def_excpt(EXCEPTION_BREAKPOINT),
2232     def_excpt(EXCEPTION_SINGLE_STEP),
2233     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2234     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2235     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2236     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2237     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2238     def_excpt(EXCEPTION_FLT_OVERFLOW),
2239     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2240     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2241     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2242     def_excpt(EXCEPTION_INT_OVERFLOW),
2243     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2244     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2245     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2246     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2247     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2248     def_excpt(EXCEPTION_STACK_OVERFLOW),
2249     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2250     def_excpt(EXCEPTION_GUARD_PAGE),
2251     def_excpt(EXCEPTION_INVALID_HANDLE),
2252     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2253     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2254 #ifdef _M_IA64
2255     , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION)
2256 #endif
2257 };
2258 
2259 #undef def_excpt
2260 
2261 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2262   uint code = static_cast<uint>(exception_code);
2263   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2264     if (exceptlabels[i].number == code) {
2265       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2266       return buf;
2267     }
2268   }
2269 
2270   return NULL;
2271 }
2272 
2273 //-----------------------------------------------------------------------------
2274 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2275   // handle exception caused by idiv; should only happen for -MinInt/-1
2276   // (division by zero is handled explicitly)
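       // For example, Java requires min_jint / -1 == min_jint with remainder 0,
       // but the idiv instruction raises an overflow exception for that operand
       // combination, so the expected result is materialized here instead.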
2277 #ifdef _M_IA64
2278   assert(0, "Fix Handle_IDiv_Exception");
2279 #else
2280   #ifdef  _M_AMD64
2281   PCONTEXT ctx = exceptionInfo->ContextRecord;
2282   address pc = (address)ctx->Rip;
2283   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2284   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2285   if (pc[0] == 0xF7) {
2286     // set correct result values and continue after idiv instruction
2287     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2288   } else {
2289     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2290   }
2291   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2292   // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2293   // idiv opcode (0xF7).
2294   ctx->Rdx = (DWORD)0;             // remainder
2295   // Continue the execution
2296   #else
2297   PCONTEXT ctx = exceptionInfo->ContextRecord;
2298   address pc = (address)ctx->Eip;
2299   assert(pc[0] == 0xF7, "not an idiv opcode");
2300   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2301   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2302   // set correct result values and continue after idiv instruction
2303   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2304   ctx->Eax = (DWORD)min_jint;      // result
2305   ctx->Edx = (DWORD)0;             // remainder
2306   // Continue the execution
2307   #endif
2308 #endif
2309   return EXCEPTION_CONTINUE_EXECUTION;
2310 }
2311 
2312 //-----------------------------------------------------------------------------
2313 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2314   PCONTEXT ctx = exceptionInfo->ContextRecord;
2315 #ifndef  _WIN64
2316   // handle exception caused by native method modifying control word
2317   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2318 
2319   switch (exception_code) {
2320   case EXCEPTION_FLT_DENORMAL_OPERAND:
2321   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2322   case EXCEPTION_FLT_INEXACT_RESULT:
2323   case EXCEPTION_FLT_INVALID_OPERATION:
2324   case EXCEPTION_FLT_OVERFLOW:
2325   case EXCEPTION_FLT_STACK_CHECK:
2326   case EXCEPTION_FLT_UNDERFLOW:
2327     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2328     if (fp_control_word != ctx->FloatSave.ControlWord) {
2329       // Restore FPCW and mask out FLT exceptions
2330       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2331       // Mask out pending FLT exceptions
2332       ctx->FloatSave.StatusWord &=  0xffffff00;
2333       return EXCEPTION_CONTINUE_EXECUTION;
2334     }
2335   }
2336 
2337   if (prev_uef_handler != NULL) {
2338     // We didn't handle this exception so pass it to the previous
2339     // UnhandledExceptionFilter.
2340     return (prev_uef_handler)(exceptionInfo);
2341   }
2342 #else // !_WIN64
2343   // On Windows, the mxcsr control bits are non-volatile across calls
2344   // See also CR 6192333
2345   //
2346   jint MxCsr = INITIAL_MXCSR;
2347   // we can't use StubRoutines::addr_mxcsr_std()
2348   // because in Win64 mxcsr is not saved there
2349   if (MxCsr != ctx->MxCsr) {
2350     ctx->MxCsr = MxCsr;
2351     return EXCEPTION_CONTINUE_EXECUTION;
2352   }
2353 #endif // !_WIN64
2354 
2355   return EXCEPTION_CONTINUE_SEARCH;
2356 }
2357 
2358 static inline void report_error(Thread* t, DWORD exception_code,
2359                                 address addr, void* siginfo, void* context) {
2360   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2361 
2362   // If UseOsErrorReporting, this will return here and save the error file
2363   // somewhere where we can find it in the minidump.
2364 }
2365 
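     // Locate the Java frame that was executing when the stack banging
     // (guard page touch) occurred, so callers can check it for the
     // ReservedStackAccess annotation. Returns false if no suitable frame
     // could be determined.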
2366 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2367         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2368   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2369   address addr = (address) exceptionRecord->ExceptionInformation[1];
2370   if (Interpreter::contains(pc)) {
2371     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2372     if (!fr->is_first_java_frame()) {
2373       assert(fr->safe_for_sender(thread), "Safety check");
2374       *fr = fr->java_sender();
2375     }
2376   } else {
2377     // more complex code with compiled code
2378     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2379     CodeBlob* cb = CodeCache::find_blob(pc);
2380     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2381       // Not sure where the pc points to, fallback to default
2382       // stack overflow handling
2383       return false;
2384     } else {
2385       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2386       // in compiled code, the stack banging is performed just after the return pc
2387       // has been pushed on the stack
2388       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2389       if (!fr->is_java_frame()) {
2390         assert(fr->safe_for_sender(thread), "Safety check");
2391         *fr = fr->java_sender();
2392       }
2393     }
2394   }
2395   assert(fr->is_java_frame(), "Safety check");
2396   return true;
2397 }
2398 
2399 //-----------------------------------------------------------------------------
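     // Top-level VM exception filter. Classifies the Win32 exception and, when
     // possible, resumes execution at a VM-provided continuation (safe fetch,
     // stack overflow, implicit null check, safepoint poll, integer overflow,
     // FP control word repair); otherwise it reports the error and lets the
     // next handler run (EXCEPTION_CONTINUE_SEARCH).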
2400 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2401   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2402   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2403 #ifdef _M_IA64
2404   // On Itanium, we need the "precise pc", which has the slot number coded
2405   // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
2406   address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
2407   // Convert the pc to "Unix format", which has the slot number coded
2408   // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
2409   // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
2410   // information is saved in the Unix format.
2411   address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
2412 #else
2413   #ifdef _M_AMD64
2414   address pc = (address) exceptionInfo->ContextRecord->Rip;
2415   #else
2416   address pc = (address) exceptionInfo->ContextRecord->Eip;
2417   #endif
2418 #endif
2419   Thread* t = Thread::current_or_null_safe();
2420 
2421   // Handle SafeFetch32 and SafeFetchN exceptions.
2422   if (StubRoutines::is_safefetch_fault(pc)) {
2423     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2424   }
2425 
2426 #ifndef _WIN64
2427   // Execution protection violation - win32 running on AMD64 only
2428   // Handled first to avoid misdiagnosis as a "normal" access violation;
2429   // This is safe to do because we have a new/unique ExceptionInformation
2430   // code for this condition.
2431   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2432     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2433     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2434     address addr = (address) exceptionRecord->ExceptionInformation[1];
2435 
2436     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2437       int page_size = os::vm_page_size();
2438 
2439       // Make sure the pc and the faulting address are sane.
2440       //
2441       // If an instruction spans a page boundary, and the page containing
2442       // the beginning of the instruction is executable but the following
2443       // page is not, the pc and the faulting address might be slightly
2444       // different - we still want to unguard the 2nd page in this case.
2445       //
2446       // 15 bytes seems to be a (very) safe value for max instruction size.
2447       bool pc_is_near_addr =
2448         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2449       bool instr_spans_page_boundary =
2450         (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2451                          (intptr_t) page_size) > 0);
2452 
2453       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2454         static volatile address last_addr =
2455           (address) os::non_memory_address_word();
2456 
2457         // In conservative mode, don't unguard unless the address is in the VM
2458         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2459             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2460 
2461           // Set memory to RWX and retry
2462           address page_start =
2463             (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2464           bool res = os::protect_memory((char*) page_start, page_size,
2465                                         os::MEM_PROT_RWX);
2466 
2467           log_debug(os)("Execution protection violation "
2468                         "at " INTPTR_FORMAT
2469                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2470                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2471 
2472           // Set last_addr so if we fault again at the same address, we don't
2473           // end up in an endless loop.
2474           //
2475           // There are two potential complications here.  Two threads trapping
2476           // at the same address at the same time could cause one of the
2477           // threads to think it already unguarded, and abort the VM.  Likely
2478           // very rare.
2479           //
2480           // The other race involves two threads alternately trapping at
2481           // different addresses and failing to unguard the page, resulting in
2482           // an endless loop.  This condition is probably even more unlikely
2483           // than the first.
2484           //
2485           // Although both cases could be avoided by using locks or thread
2486           // local last_addr, these solutions are unnecessary complication:
2487           // this handler is a best-effort safety net, not a complete solution.
2488           // It is disabled by default and should only be used as a workaround
2489           // in case we missed any no-execute-unsafe VM code.
2490 
2491           last_addr = addr;
2492 
2493           return EXCEPTION_CONTINUE_EXECUTION;
2494         }
2495       }
2496 
2497       // Last unguard failed or not unguarding
2498       tty->print_raw_cr("Execution protection violation");
2499       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2500                    exceptionInfo->ContextRecord);
2501       return EXCEPTION_CONTINUE_SEARCH;
2502     }
2503   }
2504 #endif // _WIN64
2505 
2506   // Check to see if we caught the safepoint code in the
2507   // process of write protecting the memory serialization page.
2508   // It write enables the page immediately after protecting it
2509   // so just return.
2510   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2511     JavaThread* thread = (JavaThread*) t;
2512     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2513     address addr = (address) exceptionRecord->ExceptionInformation[1];
2514     if (os::is_memory_serialize_page(thread, addr)) {
2515       // Block current thread until the memory serialize page permission restored.
2516       os::block_on_serialize_page_trap();
2517       return EXCEPTION_CONTINUE_EXECUTION;
2518     }
2519   }
2520 
2521   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2522       VM_Version::is_cpuinfo_segv_addr(pc)) {
2523     // Verify that the OS saves and restores AVX registers.
2524     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2525   }
2526 
2527   if (t != NULL && t->is_Java_thread()) {
2528     JavaThread* thread = (JavaThread*) t;
2529     bool in_java = thread->thread_state() == _thread_in_Java;
2530 
2531     // Handle potential stack overflows up front.
2532     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2533 #ifdef _M_IA64
2534       // Use guard page for register stack.
2535       PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2536       address addr = (address) exceptionRecord->ExceptionInformation[1];
2537       // Check for a register stack overflow on Itanium
2538       if (thread->addr_inside_register_stack_red_zone(addr)) {
2539         // Fatal red zone violation happens if the Java program
2540         // catches a StackOverflow error and does so much processing
2541         // that it runs beyond the unprotected yellow guard zone. As
2542         // a result, we are out of here.
2543         fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
2544       } else if(thread->addr_inside_register_stack(addr)) {
2545         // Disable the yellow zone which sets the state that
2546         // we've got a stack overflow problem.
2547         if (thread->stack_yellow_reserved_zone_enabled()) {
2548           thread->disable_stack_yellow_reserved_zone();
2549         }
2550         // Give us some room to process the exception.
2551         thread->disable_register_stack_guard();
2552         // Tracing with +Verbose.
2553         if (Verbose) {
2554           tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
2555           tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
2556           tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
2557           tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
2558                         thread->register_stack_base(),
2559                         thread->register_stack_base() + thread->stack_size());
2560         }
2561 
2562         // Reguard the permanent register stack red zone just to be sure.
2563         // We saw Windows silently disabling this without telling us.
2564         thread->enable_register_stack_red_zone();
2565 
2566         return Handle_Exception(exceptionInfo,
2567                                 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2568       }
2569 #endif
2570       if (thread->stack_guards_enabled()) {
2571         if (in_java) {
2572           frame fr;
2573           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2574           address addr = (address) exceptionRecord->ExceptionInformation[1];
2575           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2576             assert(fr.is_java_frame(), "Must be a Java frame");
2577             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2578           }
2579         }
2580         // Yellow zone violation.  The o/s has unprotected the first yellow
2581         // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone()
2582         // to update the enabled status, even if the zone contains only one page.
2583         thread->disable_stack_yellow_reserved_zone();
2584         // If not in java code, return and hope for the best.
2585         return in_java
2586             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2587             :  EXCEPTION_CONTINUE_EXECUTION;
2588       } else {
2589         // Fatal red zone violation.
2590         thread->disable_stack_red_zone();
2591         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2592         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2593                       exceptionInfo->ContextRecord);
2594         return EXCEPTION_CONTINUE_SEARCH;
2595       }
2596     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2597       // Either stack overflow or null pointer exception.
2598       if (in_java) {
2599         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2600         address addr = (address) exceptionRecord->ExceptionInformation[1];
2601         address stack_end = thread->stack_end();
2602         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2603           // Stack overflow.
2604           assert(!os::uses_stack_guard_pages(),
2605                  "should be caught by red zone code above.");
2606           return Handle_Exception(exceptionInfo,
2607                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2608         }
2609         // Check for safepoint polling and implicit null pointers.
2610         // We only expect null pointers in the stubs (vtable);
2611         // the rest are checked explicitly now.
2612         CodeBlob* cb = CodeCache::find_blob(pc);
2613         if (cb != NULL) {
2614           if (os::is_poll_address(addr)) {
2615             address stub = SharedRuntime::get_poll_stub(pc);
2616             return Handle_Exception(exceptionInfo, stub);
2617           }
2618         }
2619         {
2620 #ifdef _WIN64
2621           // If it's a legal stack address, map the entire region in
2622           //
2623           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2624           address addr = (address) exceptionRecord->ExceptionInformation[1];
2625           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2626             addr = (address)((uintptr_t)addr &
2627                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2628             os::commit_memory((char *)addr, thread->stack_base() - addr,
2629                               !ExecMem);
2630             return EXCEPTION_CONTINUE_EXECUTION;
2631           } else
2632 #endif
2633           {
2634             // Null pointer exception.
2635 #ifdef _M_IA64
2636             // Process implicit null checks in compiled code. Note: Implicit null checks
2637             // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
2638             if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
2639               CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
2640               // Handle implicit null check in UEP method entry
2641               if (cb && (cb->is_frame_complete_at(pc) ||
2642                          (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
2643                 if (Verbose) {
2644                   intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
2645                   tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
2646                   tty->print_cr("      to addr " INTPTR_FORMAT, addr);
2647                   tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
2648                                 *(bundle_start + 1), *bundle_start);
2649                 }
2650                 return Handle_Exception(exceptionInfo,
2651                                         SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
2652               }
2653             }
2654 
2655             // Implicit null checks were processed above.  Hence, we should not reach
2656             // here in the usual case => die!
2657             if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
2658             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2659                          exceptionInfo->ContextRecord);
2660             return EXCEPTION_CONTINUE_SEARCH;
2661 
2662 #else // !IA64
2663 
2664             if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
2665               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2666               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2667             }
2668             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2669                          exceptionInfo->ContextRecord);
2670             return EXCEPTION_CONTINUE_SEARCH;
2671 #endif
2672           }
2673         }
2674       }
2675 
2676 #ifdef _WIN64
2677       // Special care for fast JNI field accessors.
2678       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2679       // in and the heap gets shrunk before the field access.
2680       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2681         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2682         if (addr != (address)-1) {
2683           return Handle_Exception(exceptionInfo, addr);
2684         }
2685       }
2686 #endif
2687 
2688       // Stack overflow or null pointer exception in native code.
2689       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2690                    exceptionInfo->ContextRecord);
2691       return EXCEPTION_CONTINUE_SEARCH;
2692     } // /EXCEPTION_ACCESS_VIOLATION
2693     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2694 #if defined _M_IA64
2695     else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2696               exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2697       M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
2698 
2699       // Compiled method patched to be non-entrant? The following conditions must apply:
2700       // 1. must be first instruction in bundle
2701       // 2. must be a break instruction with appropriate code
2702       if ((((uint64_t) pc & 0x0F) == 0) &&
2703           (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
2704         return Handle_Exception(exceptionInfo,
2705                                 (address)SharedRuntime::get_handle_wrong_method_stub());
2706       }
2707     } // /EXCEPTION_ILLEGAL_INSTRUCTION
2708 #endif
2709 
2710 
2711     if (in_java) {
2712       switch (exception_code) {
2713       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2714         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2715 
2716       case EXCEPTION_INT_OVERFLOW:
2717         return Handle_IDiv_Exception(exceptionInfo);
2718 
2719       } // switch
2720     }
2721     if (((thread->thread_state() == _thread_in_Java) ||
2722          (thread->thread_state() == _thread_in_native)) &&
2723          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2724       LONG result=Handle_FLT_Exception(exceptionInfo);
2725       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2726     }
2727   }
2728 
2729   if (exception_code != EXCEPTION_BREAKPOINT) {
2730     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2731                  exceptionInfo->ContextRecord);
2732   }
2733   return EXCEPTION_CONTINUE_SEARCH;
2734 }
2735 
2736 #ifndef _WIN64
2737 // Special care for fast JNI accessors.
2738 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2739 // the heap gets shrunk before the field access.
2740 // Need to install our own structured exception handler since native code may
2741 // install its own.
2742 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2743   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2744   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2745     address pc = (address) exceptionInfo->ContextRecord->Eip;
2746     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2747     if (addr != (address)-1) {
2748       return Handle_Exception(exceptionInfo, addr);
2749     }
2750   }
2751   return EXCEPTION_CONTINUE_SEARCH;
2752 }
2753 
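     // Wrap each fast JNI field accessor in a structured exception handler
     // (__try/__except) so that a trap in the fast path is redirected to the
     // accessor's slow-case code by fastJNIAccessorExceptionFilter above.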
2754 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2755   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2756                                                      jobject obj,           \
2757                                                      jfieldID fieldID) {    \
2758     __try {                                                                 \
2759       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2760                                                                  obj,       \
2761                                                                  fieldID);  \
2762     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2763                                               _exception_info())) {         \
2764     }                                                                       \
2765     return 0;                                                               \
2766   }
2767 
2768 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2769 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2770 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2771 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2772 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2773 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2774 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2775 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2776 
2777 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2778   switch (type) {
2779   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2780   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2781   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2782   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2783   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2784   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2785   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2786   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2787   default:        ShouldNotReachHere();
2788   }
2789   return (address)-1;
2790 }
2791 #endif
2792 
2793 // Virtual Memory
2794 
2795 int os::vm_page_size() { return os::win32::vm_page_size(); }
2796 int os::vm_allocation_granularity() {
2797   return os::win32::vm_allocation_granularity();
2798 }
2799 
2800 // Windows large page support is available on Windows 2003. In order to use
2801 // large page memory, the administrator must first assign additional privilege
2802 // to the user:
2803 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2804 //   + select Local Policies -> User Rights Assignment
2805 //   + double click "Lock pages in memory", add users and/or groups
2806 //   + reboot
2807 // Note the above steps are needed for administrator as well, as administrators
2808 // by default do not have the privilege to lock pages in memory.
2809 //
2810 // Note about Windows 2003: although the API supports committing large page
2811 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2812 // scenario, I found through experimentation that it only uses large pages if the entire
2813 // memory region is reserved and committed in a single VirtualAlloc() call.
2814 // This makes Windows large page support more or less like Solaris ISM, in
2815 // that the entire heap must be committed upfront. This probably will change
2816 // in the future, if so the code below needs to be revisited.
2817 
2818 #ifndef MEM_LARGE_PAGES
2819   #define MEM_LARGE_PAGES 0x20000000
2820 #endif
2821 
2822 static HANDLE    _hProcess;
2823 static HANDLE    _hToken;
2824 
2825 // Container for NUMA node list info
2826 class NUMANodeListHolder {
2827  private:
2828   int *_numa_used_node_list;  // allocated below
2829   int _numa_used_node_count;
2830 
2831   void free_node_list() {
2832     if (_numa_used_node_list != NULL) {
2833       FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2834     }
2835   }
2836 
2837  public:
2838   NUMANodeListHolder() {
2839     _numa_used_node_count = 0;
2840     _numa_used_node_list = NULL;
2841     // do rest of initialization in build routine (after function pointers are set up)
2842   }
2843 
2844   ~NUMANodeListHolder() {
2845     free_node_list();
2846   }
2847 
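       // Build the list of NUMA nodes whose processors intersect this
       // process's affinity mask. Returns true only if more than one node
       // is in use (otherwise interleaving would be pointless).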
2848   bool build() {
2849     DWORD_PTR proc_aff_mask;
2850     DWORD_PTR sys_aff_mask;
2851     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2852     ULONG highest_node_number;
2853     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2854     free_node_list();
2855     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2856     for (unsigned int i = 0; i <= highest_node_number; i++) {
2857       ULONGLONG proc_mask_numa_node;
2858       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2859       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2860         _numa_used_node_list[_numa_used_node_count++] = i;
2861       }
2862     }
2863     return (_numa_used_node_count > 1);
2864   }
2865 
2866   int get_count() { return _numa_used_node_count; }
2867   int get_node_list_entry(int n) {
2868     // for indexes out of range, returns -1
2869     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2870   }
2871 
2872 } numa_node_list_holder;
2873 
2874 
2875 
2876 static size_t _large_page_size = 0;
2877 
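     // Try to enable the SeLockMemoryPrivilege ("Lock pages in memory")
     // privilege for the current process token; it is required for
     // large-page allocations. Returns true on success.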
2878 static bool request_lock_memory_privilege() {
2879   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2880                           os::current_process_id());
2881 
2882   LUID luid;
2883   if (_hProcess != NULL &&
2884       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2885       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2886 
2887     TOKEN_PRIVILEGES tp;
2888     tp.PrivilegeCount = 1;
2889     tp.Privileges[0].Luid = luid;
2890     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2891 
2892     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2893     // privilege. Check GetLastError() too. See MSDN document.
2894     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2895         (GetLastError() == ERROR_SUCCESS)) {
2896       return true;
2897     }
2898   }
2899 
2900   return false;
2901 }
2902 
2903 static void cleanup_after_large_page_init() {
2904   if (_hProcess) CloseHandle(_hProcess);
2905   _hProcess = NULL;
2906   if (_hToken) CloseHandle(_hToken);
2907   _hToken = NULL;
2908 }
2909 
2910 static bool numa_interleaving_init() {
2911   bool success = false;
2912   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2913 
2914   // print a warning if the UseNUMAInterleaving flag is specified on the command line
2915   bool warn_on_failure = use_numa_interleaving_specified;
2916 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2917 
2918   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2919   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2920   NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2921 
2922   if (numa_node_list_holder.build()) {
2923     if (log_is_enabled(Debug, os, cpu)) {
2924       Log(os, cpu) log;
2925       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2926       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2927         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2928       }
2929     }
2930     success = true;
2931   } else {
2932     WARN("Process does not cover multiple NUMA nodes.");
2933   }
2934   if (!success) {
2935     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2936   }
2937   return success;
2938 #undef WARN
2939 }
2940 
2941 // This routine is used whenever we need to reserve a contiguous VA range
2942 // but must make separate VirtualAlloc calls for each piece of the range.
2943 // Reasons for doing this:
2944 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2945 //  * UseNUMAInterleaving requires a separate node for each piece
2946 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2947                                          DWORD prot,
2948                                          bool should_inject_error = false) {
2949   char * p_buf;
2950   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2951   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2952   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2953 
2954   // First reserve enough address space in advance, since we want to be
2955   // able to break a single contiguous virtual address range into multiple
2956   // large page commits, but WS2003 does not allow reserving large page space.
2957   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2958   // address space. Then we deallocate that reservation and re-allocate
2959   // using large pages.
2960   const size_t size_of_reserve = bytes + chunk_size;
2961   if (bytes > size_of_reserve) {
2962     // Overflowed.
2963     return NULL;
2964   }
2965   p_buf = (char *) VirtualAlloc(addr,
2966                                 size_of_reserve,  // size of Reserve
2967                                 MEM_RESERVE,
2968                                 PAGE_READWRITE);
2969   // If reservation failed, return NULL
2970   if (p_buf == NULL) return NULL;
2971   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2972   os::release_memory(p_buf, bytes + chunk_size);
2973 
2974   // We still need to round up to a page boundary (in case we are using large pages),
2975   // but not to a chunk boundary (in case NUMAInterleaveGranularity doesn't align with the page size);
2976   // instead we handle this in the bytes_to_rq computation below.
2977   p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2978 
2979   // now go through and allocate one chunk at a time until all bytes are
2980   // allocated
2981   size_t  bytes_remaining = bytes;
2982   // An overflow of align_size_up() would have been caught above
2983   // in the calculation of size_of_reserve.
2984   char * next_alloc_addr = p_buf;
2985   HANDLE hProc = GetCurrentProcess();
2986 
2987 #ifdef ASSERT
2988   // Variable for the failure injection
2989   long ran_num = os::random();
2990   size_t fail_after = ran_num % bytes;
2991 #endif
2992 
2993   int count=0;
2994   while (bytes_remaining) {
2995     // select bytes_to_rq to get to the next chunk_size boundary
2996 
2997     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
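         // Example: with a 2MB chunk_size and next_alloc_addr sitting 512KB past a
         // chunk boundary, bytes_to_rq is MIN2(bytes_remaining, 1536KB), so this
         // allocation ends exactly on the next chunk boundary.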
2998     // Note allocate and commit
2999     char * p_new;
3000 
3001 #ifdef ASSERT
3002     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
3003 #else
3004     const bool inject_error_now = false;
3005 #endif
3006 
3007     if (inject_error_now) {
3008       p_new = NULL;
3009     } else {
3010       if (!UseNUMAInterleaving) {
3011         p_new = (char *) VirtualAlloc(next_alloc_addr,
3012                                       bytes_to_rq,
3013                                       flags,
3014                                       prot);
3015       } else {
3016         // get the next node to use from the used_node_list
3017         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3018         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3019         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3020       }
3021     }
3022 
3023     if (p_new == NULL) {
3024       // Free any allocated pages
3025       if (next_alloc_addr > p_buf) {
3026         // Some memory was committed so release it.
3027         size_t bytes_to_release = bytes - bytes_remaining;
3028         // NMT has yet to record any individual blocks, so it
3029         // needs to create a dummy 'reserve' record to match
3030         // the release.
3031         MemTracker::record_virtual_memory_reserve((address)p_buf,
3032                                                   bytes_to_release, CALLER_PC);
3033         os::release_memory(p_buf, bytes_to_release);
3034       }
3035 #ifdef ASSERT
3036       if (should_inject_error) {
3037         log_develop_debug(pagesize)("Reserving pages individually failed.");
3038       }
3039 #endif
3040       return NULL;
3041     }
3042 
3043     bytes_remaining -= bytes_to_rq;
3044     next_alloc_addr += bytes_to_rq;
3045     count++;
3046   }
3047   // Although the memory is allocated individually, it is returned as one.
3048   // NMT records it as one block.
3049   if ((flags & MEM_COMMIT) != 0) {
3050     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3051   } else {
3052     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3053   }
3054 
3055   // made it this far, success
3056   return p_buf;
3057 }
3058 
3059 
3060 
3061 void os::large_page_init() {
3062   if (!UseLargePages) return;
3063 
3064   // print a warning if any large page related flag is specified on command line
3065   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3066                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3067   bool success = false;
3068 
3069 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3070   if (request_lock_memory_privilege()) {
3071     size_t s = GetLargePageMinimum();
3072     if (s) {
3073 #if defined(IA32) || defined(AMD64)
3074       if (s > 4*M || LargePageSizeInBytes > 4*M) {
3075         WARN("JVM cannot use large pages bigger than 4mb.");
3076       } else {
3077 #endif
3078         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
3079           _large_page_size = LargePageSizeInBytes;
3080         } else {
3081           _large_page_size = s;
3082         }
3083         success = true;
3084 #if defined(IA32) || defined(AMD64)
3085       }
3086 #endif
3087     } else {
3088       WARN("Large page is not supported by the processor.");
3089     }
3090   } else {
3091     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3092   }
3093 #undef WARN
3094 
3095   const size_t default_page_size = (size_t) vm_page_size();
3096   if (success && _large_page_size > default_page_size) {
3097     _page_sizes[0] = _large_page_size;
3098     _page_sizes[1] = default_page_size;
3099     _page_sizes[2] = 0;
3100   }
3101 
3102   cleanup_after_large_page_init();
3103   UseLargePages = success;
3104 }
3105 
3106 // On win32, one cannot release just a part of reserved memory, it's an
3107 // all or nothing deal.  When we split a reservation, we must break the
3108 // reservation into two reservations.
3109 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3110                                   bool realloc) {
3111   if (size > 0) {
3112     release_memory(base, size);
3113     if (realloc) {
3114       reserve_memory(split, base);
3115     }
3116     if (size != split) {
3117       reserve_memory(size - split, base + split);
3118     }
3119   }
3120 }
3121 
3122 // Multiple threads can race in this code, but it is not possible to unmap small
3123 // sections of virtual space to get the requested alignment as on POSIX-like OSes.
3124 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3125 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
3126   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3127          "Alignment must be a multiple of allocation granularity (page size)");
3128   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3129 
3130   size_t extra_size = size + alignment;
3131   assert(extra_size >= size, "overflow, size is too large to allow alignment");
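       // Over-reserving by 'alignment' bytes guarantees that the reservation
       // contains an aligned start address from which 'size' bytes still fit
       // (e.g. reserving size + 1MB always contains a 1MB-aligned address).
       // The oversized reservation is released and the aligned subrange
       // re-reserved below; the loop retries if another thread grabs that
       // range in between.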
3132 
3133   char* aligned_base = NULL;
3134 
3135   do {
3136     char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
3137     if (extra_base == NULL) {
3138       return NULL;
3139     }
3140     // Do manual alignment
3141     aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3142 
3143     os::release_memory(extra_base, extra_size);
3144 
3145     aligned_base = os::reserve_memory(size, aligned_base);
3146 
3147   } while (aligned_base == NULL);
3148 
3149   return aligned_base;
3150 }
3151 
3152 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3153   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3154          "reserve alignment");
3155   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3156   char* res;
3157   // note that if UseLargePages is on, all the areas that require interleaving
3158   // will go through reserve_memory_special rather than through here.
3159   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3160   if (!use_individual) {
3161     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3162   } else {
3163     elapsedTimer reserveTimer;
3164     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3165     // in numa interleaving, we have to allocate pages individually
3166     // (well really chunks of NUMAInterleaveGranularity size)
3167     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3168     if (res == NULL) {
3169       warning("NUMA page allocation failed");
3170     }
3171     if (Verbose && PrintMiscellaneous) {
3172       reserveTimer.stop();
3173       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3174                     reserveTimer.milliseconds(), reserveTimer.ticks());
3175     }
3176   }
3177   assert(res == NULL || addr == NULL || addr == res,
3178          "Unexpected address from reserve.");
3179 
3180   return res;
3181 }
3182 
3183 // Reserve memory at an arbitrary address, only if that area is
3184 // available (and not reserved for something else).
3185 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3186   // Windows os::reserve_memory() fails if the requested address range is
3187   // not available.
3188   return reserve_memory(bytes, requested_addr);
3189 }
3190 
3191 size_t os::large_page_size() {
3192   return _large_page_size;
3193 }
3194 
3195 bool os::can_commit_large_page_memory() {
3196   // Windows only uses large page memory when the entire region is reserved
3197   // and committed in a single VirtualAlloc() call. This may change in the
3198   // future, but with Windows 2003 it's not possible to commit on demand.
3199   return false;
3200 }
3201 
3202 bool os::can_execute_large_page_memory() {
3203   return true;
3204 }
3205 
3206 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3207                                  bool exec) {
3208   assert(UseLargePages, "only for large pages");
3209 
3210   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3211     return NULL; // Fallback to small pages.
3212   }
3213 
3214   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3215   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3216 
3217   // with large pages, there are two cases where we need to use Individual Allocation
3218   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3219   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3220   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3221     log_debug(pagesize)("Reserving large pages individually.");
3222 
3223     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3224     if (p_buf == NULL) {
3225       // give an appropriate warning message
3226       if (UseNUMAInterleaving) {
3227         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3228       }
3229       if (UseLargePagesIndividualAllocation) {
3230         warning("Individually allocated large pages failed, "
3231                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3232       }
3233       return NULL;
3234     }
3235 
3236     return p_buf;
3237 
3238   } else {
3239     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3240 
3241     // normal policy: just allocate it all at once
3242     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3243     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3244     if (res != NULL) {
3245       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3246     }
3247 
3248     return res;
3249   }
3250 }
3251 
3252 bool os::release_memory_special(char* base, size_t bytes) {
3253   assert(base != NULL, "Sanity check");
3254   return release_memory(base, bytes);
3255 }
3256 
3257 void os::print_statistics() {
3258 }
3259 
3260 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3261   int err = os::get_last_error();
3262   char buf[256];
3263   size_t buf_len = os::lasterror(buf, sizeof(buf));
3264   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3265           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3266           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3267 }
3268 
3269 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3270   if (bytes == 0) {
3271     // Don't bother the OS with noops.
3272     return true;
3273   }
3274   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3275   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3276   // Don't attempt to print anything if the OS call fails. We're
3277   // probably low on resources, so the print itself may cause crashes.
3278 
3279   // Unless we have NUMAInterleaving enabled, the range of a commit
3280   // is always within a reserve covered by a single VirtualAlloc;
3281   // in that case we can just do a single commit for the requested size.
3282   if (!UseNUMAInterleaving) {
3283     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3284       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3285       return false;
3286     }
3287     if (exec) {
3288       DWORD oldprot;
3289       // Windows doc says to use VirtualProtect to get execute permissions
3290       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3291         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3292         return false;
3293       }
3294     }
3295     return true;
3296   } else {
3297 
3298     // when NUMAInterleaving is enabled, the commit might cover a range that
3299     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3300     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3301     // returns represents the number of bytes that can be committed in one step.
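         // For example, a 5MB commit over 2MB interleaved chunks (assuming the
         // commit starts on a chunk boundary) is split into commits of 2MB, 2MB
         // and 1MB, each confined to a single underlying reserve.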
3302     size_t bytes_remaining = bytes;
3303     char * next_alloc_addr = addr;
3304     while (bytes_remaining > 0) {
3305       MEMORY_BASIC_INFORMATION alloc_info;
3306       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3307       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3308       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3309                        PAGE_READWRITE) == NULL) {
3310         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3311                                             exec);)
3312         return false;
3313       }
3314       if (exec) {
3315         DWORD oldprot;
3316         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3317                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3318           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3319                                               exec);)
3320           return false;
3321         }
3322       }
3323       bytes_remaining -= bytes_to_rq;
3324       next_alloc_addr += bytes_to_rq;
3325     }
3326   }
3327   // if we made it this far, return true
3328   return true;
3329 }
3330 
3331 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3332                           bool exec) {
3333   // alignment_hint is ignored on this OS
3334   return pd_commit_memory(addr, size, exec);
3335 }
3336 
3337 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3338                                   const char* mesg) {
3339   assert(mesg != NULL, "mesg must be specified");
3340   if (!pd_commit_memory(addr, size, exec)) {
3341     warn_fail_commit_memory(addr, size, exec);
3342     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3343   }
3344 }
3345 
3346 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3347                                   size_t alignment_hint, bool exec,
3348                                   const char* mesg) {
3349   // alignment_hint is ignored on this OS
3350   pd_commit_memory_or_exit(addr, size, exec, mesg);
3351 }
3352 
3353 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3354   if (bytes == 0) {
3355     // Don't bother the OS with noops.
3356     return true;
3357   }
3358   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3359   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3360   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3361 }
3362 
3363 bool os::pd_release_memory(char* addr, size_t bytes) {
3364   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3365 }
3366 
3367 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3368   return os::commit_memory(addr, size, !ExecMem);
3369 }
3370 
3371 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3372   return os::uncommit_memory(addr, size);
3373 }
3374 
3375 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3376   uint count = 0;
3377   bool ret = false;
3378   size_t bytes_remaining = bytes;
3379   char * next_protect_addr = addr;
3380 
3381   // Use VirtualQuery() to get the chunk size.
3382   while (bytes_remaining) {
3383     MEMORY_BASIC_INFORMATION alloc_info;
3384     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3385       return false;
3386     }
3387 
3388     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3389     // We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
3390     // but we don't distinguish here as both cases are protected by same API.
3391     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3392     if (!ret) {
3393       warning("Failed protecting pages individually for chunk #%u", count);
3394       return false;
3395     }
3396 
3397     bytes_remaining -= bytes_to_protect;
3398     next_protect_addr += bytes_to_protect;
3399     count++;
3400   }
3401   return ret;
3402 }
3403 
3404 // Set protections specified
3405 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3406                         bool is_committed) {
3407   unsigned int p = 0;
3408   switch (prot) {
3409   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3410   case MEM_PROT_READ: p = PAGE_READONLY; break;
3411   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3412   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3413   default:
3414     ShouldNotReachHere();
3415   }
3416 
3417   DWORD old_status;
3418 
3419   // Oddly enough, on Win32 one can change protection only for committed
3420   // memory; not a big deal anyway, as the sizes involved here are at most 64K.
3421   if (!is_committed) {
3422     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3423                           "cannot commit protection page");
3424   }
3425   // One cannot use os::guard_memory() here, as on Win32 guard pages
3426   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3427   //
3428   // Pages in the region become guard pages. Any attempt to access a guard page
3429   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3430   // the guard page status. Guard pages thus act as a one-time access alarm.
3431   bool ret;
3432   if (UseNUMAInterleaving) {
3433     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3434     // so we must protect the chunks individually.
3435     ret = protect_pages_individually(addr, bytes, p, &old_status);
3436   } else {
3437     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3438   }
3439 #ifdef ASSERT
3440   if (!ret) {
3441     int err = os::get_last_error();
3442     char buf[256];
3443     size_t buf_len = os::lasterror(buf, sizeof(buf));
3444     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3445           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3446           buf_len != 0 ? buf : "<no_error_string>", err);
3447   }
3448 #endif
3449   return ret;
3450 }
3451 
3452 bool os::guard_memory(char* addr, size_t bytes) {
3453   DWORD old_status;
3454   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3455 }
3456 
3457 bool os::unguard_memory(char* addr, size_t bytes) {
3458   DWORD old_status;
3459   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3460 }
3461 
3462 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3463 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3464 void os::numa_make_global(char *addr, size_t bytes)    { }
3465 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3466 bool os::numa_topology_changed()                       { return false; }
3467 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3468 int os::numa_get_group_id()                            { return 0; }
3469 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3470   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3471     // Provide an answer for UMA systems
3472     ids[0] = 0;
3473     return 1;
3474   } else {
3475     // check for size bigger than actual groups_num
3476     size = MIN2(size, numa_get_groups_num());
3477     for (int i = 0; i < (int)size; i++) {
3478       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3479     }
3480     return size;
3481   }
3482 }
3483 
3484 bool os::get_page_info(char *start, page_info* info) {
3485   return false;
3486 }
3487 
3488 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3489                      page_info* page_found) {
3490   return end;
3491 }
3492 
3493 char* os::non_memory_address_word() {
3494   // Must never look like an address returned by reserve_memory,
3495   // even in its subfields (as defined by the CPU immediate fields,
3496   // if the CPU splits constants across multiple instructions).
3497   return (char*)-1;
3498 }
3499 
3500 #define MAX_ERROR_COUNT 100
3501 #define SYS_THREAD_ERROR 0xffffffffUL
3502 
3503 void os::pd_start_thread(Thread* thread) {
3504   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3505   // Returns previous suspend state:
3506   // 0:  Thread was not suspended
3507   // 1:  Thread is running now
3508   // >1: Thread is still suspended.
3509   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3510 }
3511 
3512 class HighResolutionInterval : public CHeapObj<mtThread> {
3513   // The default timer resolution seems to be 10 milliseconds.
3514   // (Where is this written down?)
3515   // If someone wants to sleep for only a fraction of the default,
3516   // then we set the timer resolution down to 1 millisecond for
3517   // the duration of their interval.
3518   // We carefully set the resolution back, since otherwise we
3519   // seem to incur an overhead (3%?) that we don't need.
3520   // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
3521   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3522   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3523   // timeBeginPeriod() if the relative error exceeded some threshold.
3524   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3525   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
3526   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3527   // resolution timers running.
3528  private:
3529   jlong resolution;
3530  public:
3531   HighResolutionInterval(jlong ms) {
3532     resolution = ms % 10L;
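         // A non-zero remainder means the requested interval is not a multiple
         // of the ~10ms default timer period (e.g. ms == 3), so raise the timer
         // resolution to 1ms for the duration of this interval.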
3533     if (resolution != 0) {
3534       MMRESULT result = timeBeginPeriod(1L);
3535     }
3536   }
3537   ~HighResolutionInterval() {
3538     if (resolution != 0) {
3539       MMRESULT result = timeEndPeriod(1L);
3540     }
3541     resolution = 0L;
3542   }
3543 };
3544 
3545 int os::sleep(Thread* thread, jlong ms, bool interruptable) {
3546   jlong limit = (jlong) MAXDWORD;
3547 
3548   while (ms > limit) {
3549     int res;
3550     if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
3551       return res;
3552     }
3553     ms -= limit;
3554   }
3555 
3556   assert(thread == Thread::current(), "thread consistency check");
3557   OSThread* osthread = thread->osthread();
3558   OSThreadWaitState osts(osthread, false /* not Object.wait() */);
3559   int result;
3560   if (interruptable) {
3561     assert(thread->is_Java_thread(), "must be java thread");
3562     JavaThread *jt = (JavaThread *) thread;
3563     ThreadBlockInVM tbivm(jt);
3564 
3565     jt->set_suspend_equivalent();
3566     // cleared by handle_special_suspend_equivalent_condition() or
3567     // java_suspend_self() via check_and_wait_while_suspended()
3568 
3569     HANDLE events[1];
3570     events[0] = osthread->interrupt_event();
3571     HighResolutionInterval *phri=NULL;
3572     if (!ForceTimeHighResolution) {
3573       phri = new HighResolutionInterval(ms);
3574     }
3575     if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
3576       result = OS_TIMEOUT;
3577     } else {
3578       ResetEvent(osthread->interrupt_event());
3579       osthread->set_interrupted(false);
3580       result = OS_INTRPT;
3581     }
3582     delete phri; //if it is NULL, harmless
3583 
3584     // were we externally suspended while we were waiting?
3585     jt->check_and_wait_while_suspended();
3586   } else {
3587     assert(!thread->is_Java_thread(), "must not be java thread");
3588     Sleep((long) ms);
3589     result = OS_TIMEOUT;
3590   }
3591   return result;
3592 }
3593 
3594 // Short sleep, direct OS call.
3595 //
3596 // ms = 0, means allow others (if any) to run.
3597 //
3598 void os::naked_short_sleep(jlong ms) {
3599   assert(ms < 1000, "Uninterruptible sleep, short time use only");
3600   Sleep(ms);
3601 }
3602 
3603 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3604 void os::infinite_sleep() {
3605   while (true) {    // sleep forever ...
3606     Sleep(100000);  // ... 100 seconds at a time
3607   }
3608 }
3609 
3610 typedef BOOL (WINAPI * STTSignature)(void);
3611 
3612 void os::naked_yield() {
3613   // Consider passing back the return value from SwitchToThread().
3614   SwitchToThread();
3615 }
3616 
3617 // Win32 only gives you access to seven real priorities at a time,
3618 // so we compress Java's ten down to seven.  It would be better
3619 // if we dynamically adjusted relative priorities.
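     // In the default table below, for instance, Java priorities 5 and 6 both map
     // to THREAD_PRIORITY_NORMAL, and 9 through 11 all map to THREAD_PRIORITY_HIGHEST.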
3620 
3621 int os::java_to_os_priority[CriticalPriority + 1] = {
3622   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3623   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3624   THREAD_PRIORITY_LOWEST,                       // 2
3625   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3626   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3627   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3628   THREAD_PRIORITY_NORMAL,                       // 6
3629   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3630   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3631   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3632   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3633   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3634 };
3635 
3636 int prio_policy1[CriticalPriority + 1] = {
3637   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3638   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3639   THREAD_PRIORITY_LOWEST,                       // 2
3640   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3641   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3642   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3643   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3644   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3645   THREAD_PRIORITY_HIGHEST,                      // 8
3646   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3647   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3648   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3649 };
3650 
3651 static int prio_init() {
3652   // If ThreadPriorityPolicy is 1, switch tables
3653   if (ThreadPriorityPolicy == 1) {
3654     int i;
3655     for (i = 0; i < CriticalPriority + 1; i++) {
3656       os::java_to_os_priority[i] = prio_policy1[i];
3657     }
3658   }
3659   if (UseCriticalJavaThreadPriority) {
3660     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3661   }
3662   return 0;
3663 }
3664 
3665 OSReturn os::set_native_priority(Thread* thread, int priority) {
3666   if (!UseThreadPriorities) return OS_OK;
3667   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3668   return ret ? OS_OK : OS_ERR;
3669 }
3670 
3671 OSReturn os::get_native_priority(const Thread* const thread,
3672                                  int* priority_ptr) {
3673   if (!UseThreadPriorities) {
3674     *priority_ptr = java_to_os_priority[NormPriority];
3675     return OS_OK;
3676   }
3677   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3678   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3679     assert(false, "GetThreadPriority failed");
3680     return OS_ERR;
3681   }
3682   *priority_ptr = os_prio;
3683   return OS_OK;
3684 }
3685 
3686 
3687 // Hint to the underlying OS that a task switch would not be good.
3688 // Void return because it's a hint and can fail.
3689 void os::hint_no_preempt() {}
3690 
3691 void os::interrupt(Thread* thread) {
3692   assert(!thread->is_Java_thread() || Thread::current() == thread ||
3693          Threads_lock->owned_by_self(),
3694          "possibility of dangling Thread pointer");
3695 
3696   OSThread* osthread = thread->osthread();
3697   osthread->set_interrupted(true);
3698   // More than one thread can get here with the same value of osthread,
3699   // resulting in multiple notifications.  We do, however, want the store
3700   // to interrupted() to be visible to other threads before we post
3701   // the interrupt event.
3702   OrderAccess::release();
3703   SetEvent(osthread->interrupt_event());
3704   // For JSR166:  unpark after setting status
3705   if (thread->is_Java_thread()) {
3706     ((JavaThread*)thread)->parker()->unpark();
3707   }
3708 
3709   ParkEvent * ev = thread->_ParkEvent;
3710   if (ev != NULL) ev->unpark();
3711 }
3712 
3713 
3714 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3715   assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3716          "possibility of dangling Thread pointer");
3717 
3718   OSThread* osthread = thread->osthread();
3719   // There is no synchronization between the setting of the interrupt
3720   // and it being cleared here. It is critical - see 6535709 - that
3721   // we only clear the interrupt state, and reset the interrupt event,
3722   // if we are going to report that we were indeed interrupted - else
3723   // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3724   // depending on the timing. We check the thread's interrupt event to see
3725   // whether the thread really was interrupted, which prevents spurious wakeups.
3726   bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3727   if (interrupted && clear_interrupted) {
3728     osthread->set_interrupted(false);
3729     ResetEvent(osthread->interrupt_event());
3730   } // Otherwise leave the interrupted state alone
3731 
3732   return interrupted;
3733 }
3734 
3735 // Gets a pc (hint) for a running thread. Currently used only for profiling.
3736 ExtendedPC os::get_thread_pc(Thread* thread) {
3737   CONTEXT context;
3738   context.ContextFlags = CONTEXT_CONTROL;
3739   HANDLE handle = thread->osthread()->thread_handle();
3740 #ifdef _M_IA64
3741   assert(0, "Fix get_thread_pc");
3742   return ExtendedPC(NULL);
3743 #else
3744   if (GetThreadContext(handle, &context)) {
3745 #ifdef _M_AMD64
3746     return ExtendedPC((address) context.Rip);
3747 #else
3748     return ExtendedPC((address) context.Eip);
3749 #endif
3750   } else {
3751     return ExtendedPC(NULL);
3752   }
3753 #endif
3754 }
3755 
3756 // GetCurrentThreadId() returns DWORD
3757 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3758 
3759 static int _initial_pid = 0;
3760 
3761 int os::current_process_id() {
3762   return (_initial_pid ? _initial_pid : _getpid());
3763 }
3764 
3765 int    os::win32::_vm_page_size              = 0;
3766 int    os::win32::_vm_allocation_granularity = 0;
3767 int    os::win32::_processor_type            = 0;
3768 // Processor level is not available on non-NT systems, use vm_version instead
3769 int    os::win32::_processor_level           = 0;
3770 julong os::win32::_physical_memory           = 0;
3771 size_t os::win32::_default_stack_size        = 0;
3772 
3773 intx          os::win32::_os_thread_limit    = 0;
3774 volatile intx os::win32::_os_thread_count    = 0;
3775 
3776 bool   os::win32::_is_windows_server         = false;
3777 
3778 // 6573254
3779 // Currently, the bug is observed across all the supported Windows releases,
3780 // including the latest one (as of this writing - Windows Server 2012 R2)
3781 bool   os::win32::_has_exit_bug              = true;
3782 
3783 void os::win32::initialize_system_info() {
3784   SYSTEM_INFO si;
3785   GetSystemInfo(&si);
3786   _vm_page_size    = si.dwPageSize;
3787   _vm_allocation_granularity = si.dwAllocationGranularity;
3788   _processor_type  = si.dwProcessorType;
3789   _processor_level = si.wProcessorLevel;
3790   set_processor_count(si.dwNumberOfProcessors);
3791 
3792   MEMORYSTATUSEX ms;
3793   ms.dwLength = sizeof(ms);
3794 
3795   // MEMORYSTATUSEX also provides ullAvailPhys (free physical memory bytes),
3796   // ullTotalVirtual, ullAvailVirtual, and dwMemoryLoad (% of memory in use)
3797   GlobalMemoryStatusEx(&ms);
3798   _physical_memory = ms.ullTotalPhys;
3799 
3800   OSVERSIONINFOEX oi;
3801   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3802   GetVersionEx((OSVERSIONINFO*)&oi);
3803   switch (oi.dwPlatformId) {
3804   case VER_PLATFORM_WIN32_NT:
3805     {
3806       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3807       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3808           oi.wProductType == VER_NT_SERVER) {
3809         _is_windows_server = true;
3810       }
3811     }
3812     break;
3813   default: fatal("Unknown platform");
3814   }
3815 
3816   _default_stack_size = os::current_stack_size();
3817   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3818   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3819          "stack size not a multiple of page size");
3820 
3821   initialize_performance_counter();
3822 }
3823 
3824 
3825 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3826                                       int ebuflen) {
3827   char path[MAX_PATH];
3828   DWORD size;
3829   DWORD pathLen = (DWORD)sizeof(path);
3830   HINSTANCE result = NULL;
3831 
3832   // only allow library name without path component
3833   assert(strchr(name, '\\') == NULL, "path not allowed");
3834   assert(strchr(name, ':') == NULL, "path not allowed");
3835   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3836     jio_snprintf(ebuf, ebuflen,
3837                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3838     return NULL;
3839   }
3840 
3841   // search system directory
3842   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3843     if (size >= pathLen) {
3844       return NULL; // truncated
3845     }
3846     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3847       return NULL; // truncated
3848     }
3849     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3850       return result;
3851     }
3852   }
3853 
3854   // try Windows directory
3855   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3856     if (size >= pathLen) {
3857       return NULL; // truncated
3858     }
3859     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3860       return NULL; // truncated
3861     }
3862     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3863       return result;
3864     }
3865   }
3866 
3867   jio_snprintf(ebuf, ebuflen,
3868                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3869   return NULL;
3870 }
3871 
3872 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3873 #define EXIT_TIMEOUT 300000 /* 5 minutes */
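     // MAXIMUM_WAIT_OBJECTS is 64, so the handle array used in
     // exit_process_or_thread() below can track up to 16 * 64 = 1024 exiting
     // threads; WaitForMultipleObjects() is applied to it in portions of at
     // most 64 handles.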
3874 
3875 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3876   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3877   return TRUE;
3878 }
3879 
3880 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3881   // Basic approach:
3882   //  - Each exiting thread registers its intent to exit and then does so.
3883   //  - A thread trying to terminate the process must wait for all
3884   //    threads currently exiting to complete their exit.
3885 
3886   if (os::win32::has_exit_bug()) {
3887     // The array holds handles of the threads that have started exiting by calling
3888     // _endthreadex().
3889     // Should be large enough to avoid blocking the exiting thread due to lack of
3890     // a free slot.
3891     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3892     static int handle_count = 0;
3893 
3894     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3895     static CRITICAL_SECTION crit_sect;
3896     static volatile jint process_exiting = 0;
3897     int i, j;
3898     DWORD res;
3899     HANDLE hproc, hthr;
3900 
3901     // We only attempt to register threads until a process exiting
3902     // thread manages to set the process_exiting flag. Any threads
3903     // that come through here after the process_exiting flag is set
3904     // are unregistered and will be caught in the SuspendThread()
3905     // infinite loop below.
3906     bool registered = false;
3907 
3908     // The first thread that reached this point, initializes the critical section.
3909     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3910       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3911     } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3912       if (what != EPT_THREAD) {
3913         // Atomically set process_exiting before the critical section
3914         // to increase the visibility between racing threads.
3915         Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
3916       }
3917       EnterCriticalSection(&crit_sect);
3918 
3919       if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3920         // Remove from the array those handles of the threads that have completed exiting.
3921         for (i = 0, j = 0; i < handle_count; ++i) {
3922           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3923           if (res == WAIT_TIMEOUT) {
3924             handles[j++] = handles[i];
3925           } else {
3926             if (res == WAIT_FAILED) {
3927               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3928                       GetLastError(), __FILE__, __LINE__);
3929             }
3930             // Don't keep the handle, if we failed waiting for it.
3931             CloseHandle(handles[i]);
3932           }
3933         }
3934 
3935         // If there's no free slot in the array of the kept handles, we'll have to
3936         // wait until at least one thread completes exiting.
3937         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3938           // Raise the priority of the oldest exiting thread to increase its chances
3939           // to complete sooner.
3940           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3941           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3942           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3943             i = (res - WAIT_OBJECT_0);
3944             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3945             for (; i < handle_count; ++i) {
3946               handles[i] = handles[i + 1];
3947             }
3948           } else {
3949             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3950                     (res == WAIT_FAILED ? "failed" : "timed out"),
3951                     GetLastError(), __FILE__, __LINE__);
3952             // Don't keep handles, if we failed waiting for them.
3953             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3954               CloseHandle(handles[i]);
3955             }
3956             handle_count = 0;
3957           }
3958         }
3959 
3960         // Store a duplicate of the current thread handle in the array of handles.
3961         hproc = GetCurrentProcess();
3962         hthr = GetCurrentThread();
3963         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3964                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3965           warning("DuplicateHandle failed (%u) in %s: %d\n",
3966                   GetLastError(), __FILE__, __LINE__);
3967 
3968           // We can't register this thread (no more handles) so this thread
3969           // may be racing with a thread that is calling exit(). If the thread
3970           // that is calling exit() has managed to set the process_exiting
3971           // flag, then this thread will be caught in the SuspendThread()
3972           // infinite loop below which closes that race. A small timing
3973           // window remains before the process_exiting flag is set, but it
3974           // is only exposed when we are out of handles.
3975         } else {
3976           ++handle_count;
3977           registered = true;
3978 
3979           // The current exiting thread has stored its handle in the array, and now
3980           // should leave the critical section before calling _endthreadex().
3981         }
3982 
3983       } else if (what != EPT_THREAD && handle_count > 0) {
3984         jlong start_time, finish_time, timeout_left;
3985         // Before ending the process, make sure all the threads that had called
3986         // _endthreadex() completed.
3987 
3988         // Set the priority level of the current thread to the same value as
3989         // the priority level of exiting threads.
3990         // This is to ensure it will be given a fair chance to execute if
3991         // the timeout expires.
3992         hthr = GetCurrentThread();
3993         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3994         start_time = os::javaTimeNanos();
3995         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3996         for (i = 0; ; ) {
3997           int portion_count = handle_count - i;
3998           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3999             portion_count = MAXIMUM_WAIT_OBJECTS;
4000           }
4001           for (j = 0; j < portion_count; ++j) {
4002             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
4003           }
4004           timeout_left = (finish_time - start_time) / 1000000L;
4005           if (timeout_left < 0) {
4006             timeout_left = 0;
4007           }
4008           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4009           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4010             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4011                     (res == WAIT_FAILED ? "failed" : "timed out"),
4012                     GetLastError(), __FILE__, __LINE__);
4013             // Reset portion_count so we close the remaining
4014             // handles due to this error.
4015             portion_count = handle_count - i;
4016           }
4017           for (j = 0; j < portion_count; ++j) {
4018             CloseHandle(handles[i + j]);
4019           }
4020           if ((i += portion_count) >= handle_count) {
4021             break;
4022           }
4023           start_time = os::javaTimeNanos();
4024         }
4025         handle_count = 0;
4026       }
4027 
4028       LeaveCriticalSection(&crit_sect);
4029     }
4030 
4031     if (!registered &&
4032         OrderAccess::load_acquire(&process_exiting) != 0 &&
4033         process_exiting != (jint)GetCurrentThreadId()) {
4034       // Some other thread is about to call exit(), so we don't let
4035       // the current unregistered thread proceed to exit() or _endthreadex()
4036       while (true) {
4037         SuspendThread(GetCurrentThread());
4038         // Avoid busy-wait loop, if SuspendThread() failed.
4039         Sleep(EXIT_TIMEOUT);
4040       }
4041     }
4042   }
4043 
4044   // We are here if either
4045   // - there's no 'race at exit' bug on this OS release;
4046   // - initialization of the critical section failed (unlikely);
4047   // - the current thread has registered itself and left the critical section;
4048   // - the process-exiting thread has raised the flag and left the critical section.
4049   if (what == EPT_THREAD) {
4050     _endthreadex((unsigned)exit_code);
4051   } else if (what == EPT_PROCESS) {
4052     ::exit(exit_code);
4053   } else {
4054     _exit(exit_code);
4055   }
4056 
4057   // Should not reach here
4058   return exit_code;
4059 }
4060 
4061 #undef EXIT_TIMEOUT
4062 
4063 void os::win32::setmode_streams() {
4064   _setmode(_fileno(stdin), _O_BINARY);
4065   _setmode(_fileno(stdout), _O_BINARY);
4066   _setmode(_fileno(stderr), _O_BINARY);
4067 }
4068 
4069 
4070 bool os::is_debugger_attached() {
4071   return IsDebuggerPresent() ? true : false;
4072 }
4073 
4074 
4075 void os::wait_for_keypress_at_exit(void) {
4076   if (PauseAtExit) {
4077     fprintf(stderr, "Press any key to continue...\n");
4078     fgetc(stdin);
4079   }
4080 }
4081 
4082 
4083 bool os::message_box(const char* title, const char* message) {
4084   int result = MessageBox(NULL, message, title,
4085                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4086   return result == IDYES;
4087 }
4088 
4089 #ifndef PRODUCT
4090 #ifndef _WIN64
4091 // Helpers to check whether NX protection is enabled
4092 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4093   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4094       pex->ExceptionRecord->NumberParameters > 0 &&
4095       pex->ExceptionRecord->ExceptionInformation[0] ==
4096       EXCEPTION_INFO_EXEC_VIOLATION) {
4097     return EXCEPTION_EXECUTE_HANDLER;
4098   }
4099   return EXCEPTION_CONTINUE_SEARCH;
4100 }
4101 
4102 void nx_check_protection() {
4103   // If NX is enabled we'll get an exception calling into code on the stack
4104   char code[] = { (char)0xC3 }; // ret
4105   void *code_ptr = (void *)code;
4106   __try {
4107     __asm call code_ptr
4108   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4109     tty->print_raw_cr("NX protection detected.");
4110   }
4111 }
4112 #endif // _WIN64
4113 #endif // PRODUCT
4114 
4115 // This is called _before_ the global arguments have been parsed
4116 void os::init(void) {
4117   _initial_pid = _getpid();
4118 
4119   init_random(1234567);
4120 
4121   win32::initialize_system_info();
4122   win32::setmode_streams();
4123   init_page_sizes((size_t) win32::vm_page_size());
4124 
4125   // This may be overridden later when argument processing is done.
4126   FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);
4127 
4128   // Initialize main_process and main_thread
4129   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4130   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4131                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4132     fatal("DuplicateHandle failed\n");
4133   }
4134   main_thread_id = (int) GetCurrentThreadId();
4135 
4136   // initialize fast thread access - only used for 32-bit
4137   win32::initialize_thread_ptr_offset();
4138 }
4139 
4140 // To install functions for atexit processing
4141 extern "C" {
4142   static void perfMemory_exit_helper() {
4143     perfMemory_exit();
4144   }
4145 }
4146 
4147 static jint initSock();
4148 
4149 // this is called _after_ the global arguments have been parsed
4150 jint os::init_2(void) {
4151   // Allocate a single page and mark it as readable for safepoint polling
4152   address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
4153   guarantee(polling_page != NULL, "Reserve Failed for polling page");
4154 
4155   address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
4156   guarantee(return_page != NULL, "Commit Failed for polling page");
4157 
4158   os::set_polling_page(polling_page);
4159   log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
4160 
4161   if (!UseMembar) {
4162     address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
4163     guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
4164 
4165     return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
4166     guarantee(return_page != NULL, "Commit Failed for memory serialize page");
4167 
4168     os::set_memory_serialize_page(mem_serialize_page);
4169     log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
4170   }
4171 
4172   // Setup Windows Exceptions
4173 
4174   // for debugging float code generation bugs
4175   if (ForceFloatExceptions) {
4176 #ifndef  _WIN64
4177     static long fp_control_word = 0;
4178     __asm { fstcw fp_control_word }
4179     // see Intel PPro Manual, Vol. 2, p 7-16
4180     const long precision = 0x20;
4181     const long underflow = 0x10;
4182     const long overflow  = 0x08;
4183     const long zero_div  = 0x04;
4184     const long denorm    = 0x02;
4185     const long invalid   = 0x01;
4186     fp_control_word |= invalid;
4187     __asm { fldcw fp_control_word }
4188 #endif
4189   }
4190 
4191   // If stack_commit_size is 0, Windows will reserve the default size,
4192   // but only commit a small portion of it.
4193   size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
4194   size_t default_reserve_size = os::win32::default_stack_size();
4195   size_t actual_reserve_size = stack_commit_size;
4196   if (stack_commit_size < default_reserve_size) {
4197     // If stack_commit_size == 0, we want this too
4198     actual_reserve_size = default_reserve_size;
4199   }
4200 
4201   // Check minimum allowable stack size for thread creation and to initialize
4202   // the java system classes, including StackOverflowError - depends on page
4203   // size.  Add two 4K pages for compiler2 recursion in main thread.
4204   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4205   // class initialization depending on 32 or 64 bit VM.
4206   size_t min_stack_allowed =
4207             (size_t)(JavaThread::stack_guard_zone_size() +
4208                      JavaThread::stack_shadow_zone_size() +
4209                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4210 
4211   min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
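       // For example, on a 64-bit VM with C2 compiled in, the last term works out
       // to (4*8 + 2) * 4K = 136K on top of the guard and shadow zone sizes.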
4212 
4213   if (actual_reserve_size < min_stack_allowed) {
4214     tty->print_cr("\nThe stack size specified is too small; "
4215                   "specify at least " SIZE_FORMAT "k",
4216                   min_stack_allowed / K);
4217     return JNI_ERR;
4218   }
4219 
4220   JavaThread::set_stack_size_at_create(stack_commit_size);
4221 
4222   // Calculate theoretical max. size of Threads to guard against artificial
4223   // out-of-memory situations, where all available address-space has been
4224   // reserved by thread stacks.
4225   assert(actual_reserve_size != 0, "Must have a stack");
4226 
4227   // Calculate the thread limit when we should start doing Virtual Memory
4228   // banging. Currently when the threads will have used all but 200Mb of space.
4229   //
4230   // TODO: consider performing a similar calculation for commit size instead
4231   // as reserve size, since on a 64-bit platform we'll run into that more
4232   // often than running out of virtual memory space.  We can use the
4233   // lower value of the two calculations as the os_thread_limit.
4234   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4235   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
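       // For example, on a 32-bit VM with a 1MB reserve per stack this comes to
       // (2GB - 200MB) / 1MB = 1848 threads; on a 64-bit VM the limit is
       // effectively unbounded.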
4236 
4237   // at exit methods are called in the reverse order of their registration.
4238   // there is no limit to the number of functions registered. atexit does
4239   // not set errno.
4240 
4241   if (PerfAllowAtExitRegistration) {
4242     // only register atexit functions if PerfAllowAtExitRegistration is set.
4243     // atexit functions can be delayed until process exit time, which
4244     // can be problematic for embedded VM situations. Embedded VMs should
4245     // call DestroyJavaVM() to assure that VM resources are released.
4246 
4247     // note: perfMemory_exit_helper atexit function may be removed in
4248     // the future if the appropriate cleanup code can be added to the
4249     // VM_Exit VMOperation's doit method.
4250     if (atexit(perfMemory_exit_helper) != 0) {
4251       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4252     }
4253   }
4254 
4255 #ifndef _WIN64
4256   // Print something if NX is enabled (win32 on AMD64)
4257   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4258 #endif
4259 
4260   // initialize thread priority policy
4261   prio_init();
4262 
4263   if (UseNUMA && !ForceNUMA) {
4264     UseNUMA = false; // We don't fully support this yet
4265   }
4266 
4267   if (UseNUMAInterleaving) {
4268     // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4269     bool success = numa_interleaving_init();
4270     if (!success) UseNUMAInterleaving = false;
4271   }
4272 
4273   if (initSock() != JNI_OK) {
4274     return JNI_ERR;
4275   }
4276 
4277   return JNI_OK;
4278 }
4279 
4280 // Mark the polling page as unreadable
4281 void os::make_polling_page_unreadable(void) {
4282   DWORD old_status;
4283   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4284                       PAGE_NOACCESS, &old_status)) {
4285     fatal("Could not disable polling page");
4286   }
4287 }
4288 
4289 // Mark the polling page as readable
4290 void os::make_polling_page_readable(void) {
4291   DWORD old_status;
4292   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4293                       PAGE_READONLY, &old_status)) {
4294     fatal("Could not enable polling page");
4295   }
4296 }
4297 
4298 
4299 int os::stat(const char *path, struct stat *sbuf) {
4300   char pathbuf[MAX_PATH];
4301   if (strlen(path) > MAX_PATH - 1) {
4302     errno = ENAMETOOLONG;
4303     return -1;
4304   }
4305   os::native_path(strcpy(pathbuf, path));
4306   int ret = ::stat(pathbuf, sbuf);
4307   if (sbuf != NULL && UseUTCFileTimestamp) {
4308     // Fix for 6539723.  st_mtime returned from stat() is dependent on
4309     // the system timezone and so can return different values for the
4310     // same file if/when daylight savings time changes.  This adjustment
4311     // makes sure the same timestamp is returned regardless of the TZ.
4312     //
4313     // See:
4314     // http://msdn.microsoft.com/library/
4315     //   default.asp?url=/library/en-us/sysinfo/base/
4316     //   time_zone_information_str.asp
4317     // and
4318     // http://msdn.microsoft.com/library/default.asp?url=
4319     //   /library/en-us/sysinfo/base/settimezoneinformation.asp
4320     //
    // NOTE: there is an insidious bug here:  If the timezone is changed
4322     // after the call to stat() but before 'GetTimeZoneInformation()', then
4323     // the adjustment we do here will be wrong and we'll return the wrong
4324     // value (which will likely end up creating an invalid class data
4325     // archive).  Absent a better API for this, or some time zone locking
4326     // mechanism, we'll have to live with this risk.
4327     TIME_ZONE_INFORMATION tz;
4328     DWORD tzid = GetTimeZoneInformation(&tz);
4329     int daylightBias =
4330       (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
4331     sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4332   }
4333   return ret;
4334 }
4335 
4336 
4337 #define FT2INT64(ft) \
4338   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
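
// A win32 FILETIME counts 100-nanosecond intervals; FT2INT64 simply folds its
// two 32-bit halves into one signed 64-bit count.  This is why the CPU time
// accessors below multiply the result by 100 to report nanoseconds.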
4339 
4340 
4341 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4342 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4343 // of a thread.
4344 //
// current_thread_cpu_time() and thread_cpu_time(Thread*) return
// the fast estimate available on the platform.
4347 
4348 // current_thread_cpu_time() is not optimized for Windows yet
4349 jlong os::current_thread_cpu_time() {
4350   // return user + sys since the cost is the same
4351   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4352 }
4353 
4354 jlong os::thread_cpu_time(Thread* thread) {
4355   // consistent with what current_thread_cpu_time() returns.
4356   return os::thread_cpu_time(thread, true /* user+sys */);
4357 }
4358 
4359 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4360   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4361 }
4362 
4363 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is a copy of the classic VM's hpi::sysThreadCPUTime.
4365   // If this function changes, os::is_thread_cpu_time_supported() should too
4366   FILETIME CreationTime;
4367   FILETIME ExitTime;
4368   FILETIME KernelTime;
4369   FILETIME UserTime;
4370 
4371   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4372                       &ExitTime, &KernelTime, &UserTime) == 0) {
4373     return -1;
4374   } else if (user_sys_cpu_time) {
4375     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4376   } else {
4377     return FT2INT64(UserTime) * 100;
4378   }
4379 }
4380 
4381 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4382   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4383   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4384   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4385   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4386 }
4387 
4388 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4389   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4390   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4391   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4392   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4393 }
4394 
4395 bool os::is_thread_cpu_time_supported() {
4396   // see os::thread_cpu_time
4397   FILETIME CreationTime;
4398   FILETIME ExitTime;
4399   FILETIME KernelTime;
4400   FILETIME UserTime;
4401 
4402   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4403                       &KernelTime, &UserTime) == 0) {
4404     return false;
4405   } else {
4406     return true;
4407   }
4408 }
4409 
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4411 // It does have primitives (PDH API) to get CPU usage and run queue length.
4412 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4413 // If we wanted to implement loadavg on Windows, we have a few options:
4414 //
4415 // a) Query CPU usage and run queue length and "fake" an answer by
4416 //    returning the CPU usage if it's under 100%, and the run queue
4417 //    length otherwise.  It turns out that querying is pretty slow
4418 //    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on Windows the CPU usage value is the % usage
4420 //    since the last time the API was called (and the first call
4421 //    returns 100%), so we'd have to deal with that as well.
4422 //
4423 // b) Sample the "fake" answer using a sampling thread and store
4424 //    the answer in a global variable.  The call to loadavg would
4425 //    just return the value of the global, avoiding the slow query.
4426 //
4427 // c) Sample a better answer using exponential decay to smooth the
4428 //    value.  This is basically the algorithm used by UNIX kernels.
4429 //
4430 // Note that sampling thread starvation could affect both (b) and (c).
4431 int os::loadavg(double loadavg[], int nelem) {
4432   return -1;
4433 }
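
// Illustrative sketch only, not compiled into the VM: option (a) above could
// sample the "\\Processor(_Total)\\% Processor Time" counter through the PDH
// API.  This assumes <pdh.h> is included and pdh.lib is linked, neither of
// which is currently the case in this file.
#if 0
static bool sample_processor_time(double* percent_busy) {
  PDH_HQUERY query = NULL;
  PDH_HCOUNTER counter = NULL;
  if (PdhOpenQuery(NULL, 0, &query) != ERROR_SUCCESS) {
    return false;
  }
  if (PdhAddEnglishCounterA(query, "\\Processor(_Total)\\% Processor Time",
                            0, &counter) != ERROR_SUCCESS) {
    PdhCloseQuery(query);
    return false;
  }
  // The first collection only establishes a baseline; a second collection is
  // needed before the formatted value is meaningful.
  PdhCollectQueryData(query);
  Sleep(100);
  PDH_FMT_COUNTERVALUE value;
  bool ok = PdhCollectQueryData(query) == ERROR_SUCCESS &&
            PdhGetFormattedCounterValue(counter, PDH_FMT_DOUBLE,
                                        NULL, &value) == ERROR_SUCCESS;
  if (ok) {
    *percent_busy = value.doubleValue;
  }
  PdhCloseQuery(query);
  return ok;
}
#endif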
4434 
4435 
4436 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4437 bool os::dont_yield() {
4438   return DontYieldALot;
4439 }
4440 
4441 // This method is a slightly reworked copy of JDK's sysOpen
4442 // from src/windows/hpi/src/sys_api_md.c
4443 
4444 int os::open(const char *path, int oflag, int mode) {
4445   char pathbuf[MAX_PATH];
4446 
4447   if (strlen(path) > MAX_PATH - 1) {
4448     errno = ENAMETOOLONG;
4449     return -1;
4450   }
4451   os::native_path(strcpy(pathbuf, path));
4452   return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4453 }
4454 
4455 FILE* os::open(int fd, const char* mode) {
4456   return ::_fdopen(fd, mode);
4457 }
4458 
4459 // Is a (classpath) directory empty?
4460 bool os::dir_is_empty(const char* path) {
4461   WIN32_FIND_DATA fd;
4462   HANDLE f = FindFirstFile(path, &fd);
4463   if (f == INVALID_HANDLE_VALUE) {
4464     return true;
4465   }
4466   FindClose(f);
4467   return false;
4468 }
4469 
4470 // create binary file, rewriting existing file if required
4471 int os::create_binary_file(const char* path, bool rewrite_existing) {
4472   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4473   if (!rewrite_existing) {
4474     oflags |= _O_EXCL;
4475   }
4476   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4477 }
4478 
4479 // return current position of file pointer
4480 jlong os::current_file_offset(int fd) {
4481   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4482 }
4483 
4484 // move file pointer to the specified offset
4485 jlong os::seek_to_file_offset(int fd, jlong offset) {
4486   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4487 }
4488 
4489 
4490 jlong os::lseek(int fd, jlong offset, int whence) {
4491   return (jlong) ::_lseeki64(fd, offset, whence);
4492 }
4493 
4494 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4495   OVERLAPPED ov;
4496   DWORD nread;
4497   BOOL result;
4498 
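  // Place the 64-bit offset in the OVERLAPPED structure so that ReadFile
  // starts at that absolute file position rather than at the handle's
  // current file pointer.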
4499   ZeroMemory(&ov, sizeof(ov));
4500   ov.Offset = (DWORD)offset;
4501   ov.OffsetHigh = (DWORD)(offset >> 32);
4502 
4503   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4504 
4505   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4506 
4507   return result ? nread : 0;
4508 }
4509 
4510 
4511 // This method is a slightly reworked copy of JDK's sysNativePath
4512 // from src/windows/hpi/src/path_md.c
4513 
4514 // Convert a pathname to native format.  On win32, this involves forcing all
4515 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4516 // sometimes rejects '/') and removing redundant separators.  The input path is
4517 // assumed to have been converted into the character encoding used by the local
4518 // system.  Because this might be a double-byte encoding, care is taken to
4519 // treat double-byte lead characters correctly.
4520 //
4521 // This procedure modifies the given path in place, as the result is never
4522 // longer than the original.  There is no error return; this operation always
4523 // succeeds.
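//
// For example, "/c:/tmp//log/" becomes "c:\tmp\log", and "//server/share/"
// becomes "\\server\share".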
4524 char * os::native_path(char *path) {
4525   char *src = path, *dst = path, *end = path;
4526   char *colon = NULL;  // If a drive specifier is found, this will
4527                        // point to the colon following the drive letter
4528 
4529   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4530   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4531           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4532 
4533   // Check for leading separators
4534 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4535   while (isfilesep(*src)) {
4536     src++;
4537   }
4538 
4539   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4540     // Remove leading separators if followed by drive specifier.  This
4541     // hack is necessary to support file URLs containing drive
4542     // specifiers (e.g., "file://c:/path").  As a side effect,
4543     // "/c:/path" can be used as an alternative to "c:/path".
4544     *dst++ = *src++;
4545     colon = dst;
4546     *dst++ = ':';
4547     src++;
4548   } else {
4549     src = path;
4550     if (isfilesep(src[0]) && isfilesep(src[1])) {
4551       // UNC pathname: Retain first separator; leave src pointed at
4552       // second separator so that further separators will be collapsed
4553       // into the second separator.  The result will be a pathname
4554       // beginning with "\\\\" followed (most likely) by a host name.
4555       src = dst = path + 1;
4556       path[0] = '\\';     // Force first separator to '\\'
4557     }
4558   }
4559 
4560   end = dst;
4561 
4562   // Remove redundant separators from remainder of path, forcing all
4563   // separators to be '\\' rather than '/'. Also, single byte space
4564   // characters are removed from the end of the path because those
4565   // are not legal ending characters on this operating system.
4566   //
4567   while (*src != '\0') {
4568     if (isfilesep(*src)) {
4569       *dst++ = '\\'; src++;
4570       while (isfilesep(*src)) src++;
4571       if (*src == '\0') {
4572         // Check for trailing separator
4573         end = dst;
4574         if (colon == dst - 2) break;  // "z:\\"
4575         if (dst == path + 1) break;   // "\\"
4576         if (dst == path + 2 && isfilesep(path[0])) {
4577           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4578           // beginning of a UNC pathname.  Even though it is not, by
4579           // itself, a valid UNC pathname, we leave it as is in order
4580           // to be consistent with the path canonicalizer as well
4581           // as the win32 APIs, which treat this case as an invalid
4582           // UNC pathname rather than as an alias for the root
4583           // directory of the current drive.
4584           break;
4585         }
4586         end = --dst;  // Path does not denote a root directory, so
4587                       // remove trailing separator
4588         break;
4589       }
4590       end = dst;
4591     } else {
4592       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4593         *dst++ = *src++;
4594         if (*src) *dst++ = *src++;
4595         end = dst;
4596       } else {  // Copy a single-byte character
4597         char c = *src++;
4598         *dst++ = c;
4599         // Space is not a legal ending character
4600         if (c != ' ') end = dst;
4601       }
4602     }
4603   }
4604 
4605   *end = '\0';
4606 
4607   // For "z:", add "." to work around a bug in the C runtime library
4608   if (colon == dst - 1) {
4609     path[2] = '.';
4610     path[3] = '\0';
4611   }
4612 
4613   return path;
4614 }
4615 
4616 // This code is a copy of JDK's sysSetLength
4617 // from src/windows/hpi/src/sys_api_md.c
4618 
4619 int os::ftruncate(int fd, jlong length) {
4620   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4621   long high = (long)(length >> 32);
4622   DWORD ret;
4623 
4624   if (h == (HANDLE)(-1)) {
4625     return -1;
4626   }
4627 
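  // Position the file pointer at 'length' (the upper 32 bits travel through
  // 'high'), then declare that position to be the end of the file.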
4628   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4629   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4630     return -1;
4631   }
4632 
4633   if (::SetEndOfFile(h) == FALSE) {
4634     return -1;
4635   }
4636 
4637   return 0;
4638 }
4639 
4640 int os::get_fileno(FILE* fp) {
4641   return _fileno(fp);
4642 }
4643 
4644 // This code is a copy of JDK's sysSync
4645 // from src/windows/hpi/src/sys_api_md.c
4646 // except for the legacy workaround for a bug in Win 98
4647 
4648 int os::fsync(int fd) {
4649   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4650 
4651   if ((!::FlushFileBuffers(handle)) &&
4652       (GetLastError() != ERROR_ACCESS_DENIED)) {
4653     // from winerror.h
4654     return -1;
4655   }
4656   return 0;
4657 }
4658 
4659 static int nonSeekAvailable(int, long *);
4660 static int stdinAvailable(int, long *);
4661 
4662 #define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
4663 #define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4664 
4665 // This code is a copy of JDK's sysAvailable
4666 // from src/windows/hpi/src/sys_api_md.c
4667 
4668 int os::available(int fd, jlong *bytes) {
4669   jlong cur, end;
4670   struct _stati64 stbuf64;
4671 
4672   if (::_fstati64(fd, &stbuf64) >= 0) {
4673     int mode = stbuf64.st_mode;
4674     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4675       int ret;
4676       long lpbytes;
4677       if (fd == 0) {
4678         ret = stdinAvailable(fd, &lpbytes);
4679       } else {
4680         ret = nonSeekAvailable(fd, &lpbytes);
4681       }
4682       (*bytes) = (jlong)(lpbytes);
4683       return ret;
4684     }
4685     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4686       return FALSE;
4687     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4688       return FALSE;
4689     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4690       return FALSE;
4691     }
4692     *bytes = end - cur;
4693     return TRUE;
4694   } else {
4695     return FALSE;
4696   }
4697 }
4698 
4699 void os::flockfile(FILE* fp) {
4700   _lock_file(fp);
4701 }
4702 
4703 void os::funlockfile(FILE* fp) {
4704   _unlock_file(fp);
4705 }
4706 
4707 // This code is a copy of JDK's nonSeekAvailable
4708 // from src/windows/hpi/src/sys_api_md.c
4709 
4710 static int nonSeekAvailable(int fd, long *pbytes) {
4711   // This is used for available on non-seekable devices
4712   // (like both named and anonymous pipes, such as pipes
4713   //  connected to an exec'd process).
4714   // Standard Input is a special case.
4715   HANDLE han;
4716 
4717   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4718     return FALSE;
4719   }
4720 
4721   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4722     // PeekNamedPipe fails when at EOF.  In that case we
4723     // simply make *pbytes = 0 which is consistent with the
4724     // behavior we get on Solaris when an fd is at EOF.
4725     // The only alternative is to raise an Exception,
4726     // which isn't really warranted.
4727     //
4728     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4729       return FALSE;
4730     }
4731     *pbytes = 0;
4732   }
4733   return TRUE;
4734 }
4735 
4736 #define MAX_INPUT_EVENTS 2000
4737 
4738 // This code is a copy of JDK's stdinAvailable
4739 // from src/windows/hpi/src/sys_api_md.c
4740 
4741 static int stdinAvailable(int fd, long *pbytes) {
4742   HANDLE han;
4743   DWORD numEventsRead = 0;  // Number of events read from buffer
4744   DWORD numEvents = 0;      // Number of events in buffer
4745   DWORD i = 0;              // Loop index
4746   DWORD curLength = 0;      // Position marker
4747   DWORD actualLength = 0;   // Number of bytes readable
4748   BOOL error = FALSE;       // Error holder
4749   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4750 
4751   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4752     return FALSE;
4753   }
4754 
4755   // Construct an array of input records in the console buffer
4756   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4757   if (error == 0) {
4758     return nonSeekAvailable(fd, pbytes);
4759   }
4760 
4761   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4762   if (numEvents > MAX_INPUT_EVENTS) {
4763     numEvents = MAX_INPUT_EVENTS;
4764   }
4765 
4766   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4767   if (lpBuffer == NULL) {
4768     return FALSE;
4769   }
4770 
4771   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4772   if (error == 0) {
4773     os::free(lpBuffer);
4774     return FALSE;
4775   }
4776 
4777   // Examine input records for the number of bytes available
4778   for (i=0; i<numEvents; i++) {
4779     if (lpBuffer[i].EventType == KEY_EVENT) {
4780 
4781       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4782                                       &(lpBuffer[i].Event);
4783       if (keyRecord->bKeyDown == TRUE) {
4784         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4785         curLength++;
4786         if (*keyPressed == '\r') {
4787           actualLength = curLength;
4788         }
4789       }
4790     }
4791   }
4792 
4793   if (lpBuffer != NULL) {
4794     os::free(lpBuffer);
4795   }
4796 
4797   *pbytes = (long) actualLength;
4798   return TRUE;
4799 }
4800 
4801 // Map a block of memory.
4802 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4803                         char *addr, size_t bytes, bool read_only,
4804                         bool allow_exec) {
4805   HANDLE hFile;
4806   char* base;
4807 
4808   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4809                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4810   if (hFile == NULL) {
4811     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4812     return NULL;
4813   }
4814 
4815   if (allow_exec) {
4816     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4817     // unless it comes from a PE image (which the shared archive is not.)
4818     // Even VirtualProtect refuses to give execute access to mapped memory
4819     // that was not previously executable.
4820     //
4821     // Instead, stick the executable region in anonymous memory.  Yuck.
4822     // Penalty is that ~4 pages will not be shareable - in the future
4823     // we might consider DLLizing the shared archive with a proper PE
4824     // header so that mapping executable + sharing is possible.
4825 
4826     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4827                                 PAGE_READWRITE);
4828     if (base == NULL) {
4829       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4830       CloseHandle(hFile);
4831       return NULL;
4832     }
4833 
4834     DWORD bytes_read;
4835     OVERLAPPED overlapped;
4836     overlapped.Offset = (DWORD)file_offset;
4837     overlapped.OffsetHigh = 0;
4838     overlapped.hEvent = NULL;
4839     // ReadFile guarantees that if the return value is true, the requested
4840     // number of bytes were read before returning.
4841     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4842     if (!res) {
4843       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4844       release_memory(base, bytes);
4845       CloseHandle(hFile);
4846       return NULL;
4847     }
4848   } else {
4849     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4850                                     NULL /* file_name */);
4851     if (hMap == NULL) {
4852       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4853       CloseHandle(hFile);
4854       return NULL;
4855     }
4856 
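    // FILE_MAP_COPY requests a private copy-on-write view, so any writes the
    // VM makes to the mapped archive stay local to this process and never
    // reach the underlying file.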
4857     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4858     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4859                                   (DWORD)bytes, addr);
4860     if (base == NULL) {
4861       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4862       CloseHandle(hMap);
4863       CloseHandle(hFile);
4864       return NULL;
4865     }
4866 
4867     if (CloseHandle(hMap) == 0) {
4868       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4869       CloseHandle(hFile);
4870       return base;
4871     }
4872   }
4873 
4874   if (allow_exec) {
4875     DWORD old_protect;
4876     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4877     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4878 
4879     if (!res) {
4880       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4881       // Don't consider this a hard error, on IA32 even if the
4882       // VirtualProtect fails, we should still be able to execute
4883       CloseHandle(hFile);
4884       return base;
4885     }
4886   }
4887 
4888   if (CloseHandle(hFile) == 0) {
4889     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4890     return base;
4891   }
4892 
4893   return base;
4894 }
4895 
4896 
4897 // Remap a block of memory.
4898 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4899                           char *addr, size_t bytes, bool read_only,
4900                           bool allow_exec) {
4901   // This OS does not allow existing memory maps to be remapped so we
4902   // have to unmap the memory before we remap it.
4903   if (!os::unmap_memory(addr, bytes)) {
4904     return NULL;
4905   }
4906 
4907   // There is a very small theoretical window between the unmap_memory()
4908   // call above and the map_memory() call below where a thread in native
4909   // code may be able to access an address that is no longer mapped.
4910 
4911   return os::map_memory(fd, file_name, file_offset, addr, bytes,
4912                         read_only, allow_exec);
4913 }
4914 
4915 
4916 // Unmap a block of memory.
4917 // Returns true=success, otherwise false.
4918 
4919 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4920   MEMORY_BASIC_INFORMATION mem_info;
4921   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4922     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
4923     return false;
4924   }
4925 
  // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
  // Instead, the executable region was allocated using VirtualAlloc(). See
  // pd_map_memory() above.
  //
  // The following flags should match the 'exec_access' flags used for
  // VirtualProtect() in pd_map_memory().
4932   if (mem_info.Protect == PAGE_EXECUTE_READ ||
4933       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4934     return pd_release_memory(addr, bytes);
4935   }
4936 
4937   BOOL result = UnmapViewOfFile(addr);
4938   if (result == 0) {
4939     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
4940     return false;
4941   }
4942   return true;
4943 }
4944 
4945 void os::pause() {
4946   char filename[MAX_PATH];
4947   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4948     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4949   } else {
4950     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4951   }
4952 
4953   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4954   if (fd != -1) {
4955     struct stat buf;
4956     ::close(fd);
4957     while (::stat(filename, &buf) == 0) {
4958       Sleep(100);
4959     }
4960   } else {
4961     jio_fprintf(stderr,
4962                 "Could not open pause file '%s', continuing immediately.\n", filename);
4963   }
4964 }
4965 
4966 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
4967   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
4968 }
4969 
4970 // See the caveats for this class in os_windows.hpp
// Protects the callback call so that a raised OS EXCEPTION causes a jump back
// into this method, which then returns false. If no OS EXCEPTION was raised,
// it returns true.
4974 // The callback is supposed to provide the method that should be protected.
4975 //
4976 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
4977   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
4978   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
4979          "crash_protection already set?");
4980 
4981   bool success = true;
4982   __try {
4983     WatcherThread::watcher_thread()->set_crash_protection(this);
4984     cb.call();
4985   } __except(EXCEPTION_EXECUTE_HANDLER) {
4986     // only for protection, nothing to do
4987     success = false;
4988   }
4989   WatcherThread::watcher_thread()->set_crash_protection(NULL);
4990   return success;
4991 }
4992 
4993 // An Event wraps a win32 "CreateEvent" kernel handle.
4994 //
4995 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
4996 //
4997 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
4998 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
4999 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5000 //     In addition, an unpark() operation might fetch the handle field, but the
5001 //     event could recycle between the fetch and the SetEvent() operation.
5002 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5003 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on a stale but recycled handle would be harmless, but in practice this might
5005 //     confuse other non-Sun code, so it's not a viable approach.
5006 //
5007 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5008 //     with the Event.  The event handle is never closed.  This could be construed
5009 //     as handle leakage, but only up to the maximum # of threads that have been extant
//     at any one time.  This shouldn't be an issue, as Windows platforms typically
5011 //     permit a process to have hundreds of thousands of open handles.
5012 //
5013 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5014 //     and release unused handles.
5015 //
5016 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5017 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5018 //
5019 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5020 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5021 //
5022 // We use (2).
5023 //
5024 // TODO-FIXME:
5025 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5026 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5027 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5028 // 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
5029 //     into a single win32 CreateEvent() handle.
5030 //
5031 // Assumption:
5032 //    Only one parker can exist on an event, which is why we allocate
5033 //    them per-thread. Multiple unparkers can coexist.
5034 //
5035 // _Event transitions in park()
5036 //   -1 => -1 : illegal
5037 //    1 =>  0 : pass - return immediately
5038 //    0 => -1 : block; then set _Event to 0 before returning
5039 //
5040 // _Event transitions in unpark()
5041 //    0 => 1 : just return
5042 //    1 => 1 : just return
5043 //   -1 => either 0 or 1; must signal target thread
5044 //         That is, we can safely transition _Event from -1 to either
5045 //         0 or 1.
5046 //
5047 // _Event serves as a restricted-range semaphore.
5048 //   -1 : thread is blocked, i.e. there is a waiter
5049 //    0 : neutral: thread is running or ready,
5050 //        could have been signaled after a wait started
5051 //    1 : signaled - thread is running or ready
5052 //
5053 // Another possible encoding of _Event would be with
5054 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5055 //
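// A typical sequence, for illustration: a thread calls park() while _Event
// is 0, CASes it to -1 and blocks on the win32 event.  Another thread then
// calls unpark(), which swaps _Event to 1 and, because the previous value
// was -1, calls SetEvent().  The parked thread wakes, resets _Event to 0,
// and returns.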
5056 
5057 int os::PlatformEvent::park(jlong Millis) {
5058   // Transitions for _Event:
5059   //   -1 => -1 : illegal
5060   //    1 =>  0 : pass - return immediately
5061   //    0 => -1 : block; then set _Event to 0 before returning
5062 
5063   guarantee(_ParkHandle != NULL , "Invariant");
5064   guarantee(Millis > 0          , "Invariant");
5065 
5066   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5067   // the initial park() operation.
5068   // Consider: use atomic decrement instead of CAS-loop
5069 
5070   int v;
5071   for (;;) {
5072     v = _Event;
5073     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5074   }
5075   guarantee((v == 0) || (v == 1), "invariant");
5076   if (v != 0) return OS_OK;
5077 
5078   // Do this the hard way by blocking ...
5079   // TODO: consider a brief spin here, gated on the success of recent
5080   // spin attempts by this thread.
5081   //
5082   // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
5084   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5085   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5086   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend
5088   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5089   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5090   // for the already waited time.  This policy does not admit any new outcomes.
5091   // In the future, however, we might want to track the accumulated wait time and
5092   // adjust Millis accordingly if we encounter a spurious wakeup.
5093 
5094   const int MAXTIMEOUT = 0x10000000;
5095   DWORD rv = WAIT_TIMEOUT;
5096   while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5098     if (Millis > MAXTIMEOUT) {
5099       prd = MAXTIMEOUT;
5100     }
5101     rv = ::WaitForSingleObject(_ParkHandle, prd);
5102     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5103     if (rv == WAIT_TIMEOUT) {
5104       Millis -= prd;
5105     }
5106   }
5107   v = _Event;
5108   _Event = 0;
5109   // see comment at end of os::PlatformEvent::park() below:
5110   OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
5112   // we return OS_OK indicating we awoke via unpark().
5113   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5114   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5115 }
5116 
5117 void os::PlatformEvent::park() {
5118   // Transitions for _Event:
5119   //   -1 => -1 : illegal
5120   //    1 =>  0 : pass - return immediately
5121   //    0 => -1 : block; then set _Event to 0 before returning
5122 
5123   guarantee(_ParkHandle != NULL, "Invariant");
5124   // Invariant: Only the thread associated with the Event/PlatformEvent
5125   // may call park().
5126   // Consider: use atomic decrement instead of CAS-loop
5127   int v;
5128   for (;;) {
5129     v = _Event;
5130     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5131   }
5132   guarantee((v == 0) || (v == 1), "invariant");
5133   if (v != 0) return;
5134 
5135   // Do this the hard way by blocking ...
5136   // TODO: consider a brief spin here, gated on the success of recent
5137   // spin attempts by this thread.
5138   while (_Event < 0) {
5139     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5140     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5141   }
5142 
  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
5146   _Event = 0;
5147   OrderAccess::fence();
5148   guarantee(_Event >= 0, "invariant");
5149 }
5150 
5151 void os::PlatformEvent::unpark() {
5152   guarantee(_ParkHandle != NULL, "Invariant");
5153 
5154   // Transitions for _Event:
5155   //    0 => 1 : just return
5156   //    1 => 1 : just return
5157   //   -1 => either 0 or 1; must signal target thread
5158   //         That is, we can safely transition _Event from -1 to either
5159   //         0 or 1.
5160   // See also: "Semaphores in Plan 9" by Mullender & Cox
5161   //
5162   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5163   // that it will take two back-to-back park() calls for the owning
5164   // thread to block. This has the benefit of forcing a spurious return
5165   // from the first park() call after an unpark() call which will help
5166   // shake out uses of park() and unpark() without condition variables.
5167 
5168   if (Atomic::xchg(1, &_Event) >= 0) return;
5169 
5170   ::SetEvent(_ParkHandle);
5171 }
5172 
5173 
5174 // JSR166
5175 // -------------------------------------------------------
5176 
5177 // The Windows implementation of Park is very straightforward: Basic
5178 // operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically reuse the event inherited
5180 // from Monitor.
5181 
5182 void Parker::park(bool isAbsolute, jlong time) {
5183   guarantee(_ParkEvent != NULL, "invariant");
5184   // First, demultiplex/decode time arguments
5185   if (time < 0) { // don't wait
5186     return;
5187   } else if (time == 0 && !isAbsolute) {
5188     time = INFINITE;
5189   } else if (isAbsolute) {
5190     time -= os::javaTimeMillis(); // convert to relative time
5191     if (time <= 0) {  // already elapsed
5192       return;
5193     }
5194   } else { // relative
5195     time /= 1000000;  // Must coarsen from nanos to millis
5196     if (time == 0) {  // Wait for the minimal time unit if zero
5197       time = 1;
5198     }
5199   }
5200 
5201   JavaThread* thread = JavaThread::current();
5202 
5203   // Don't wait if interrupted or already triggered
5204   if (Thread::is_interrupted(thread, false) ||
5205       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5206     ResetEvent(_ParkEvent);
5207     return;
5208   } else {
5209     ThreadBlockInVM tbivm(thread);
5210     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5211     thread->set_suspend_equivalent();
5212 
5213     WaitForSingleObject(_ParkEvent, time);
5214     ResetEvent(_ParkEvent);
5215 
5216     // If externally suspended while waiting, re-suspend
5217     if (thread->handle_special_suspend_equivalent_condition()) {
5218       thread->java_suspend_self();
5219     }
5220   }
5221 }
5222 
5223 void Parker::unpark() {
5224   guarantee(_ParkEvent != NULL, "invariant");
5225   SetEvent(_ParkEvent);
5226 }
5227 
5228 // Run the specified command in a separate process. Return its exit value,
5229 // or -1 on failure (e.g. can't create a new process).
5230 int os::fork_and_exec(char* cmd) {
5231   STARTUPINFO si;
5232   PROCESS_INFORMATION pi;
5233 
5234   memset(&si, 0, sizeof(si));
5235   si.cb = sizeof(si);
5236   memset(&pi, 0, sizeof(pi));
5237   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5238                             cmd,    // command line
5239                             NULL,   // process security attribute
5240                             NULL,   // thread security attribute
5241                             TRUE,   // inherits system handles
5242                             0,      // no creation flags
5243                             NULL,   // use parent's environment block
5244                             NULL,   // use parent's starting directory
5245                             &si,    // (in) startup information
5246                             &pi);   // (out) process information
5247 
5248   if (rslt) {
5249     // Wait until child process exits.
5250     WaitForSingleObject(pi.hProcess, INFINITE);
5251 
5252     DWORD exit_code;
5253     GetExitCodeProcess(pi.hProcess, &exit_code);
5254 
5255     // Close process and thread handles.
5256     CloseHandle(pi.hProcess);
5257     CloseHandle(pi.hThread);
5258 
5259     return (int)exit_code;
5260   } else {
5261     return -1;
5262   }
5263 }
5264 
5265 //--------------------------------------------------------------------------------------------------
5266 // Non-product code
5267 
5268 static int mallocDebugIntervalCounter = 0;
5269 static int mallocDebugCounter = 0;
5270 
5271 // For debugging possible bugs inside HeapWalk (a ring buffer)
5272 #define SAVE_COUNT 8
5273 static PROCESS_HEAP_ENTRY saved_heap_entries[SAVE_COUNT];
5274 static int saved_heap_entry_index;
5275 
5276 bool os::check_heap(bool force) {
5277   if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
5278   if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
5279     // Note: HeapValidate executes two hardware breakpoints when it finds something
5280     // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.
5282     //
5283     // Note:  we want to check the CRT heap, which is not necessarily located in the
5284     // process default heap.
5285     HANDLE heap = (HANDLE) _get_heap_handle();
5286     if (!heap) {
5287       return true;
5288     }
5289 
5290     // If we fail to lock the heap, then gflags.exe has been used
5291     // or some other special heap flag has been set that prevents
5292     // locking. We don't try to walk a heap we can't lock.
5293     if (HeapLock(heap) != 0) {
5294       PROCESS_HEAP_ENTRY phe;
5295       phe.lpData = NULL;
5296       memset(saved_heap_entries, 0, sizeof(saved_heap_entries));
5297       saved_heap_entry_index = 0;
5298       int count = 0;
5299 
5300       while (HeapWalk(heap, &phe) != 0) {
5301         count ++;
5302         if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
5303             !HeapValidate(heap, 0, phe.lpData)) {
5304           tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
5305           tty->print_cr("corrupted block near address %#x, length %d, count %d", phe.lpData, phe.cbData, count);
5306           HeapUnlock(heap);
5307           fatal("corrupted C heap");
5308         } else {
          // Save previously seen entries in a ring buffer. We have seen strange
5310           // heap corruption fatal errors that produced mdmp files, but when we load
5311           // these mdmp files in WinDBG, "!heap -triage" shows no error.
5312           // We can examine the saved_heap_entries[] array in the mdmp file to
5313           // diagnose such seemingly spurious errors reported by HeapWalk.
5314           saved_heap_entries[saved_heap_entry_index++] = phe;
5315           if (saved_heap_entry_index >= SAVE_COUNT) {
5316             saved_heap_entry_index = 0;
5317           }
5318         }
5319       }
5320       DWORD err = GetLastError();
5321       if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED &&
5322          (err == ERROR_INVALID_FUNCTION && phe.lpData != NULL)) {
5323         HeapUnlock(heap);
5324         fatal("heap walk aborted with error %d", err);
5325       }
5326       HeapUnlock(heap);
5327     }
5328     mallocDebugIntervalCounter = 0;
5329   }
5330   return true;
5331 }
5332 
5333 
5334 bool os::find(address addr, outputStream* st) {
5335   int offset = -1;
5336   bool result = false;
5337   char buf[256];
5338   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5339     st->print(PTR_FORMAT " ", addr);
5340     if (strlen(buf) < sizeof(buf) - 1) {
5341       char* p = strrchr(buf, '\\');
5342       if (p) {
5343         st->print("%s", p + 1);
5344       } else {
5345         st->print("%s", buf);
5346       }
5347     } else {
      // The library name is probably truncated. Let's omit the library name.
      // See also JDK-8147512.
5350     }
5351     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5352       st->print("::%s + 0x%x", buf, offset);
5353     }
5354     st->cr();
5355     result = true;
5356   }
5357   return result;
5358 }
5359 
5360 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5361   DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5362 
5363   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
5364     JavaThread* thread = JavaThread::current();
5365     PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5366     address addr = (address) exceptionRecord->ExceptionInformation[1];
5367 
5368     if (os::is_memory_serialize_page(thread, addr)) {
5369       return EXCEPTION_CONTINUE_EXECUTION;
5370     }
5371   }
5372 
5373   return EXCEPTION_CONTINUE_SEARCH;
5374 }
5375 
5376 // We don't build a headless jre for Windows
5377 bool os::is_headless_jre() { return false; }
5378 
5379 static jint initSock() {
5380   WSADATA wsadata;
5381 
5382   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5383     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5384                 ::GetLastError());
5385     return JNI_ERR;
5386   }
5387   return JNI_OK;
5388 }
5389 
5390 struct hostent* os::get_host_by_name(char* name) {
5391   return (struct hostent*)gethostbyname(name);
5392 }
5393 
5394 int os::socket_close(int fd) {
5395   return ::closesocket(fd);
5396 }
5397 
5398 int os::socket(int domain, int type, int protocol) {
5399   return ::socket(domain, type, protocol);
5400 }
5401 
5402 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5403   return ::connect(fd, him, len);
5404 }
5405 
5406 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5407   return ::recv(fd, buf, (int)nBytes, flags);
5408 }
5409 
5410 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5411   return ::send(fd, buf, (int)nBytes, flags);
5412 }
5413 
5414 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5415   return ::send(fd, buf, (int)nBytes, flags);
5416 }
5417 
5418 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5419 #if defined(IA32)
5420   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5421 #elif defined (AMD64)
5422   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5423 #endif
5424 
5425 // returns true if thread could be suspended,
5426 // false otherwise
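// (SuspendThread returns the thread's previous suspend count on success and
// (DWORD)-1 on failure, which is what the ~0 comparison below checks for.)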
5427 static bool do_suspend(HANDLE* h) {
5428   if (h != NULL) {
5429     if (SuspendThread(*h) != ~0) {
5430       return true;
5431     }
5432   }
5433   return false;
5434 }
5435 
5436 // resume the thread
5437 // calling resume on an active thread is a no-op
5438 static void do_resume(HANDLE* h) {
5439   if (h != NULL) {
5440     ResumeThread(*h);
5441   }
5442 }
5443 
5444 // retrieve a suspend/resume context capable handle
5445 // from the tid. Caller validates handle return value.
5446 void get_thread_handle_for_extended_context(HANDLE* h,
5447                                             OSThread::thread_id_t tid) {
5448   if (h != NULL) {
5449     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5450   }
5451 }
5452 
5453 // Thread sampling implementation
5454 //
5455 void os::SuspendedThreadTask::internal_do_task() {
5456   CONTEXT    ctxt;
5457   HANDLE     h = NULL;
5458 
5459   // get context capable handle for thread
5460   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5461 
5462   // sanity
5463   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5464     return;
5465   }
5466 
5467   // suspend the thread
5468   if (do_suspend(&h)) {
5469     ctxt.ContextFlags = sampling_context_flags;
5470     // get thread context
5471     GetThreadContext(h, &ctxt);
5472     SuspendedThreadTaskContext context(_thread, &ctxt);
5473     // pass context to Thread Sampling impl
5474     do_task(context);
5475     // resume thread
5476     do_resume(&h);
5477   }
5478 
5479   // close handle
5480   CloseHandle(h);
5481 }
5482 
5483 bool os::start_debugging(char *buf, int buflen) {
5484   int len = (int)strlen(buf);
5485   char *p = &buf[len];
5486 
5487   jio_snprintf(p, buflen-len,
5488              "\n\n"
5489              "Do you want to debug the problem?\n\n"
5490              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5491              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5492              "Otherwise, select 'No' to abort...",
5493              os::current_process_id(), os::current_thread_id());
5494 
5495   bool yes = os::message_box("Unexpected Error", buf);
5496 
5497   if (yes) {
5498     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5499     // exception. If VM is running inside a debugger, the debugger will
5500     // catch the exception. Otherwise, the breakpoint exception will reach
5501     // the default windows exception handler, which can spawn a debugger and
5502     // automatically attach to the dying VM.
5503     os::breakpoint();
5504     yes = false;
5505   }
5506   return yes;
5507 }
5508 
5509 void* os::get_default_process_handle() {
5510   return (void*)GetModuleHandle(NULL);
5511 }
5512 
5513 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5514 // which is used to find statically linked in agents.
5515 // Additionally for windows, takes into account __stdcall names.
5516 // Parameters:
5517 //            sym_name: Symbol in library we are looking for
5518 //            lib_name: Name of library to look in, NULL for shared libs.
5519 //            is_absolute_path == true if lib_name is absolute path to agent
5520 //                                     such as "C:/a/b/L.dll"
5521 //            == false if only the base name of the library is passed in
5522 //               such as "L"
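//
// For example, with sym_name "Agent_OnLoad" and lib_name "L" this builds
// "Agent_OnLoad_L"; with an __stdcall-decorated sym_name such as
// "_Agent_OnLoad@12" it builds "_Agent_OnLoad_L@12".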
5523 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5524                                     bool is_absolute_path) {
5525   char *agent_entry_name;
5526   size_t len;
5527   size_t name_len;
5528   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5529   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5530   const char *start;
5531 
5532   if (lib_name != NULL) {
5533     len = name_len = strlen(lib_name);
5534     if (is_absolute_path) {
5535       // Need to strip path, prefix and suffix
5536       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5537         lib_name = ++start;
5538       } else {
5539         // Need to check for drive prefix
5540         if ((start = strchr(lib_name, ':')) != NULL) {
5541           lib_name = ++start;
5542         }
5543       }
5544       if (len <= (prefix_len + suffix_len)) {
5545         return NULL;
5546       }
5547       lib_name += prefix_len;
5548       name_len = strlen(lib_name) - suffix_len;
5549     }
5550   }
5551   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5552   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5553   if (agent_entry_name == NULL) {
5554     return NULL;
5555   }
5556   if (lib_name != NULL) {
5557     const char *p = strrchr(sym_name, '@');
5558     if (p != NULL && p != sym_name) {
5559       // sym_name == _Agent_OnLoad@XX
5560       strncpy(agent_entry_name, sym_name, (p - sym_name));
5561       agent_entry_name[(p-sym_name)] = '\0';
5562       // agent_entry_name == _Agent_OnLoad
5563       strcat(agent_entry_name, "_");
5564       strncat(agent_entry_name, lib_name, name_len);
5565       strcat(agent_entry_name, p);
5566       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5567     } else {
5568       strcpy(agent_entry_name, sym_name);
5569       strcat(agent_entry_name, "_");
5570       strncat(agent_entry_name, lib_name, name_len);
5571     }
5572   } else {
5573     strcpy(agent_entry_name, sym_name);
5574   }
5575   return agent_entry_name;
5576 }
5577 
5578 #ifndef PRODUCT
5579 
5580 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5581 // contiguous memory block at a particular address.
5582 // The test first tries to find a good approximate address to allocate at by using the same
5583 // method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it, to avoid possible by-chance use of that location).
// This is of course only a dodgy assumption; there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation. The only actual failure
5587 // that is reported is when the test tries to allocate at a particular location but gets a
5588 // different valid one. A NULL return value at this point is not considered an error but may
5589 // be legitimate.
5590 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5591 void TestReserveMemorySpecial_test() {
5592   if (!UseLargePages) {
5593     if (VerboseInternalVMTests) {
5594       tty->print("Skipping test because large pages are disabled");
5595     }
5596     return;
5597   }
5598   // save current value of globals
5599   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5600   bool old_use_numa_interleaving = UseNUMAInterleaving;
5601 
5602   // set globals to make sure we hit the correct code path
5603   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5604 
5605   // do an allocation at an address selected by the OS to get a good one.
5606   const size_t large_allocation_size = os::large_page_size() * 4;
5607   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5608   if (result == NULL) {
5609     if (VerboseInternalVMTests) {
5610       tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5611                           large_allocation_size);
5612     }
5613   } else {
5614     os::release_memory_special(result, large_allocation_size);
5615 
5616     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5617     // we managed to get it once.
5618     const size_t expected_allocation_size = os::large_page_size();
5619     char* expected_location = result + os::large_page_size();
5620     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5621     if (actual_location == NULL) {
5622       if (VerboseInternalVMTests) {
5623         tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5624                             expected_location, large_allocation_size);
5625       }
5626     } else {
5627       // release memory
5628       os::release_memory_special(actual_location, expected_allocation_size);
5629       // only now check, after releasing any memory to avoid any leaks.
5630       assert(actual_location == expected_location,
5631              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5632              expected_location, expected_allocation_size, actual_location);
5633     }
5634   }
5635 
5636   // restore globals
5637   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5638   UseNUMAInterleaving = old_use_numa_interleaving;
5639 }
5640 #endif // PRODUCT
5641 
5642 /*
5643   All the defined signal names for Windows.
5644 
5645   NOTE that not all of these names are accepted by FindSignal!
5646 
5647   For various reasons some of these may be rejected at runtime.
5648 
5649   Here are the names currently accepted by a user of sun.misc.Signal with
5650   1.4.1 (ignoring potential interaction with use of chaining, etc):
5651 
5652      (LIST TBD)
5653 
5654 */
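
// For example, os::get_signal_number("TERM") maps to SIGTERM, and a name
// that is not in the table below yields -1.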
5655 int os::get_signal_number(const char* name) {
5656   static const struct {
    const char* name;
5658     int   number;
5659   } siglabels [] =
5660     // derived from version 6.0 VC98/include/signal.h
  {"ABRT",      SIGABRT,        // abnormal termination triggered by abort call
5662   "FPE",        SIGFPE,         // floating point exception
5663   "SEGV",       SIGSEGV,        // segment violation
5664   "INT",        SIGINT,         // interrupt
5665   "TERM",       SIGTERM,        // software term signal from kill
5666   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5667   "ILL",        SIGILL};        // illegal instruction
5668   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5669     if (strcmp(name, siglabels[i].name) == 0) {
5670       return siglabels[i].number;
5671     }
5672   }
5673   return -1;
5674 }
5675 
5676 // Fast current thread access
5677 
5678 int os::win32::_thread_ptr_offset = 0;
5679 
5680 static void call_wrapper_dummy() {}
5681 
5682 // We need to call the os_exception_wrapper once so that it sets
5683 // up the offset from FS of the thread pointer.
5684 void os::win32::initialize_thread_ptr_offset() {
5685   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5686                            NULL, NULL, NULL, NULL);
5687 }