1 /*
   2  * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "classfile/classLoader.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/disassembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "jvm_windows.h"
  38 #include "logging/log.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/filemap.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "os_share_windows.hpp"
  43 #include "os_windows.inline.hpp"
  44 #include "prims/jniFastGetField.hpp"
  45 #include "prims/jvm.h"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.inline.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "semaphore_windows.hpp"
  67 #include "services/attachListener.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/growableArray.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 
  77 #ifdef _DEBUG
  78 #include <crtdbg.h>
  79 #endif
  80 
  81 
  82 #include <windows.h>
  83 #include <sys/types.h>
  84 #include <sys/stat.h>
  85 #include <sys/timeb.h>
  86 #include <objidl.h>
  87 #include <shlobj.h>
  88 
  89 #include <malloc.h>
  90 #include <signal.h>
  91 #include <direct.h>
  92 #include <errno.h>
  93 #include <fcntl.h>
  94 #include <io.h>
  95 #include <process.h>              // For _beginthreadex(), _endthreadex()
  96 #include <imagehlp.h>             // For os::dll_address_to_function_name
  97 // for enumerating dll libraries
  98 #include <vdmdbg.h>
  99 
 100 // for timer info max values which include all bits
 101 #define ALL_64_BITS CONST64(-1)
 102 
 103 // For DLL loading/load error detection
 104 // Values of PE COFF
 105 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 106 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 107 
// Handle/id of the initial process and thread. main_process is the source
// process for DuplicateHandle() in os::create_attached_thread();
// main_thread/main_thread_id seed os::create_main_thread().
static HANDLE main_process;
static HANDLE main_thread;
static int    main_thread_id;

// Per-process FILETIME snapshots (not read in the code visible here).
static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;
 116 
 117 #ifdef _M_AMD64



 118   #define __CPU__ amd64
 119 #else
 120   #define __CPU__ i486

 121 #endif
 122 
// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;  // set in DllMain() on DLL_PROCESS_ATTACH
 126 
 127 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 128   switch (reason) {
 129   case DLL_PROCESS_ATTACH:
 130     vm_lib_handle = hinst;
 131     if (ForceTimeHighResolution) {
 132       timeBeginPeriod(1L);
 133     }
 134     break;
 135   case DLL_PROCESS_DETACH:
 136     if (ForceTimeHighResolution) {
 137       timeEndPeriod(1L);
 138     }
 139     break;
 140   default:
 141     break;
 142   }
 143   return true;
 144 }
 145 
 146 static inline double fileTimeAsDouble(FILETIME* time) {
 147   const double high  = (double) ((unsigned int) ~0);
 148   const double split = 10000000.0;
 149   double result = (time->dwLowDateTime / split) +
 150                    time->dwHighDateTime * (high/split);
 151   return result;
 152 }
 153 
 154 // Implementation of os
 155 
 156 bool os::unsetenv(const char* name) {
 157   assert(name != NULL, "Null pointer");
 158   return (SetEnvironmentVariable(name, NULL) == TRUE);
 159 }
 160 
// No setuid programs under Windows, so the VM never runs with elevated
// privileges it should distrust.
bool os::have_special_privileges() {
  return false;
}
 165 
 166 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; any periodic checks can be added here.
// On Windows it currently does nothing.
void os::run_periodic_checks() {
  return;
}
 173 
// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

// Forward declaration; installed via SetUnhandledExceptionFilter() on 32-bit
// in init_system_properties_values().
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 178 
 179 void os::init_system_properties_values() {
 180   // sysclasspath, java_home, dll_dir
 181   {
 182     char *home_path;
 183     char *dll_path;
 184     char *pslash;
 185     char *bin = "\\bin";
 186     char home_dir[MAX_PATH + 1];
 187     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 188 
 189     if (alt_home_dir != NULL)  {
 190       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 191       home_dir[MAX_PATH] = '\0';
 192     } else {
 193       os::jvm_path(home_dir, sizeof(home_dir));
 194       // Found the full path to jvm.dll.
 195       // Now cut the path to <java_home>/jre if we can.
 196       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 197       pslash = strrchr(home_dir, '\\');
 198       if (pslash != NULL) {
 199         *pslash = '\0';                   // get rid of \{client|server}
 200         pslash = strrchr(home_dir, '\\');
 201         if (pslash != NULL) {
 202           *pslash = '\0';                 // get rid of \bin
 203         }
 204       }
 205     }
 206 
 207     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 208     if (home_path == NULL) {
 209       return;
 210     }
 211     strcpy(home_path, home_dir);
 212     Arguments::set_java_home(home_path);
 213     FREE_C_HEAP_ARRAY(char, home_path);
 214 
 215     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 216                                 mtInternal);
 217     if (dll_path == NULL) {
 218       return;
 219     }
 220     strcpy(dll_path, home_dir);
 221     strcat(dll_path, bin);
 222     Arguments::set_dll_dir(dll_path);
 223     FREE_C_HEAP_ARRAY(char, dll_path);
 224 
 225     if (!set_boot_path('\\', ';')) {
 226       return;
 227     }
 228   }
 229 
 230 // library_path
 231 #define EXT_DIR "\\lib\\ext"
 232 #define BIN_DIR "\\bin"
 233 #define PACKAGE_DIR "\\Sun\\Java"
 234   {
 235     // Win32 library search order (See the documentation for LoadLibrary):
 236     //
 237     // 1. The directory from which application is loaded.
 238     // 2. The system wide Java Extensions directory (Java only)
 239     // 3. System directory (GetSystemDirectory)
 240     // 4. Windows directory (GetWindowsDirectory)
 241     // 5. The PATH environment variable
 242     // 6. The current directory
 243 
 244     char *library_path;
 245     char tmp[MAX_PATH];
 246     char *path_str = ::getenv("PATH");
 247 
 248     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 249                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 250 
 251     library_path[0] = '\0';
 252 
 253     GetModuleFileName(NULL, tmp, sizeof(tmp));
 254     *(strrchr(tmp, '\\')) = '\0';
 255     strcat(library_path, tmp);
 256 
 257     GetWindowsDirectory(tmp, sizeof(tmp));
 258     strcat(library_path, ";");
 259     strcat(library_path, tmp);
 260     strcat(library_path, PACKAGE_DIR BIN_DIR);
 261 
 262     GetSystemDirectory(tmp, sizeof(tmp));
 263     strcat(library_path, ";");
 264     strcat(library_path, tmp);
 265 
 266     GetWindowsDirectory(tmp, sizeof(tmp));
 267     strcat(library_path, ";");
 268     strcat(library_path, tmp);
 269 
 270     if (path_str) {
 271       strcat(library_path, ";");
 272       strcat(library_path, path_str);
 273     }
 274 
 275     strcat(library_path, ";.");
 276 
 277     Arguments::set_library_path(library_path);
 278     FREE_C_HEAP_ARRAY(char, library_path);
 279   }
 280 
 281   // Default extensions directory
 282   {
 283     char path[MAX_PATH];
 284     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 285     GetWindowsDirectory(path, MAX_PATH);
 286     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 287             path, PACKAGE_DIR, EXT_DIR);
 288     Arguments::set_ext_dirs(buf);
 289   }
 290   #undef EXT_DIR
 291   #undef BIN_DIR
 292   #undef PACKAGE_DIR
 293 
 294 #ifndef _WIN64
 295   // set our UnhandledExceptionFilter and save any previous one
 296   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 297 #endif
 298 
 299   // Done
 300   return;
 301 }
 302 
// Trigger a debugger break in the current process.
void os::breakpoint() {
  DebugBreak();
}
 306 
// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}
 311 
 312 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 313 // So far, this method is only used by Native Memory Tracking, which is
 314 // only supported on Windows XP or later.
 315 //
 316 int os::get_native_stack(address* stack, int frames, int toSkip) {
 317   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 318   for (int index = captured; index < frames; index ++) {
 319     stack[index] = NULL;
 320   }
 321   return captured;
 322 }
 323 
 324 
 325 // os::current_stack_base()
 326 //
 327 //   Returns the base of the stack, which is the stack's
 328 //   starting address.  This function must be called
 329 //   while running on the stack of the thread being queried.
 330 
 331 address os::current_stack_base() {
 332   MEMORY_BASIC_INFORMATION minfo;
 333   address stack_bottom;
 334   size_t stack_size;
 335 
 336   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 337   stack_bottom =  (address)minfo.AllocationBase;
 338   stack_size = minfo.RegionSize;
 339 
 340   // Add up the sizes of all the regions with the same
 341   // AllocationBase.
 342   while (1) {
 343     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 344     if (stack_bottom == (address)minfo.AllocationBase) {
 345       stack_size += minfo.RegionSize;
 346     } else {
 347       break;
 348     }
 349   }
































 350   return stack_bottom + stack_size;
 351 }
 352 
 353 size_t os::current_stack_size() {
 354   size_t sz;
 355   MEMORY_BASIC_INFORMATION minfo;
 356   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 357   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 358   return sz;
 359 }
 360 
 361 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 362   const struct tm* time_struct_ptr = localtime(clock);
 363   if (time_struct_ptr != NULL) {
 364     *res = *time_struct_ptr;
 365     return res;
 366   }
 367   return NULL;
 368 }
 369 
 370 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 371   const struct tm* time_struct_ptr = gmtime(clock);
 372   if (time_struct_ptr != NULL) {
 373     *res = *time_struct_ptr;
 374     return res;
 375   }
 376   return NULL;
 377 }
 378 
 379 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 380 
// Thread start routine for all newly created threads
static unsigned __stdcall thread_native_entry(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  _alloca(((pid ^ counter++) & 7) * 128);

  // Publish this thread so Thread::current() works; undone via
  // clear_thread_current() below if the thread did not delete itself.
  thread->initialize_thread_current();

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Diagnostic code to investigate JDK-6573254
  // (the value becomes the thread's exit code, below)
  int res = 30115;  // non-java thread
  if (thread->is_Java_thread()) {
    res = 20115;    // java thread
  }

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->run();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}
 444 
 445 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 446                                   int thread_id) {
 447   // Allocate the OSThread object
 448   OSThread* osthread = new OSThread(NULL, NULL);
 449   if (osthread == NULL) return NULL;
 450 
 451   // Initialize support for Java interrupts
 452   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 453   if (interrupt_event == NULL) {
 454     delete osthread;
 455     return NULL;
 456   }
 457   osthread->set_interrupt_event(interrupt_event);
 458 
 459   // Store info on the Win32 thread into the OSThread
 460   osthread->set_thread_handle(thread_handle);
 461   osthread->set_thread_id(thread_id);
 462 
 463   if (UseNUMA) {
 464     int lgrp_id = os::numa_get_group_id();
 465     if (lgrp_id != -1) {
 466       thread->set_lgrp_id(lgrp_id);
 467     }
 468   }
 469 
 470   // Initial thread state is INITIALIZED, not SUSPENDED
 471   osthread->set_state(INITIALIZED);
 472 
 473   return osthread;
 474 }
 475 
 476 
// Attach an externally created thread to the VM by wrapping it in an
// OSThread. Returns false if the OSThread cannot be created.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  // GetCurrentThread() returns a pseudo-handle; duplicate it into a real
  // handle that stays valid outside the calling thread's context.
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
    os::current_thread_id());

  return true;
}
 502 
// Create (once) the OSThread for the primordial thread, using the cached
// main_thread handle and main_thread_id, and install it on 'thread'.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);
  return true;
}
 520 
 521 // Helper function to trace _beginthreadex attributes,
 522 //  similar to os::Posix::describe_pthread_attr()
 523 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 524                                                size_t stacksize, unsigned initflag) {
 525   stringStream ss(buf, buflen);
 526   if (stacksize == 0) {
 527     ss.print("stacksize: default, ");
 528   } else {
 529     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 530   }
 531   ss.print("flags: ");
 532   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 533   #define ALL(X) \
 534     X(CREATE_SUSPENDED) \
 535     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 536   ALL(PRINT_FLAG)
 537   #undef ALL
 538   #undef PRINT_FLAG
 539   return buf;
 540 }
 541 
 542 // Allocate and initialize a new OSThread
 543 bool os::create_thread(Thread* thread, ThreadType thr_type,
 544                        size_t stack_size) {
 545   unsigned thread_id;
 546 
 547   // Allocate the OSThread object
 548   OSThread* osthread = new OSThread(NULL, NULL);
 549   if (osthread == NULL) {
 550     return false;
 551   }
 552 
 553   // Initialize support for Java interrupts
 554   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 555   if (interrupt_event == NULL) {
 556     delete osthread;
 557     return NULL;
 558   }
 559   osthread->set_interrupt_event(interrupt_event);
 560   osthread->set_interrupted(false);
 561 
 562   thread->set_osthread(osthread);
 563 
 564   if (stack_size == 0) {
 565     switch (thr_type) {
 566     case os::java_thread:
 567       // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
 568       if (JavaThread::stack_size_at_create() > 0) {
 569         stack_size = JavaThread::stack_size_at_create();
 570       }
 571       break;
 572     case os::compiler_thread:
 573       if (CompilerThreadStackSize > 0) {
 574         stack_size = (size_t)(CompilerThreadStackSize * K);
 575         break;
 576       } // else fall through:
 577         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 578     case os::vm_thread:
 579     case os::pgc_thread:
 580     case os::cgc_thread:
 581     case os::watcher_thread:
 582       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 583       break;
 584     }
 585   }
 586 
 587   // Create the Win32 thread
 588   //
 589   // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
 590   // does not specify stack size. Instead, it specifies the size of
 591   // initially committed space. The stack size is determined by
 592   // PE header in the executable. If the committed "stack_size" is larger
 593   // than default value in the PE header, the stack is rounded up to the
 594   // nearest multiple of 1MB. For example if the launcher has default
 595   // stack size of 320k, specifying any size less than 320k does not
 596   // affect the actual stack size at all, it only affects the initial
 597   // commitment. On the other hand, specifying 'stack_size' larger than
 598   // default value may cause significant increase in memory usage, because
 599   // not only the stack space will be rounded up to MB, but also the
 600   // entire space is committed upfront.
 601   //
 602   // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
 603   // for CreateThread() that can treat 'stack_size' as stack size. However we
 604   // are not supposed to call CreateThread() directly according to MSDN
 605   // document because JVM uses C runtime library. The good news is that the
 606   // flag appears to work with _beginthredex() as well.
 607 
 608   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 609   HANDLE thread_handle =
 610     (HANDLE)_beginthreadex(NULL,
 611                            (unsigned)stack_size,
 612                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 613                            thread,
 614                            initflag,
 615                            &thread_id);
 616 
 617   char buf[64];
 618   if (thread_handle != NULL) {
 619     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 620       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 621   } else {
 622     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 623       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 624   }
 625 
 626   if (thread_handle == NULL) {
 627     // Need to clean up stuff we've allocated so far
 628     CloseHandle(osthread->interrupt_event());
 629     thread->set_osthread(NULL);
 630     delete osthread;
 631     return NULL;
 632   }
 633 
 634   Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
 635 
 636   // Store info on the Win32 thread into the OSThread
 637   osthread->set_thread_handle(thread_handle);
 638   osthread->set_thread_id(thread_id);
 639 
 640   // Initial thread state is INITIALIZED, not SUSPENDED
 641   osthread->set_state(INITIALIZED);
 642 
 643   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 644   return true;
 645 }
 646 
 647 
// Free Win32 resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  // Close the thread handle and the interrupt event created in
  // create_thread()/create_os_thread(), then delete the OSThread itself.
  CloseHandle(osthread->thread_handle());
  CloseHandle(osthread->interrupt_event());
  delete osthread;
}
 661 
static jlong first_filetime;             // not referenced in the code shown here
static jlong initial_performance_count;  // QPC value at VM init (see initialize_performance_counter())
static jlong performance_frequency;      // QPC ticks per second (see initialize_performance_counter())
 665 
 666 
 667 jlong as_long(LARGE_INTEGER x) {
 668   jlong result = 0; // initialization to avoid warning
 669   set_high(&result, x.HighPart);
 670   set_low(&result, x.LowPart);
 671   return result;
 672 }
 673 
 674 
 675 jlong os::elapsed_counter() {
 676   LARGE_INTEGER count;
 677   QueryPerformanceCounter(&count);
 678   return as_long(count) - initial_performance_count;
 679 }
 680 
 681 
// Performance-counter frequency in ticks per second, cached by
// initialize_performance_counter().
jlong os::elapsed_frequency() {
  return performance_frequency;
}
 685 
 686 
// Available physical memory in bytes (delegates to the win32 layer).
julong os::available_memory() {
  return win32::available_memory();
}
 690 
 691 julong os::win32::available_memory() {
 692   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
 693   // value if total memory is larger than 4GB
 694   MEMORYSTATUSEX ms;
 695   ms.dwLength = sizeof(ms);
 696   GlobalMemoryStatusEx(&ms);
 697 
 698   return (julong)ms.ullAvailPhys;
 699 }
 700 
// Total physical memory; delegates to the win32 layer (implementation not
// visible in this part of the file).
julong os::physical_memory() {
  return win32::physical_memory();
}
 704 
// Report through *limit an upper bound on allocatable virtual memory.
// Always returns true on Windows (a limit always exists).
bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  // 64-bit: the available virtual address space is the limit.
  *limit = (julong)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
  return true;
#endif
}
 718 
 719 int os::active_processor_count() {
 720   DWORD_PTR lpProcessAffinityMask = 0;
 721   DWORD_PTR lpSystemAffinityMask = 0;
 722   int proc_count = processor_count();
 723   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 724       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 725     // Nof active processors is number of bits in process affinity mask
 726     int bitcount = 0;
 727     while (lpProcessAffinityMask != 0) {
 728       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 729       bitcount++;
 730     }
 731     return bitcount;
 732   } else {
 733     return proc_count;
 734   }
 735 }
 736 
void os::set_native_thread_name(const char *name) {

  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
  //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; debugger must observe
  // the exception below to show the correct name.

  // If there is no debugger attached skip raising the exception
  if (!IsDebuggerPresent()) {
    return;
  }

  // Layout of the THREADNAME_INFO structure that the debugger expects to
  // find in the arguments of the MS_VC_EXCEPTION exception.
  const DWORD MS_VC_EXCEPTION = 0x406D1388;
  struct {
    DWORD dwType;     // must be 0x1000
    LPCSTR szName;    // pointer to name (in user addr space)
    DWORD dwThreadID; // thread ID (-1=caller thread)
    DWORD dwFlags;    // reserved for future use, must be zero
  } info;

  info.dwType = 0x1000;
  info.szName = name;
  info.dwThreadID = -1;
  info.dwFlags = 0;

  // Raise the exception and immediately swallow it; only an attached
  // debugger reacts to it.
  __try {
    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
  } __except(EXCEPTION_EXECUTE_HANDLER) {}
}
 767 
// Stub: distributing processes over processors is not implemented on
// Windows; always returns false.
bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}
 772 
// Stub: binding the current thread to a processor is not implemented on
// Windows; always returns false.
bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}
 777 
 778 void os::win32::initialize_performance_counter() {
 779   LARGE_INTEGER count;
 780   QueryPerformanceFrequency(&count);
 781   performance_frequency = as_long(count);
 782   QueryPerformanceCounter(&count);
 783   initial_performance_count = as_long(count);
 784 }
 785 
 786 
// Seconds elapsed since initialize_performance_counter() was called.
double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}
 790 
 791 
 792 // Windows format:
 793 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 794 // Java format:
 795 //   Java standards require the number of milliseconds since 1/1/1970
 796 
// Constant offset - calculated using offset()
// (the number of 100ns ticks between the FILETIME epoch 1601-01-01 and the
// Java epoch 1970-01-01; cross-checked by the ASSERT version of offset())
static jlong  _offset   = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong  fake_time = 0;

#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int   _has_calculated_offset = 0;

// Compute the FILETIME value of the Java epoch once, cache it, and assert
// that it equals the hard-coded _offset constant.
jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear          = 1970;
  java_origin.wMonth         = 1;
  java_origin.wDayOfWeek     = 0; // ignored
  java_origin.wDay           = 1;
  java_origin.wHour          = 0;
  java_origin.wMinute        = 0;
  java_origin.wSecond        = 0;
  java_origin.wMilliseconds  = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal("Error = %d\nWindows error", GetLastError());
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
// Product builds trust the precomputed constant.
jlong offset() {
  return _offset;
}
#endif
 832 
 833 jlong windows_to_java_time(FILETIME wt) {
 834   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 835   return (a - offset()) / 10000;
 836 }
 837 
 838 // Returns time ticks in (10th of micro seconds)
 839 jlong windows_to_time_ticks(FILETIME wt) {
 840   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 841   return (a - offset());
 842 }
 843 
 844 FILETIME java_to_windows_time(jlong l) {
 845   jlong a = (l * 10000) + offset();
 846   FILETIME result;
 847   result.dwHighDateTime = high(a);
 848   result.dwLowDateTime  = low(a);
 849   return result;
 850 }
 851 
// Virtual (per-thread CPU) time is supported (see elapsedVTime()), but is
// not a mode that can be separately enabled or queried as enabled.
bool os::supports_vtime() { return true; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }
 855 
 856 double os::elapsedVTime() {
 857   FILETIME created;
 858   FILETIME exited;
 859   FILETIME kernel;
 860   FILETIME user;
 861   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 862     // the resolution of windows_to_java_time() should be sufficient (ms)
 863     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 864   } else {
 865     return elapsedTime();
 866   }
 867 }
 868 
 869 jlong os::javaTimeMillis() {
 870   if (UseFakeTimers) {
 871     return fake_time++;
 872   } else {
 873     FILETIME wt;
 874     GetSystemTimeAsFileTime(&wt);
 875     return windows_to_java_time(wt);
 876   }
 877 }
 878 
 879 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 880   FILETIME wt;
 881   GetSystemTimeAsFileTime(&wt);
 882   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 883   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 884   seconds = secs;
 885   nanos = jlong(ticks - (secs*10000000)) * 100;
 886 }
 887 
 888 jlong os::javaTimeNanos() {
 889     LARGE_INTEGER current_count;
 890     QueryPerformanceCounter(&current_count);
 891     double current = as_long(current_count);
 892     double freq = performance_frequency;
 893     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 894     return time;
 895 }
 896 
 897 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 898   jlong freq = performance_frequency;
 899   if (freq < NANOSECS_PER_SEC) {
 900     // the performance counter is 64 bits and we will
 901     // be multiplying it -- so no wrap in 64 bits
 902     info_ptr->max_value = ALL_64_BITS;
 903   } else if (freq > NANOSECS_PER_SEC) {
 904     // use the max value the counter can reach to
 905     // determine the max value which could be returned
 906     julong max_counter = (julong)ALL_64_BITS;
 907     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 908   } else {
 909     // the performance counter is 64 bits and we will
 910     // be using it directly -- so no wrap in 64 bits
 911     info_ptr->max_value = ALL_64_BITS;
 912   }
 913 
 914   // using a counter, so no skipping
 915   info_ptr->may_skip_backward = false;
 916   info_ptr->may_skip_forward = false;
 917 
 918   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 919 }
 920 
 921 char* os::local_time_string(char *buf, size_t buflen) {
 922   SYSTEMTIME st;
 923   GetLocalTime(&st);
 924   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 925                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 926   return buf;
 927 }
 928 
 929 bool os::getTimesSecs(double* process_real_time,
 930                       double* process_user_time,
 931                       double* process_system_time) {
 932   HANDLE h_process = GetCurrentProcess();
 933   FILETIME create_time, exit_time, kernel_time, user_time;
 934   BOOL result = GetProcessTimes(h_process,
 935                                 &create_time,
 936                                 &exit_time,
 937                                 &kernel_time,
 938                                 &user_time);
 939   if (result != 0) {
 940     FILETIME wt;
 941     GetSystemTimeAsFileTime(&wt);
 942     jlong rtc_millis = windows_to_java_time(wt);
 943     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 944     *process_user_time =
 945       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 946     *process_system_time =
 947       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
 948     return true;
 949   } else {
 950     return false;
 951   }
 952 }
 953 
// Quiesce the VM before exit/abort: flush perf data, flush log streams and
// run any registered abort hook. Does not terminate the process itself.
void os::shutdown() {
  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
 967 
 968 
// Pointer to dbghelp.dll's MiniDumpWriteDump, resolved lazily via
// GetProcAddress in os::abort() so we do not link against dbghelp.
static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                         PMINIDUMP_EXCEPTION_INFORMATION,
                                         PMINIDUMP_USER_STREAM_INFORMATION,
                                         PMINIDUMP_CALLBACK_INFORMATION);

// Handle of the minidump file: created in os::check_dump_limit(), written
// and closed in os::abort().
static HANDLE dumpFile = NULL;
 975 
 976 // Check if dump file can be created.
 977 void os::check_dump_limit(char* buffer, size_t buffsz) {
 978   bool status = true;
 979   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
 980     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
 981     status = false;
 982   }
 983 
 984 #ifndef ASSERT
 985   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
 986     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
 987     status = false;
 988   }
 989 #endif
 990 
 991   if (status) {
 992     const char* cwd = get_current_directory(NULL, 0);
 993     int pid = current_process_id();
 994     if (cwd != NULL) {
 995       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
 996     } else {
 997       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
 998     }
 999 
1000     if (dumpFile == NULL &&
1001        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1002                  == INVALID_HANDLE_VALUE) {
1003       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1004       status = false;
1005     }
1006   }
1007   VMError::record_coredump_status(buffer, status);
1008 }
1009 
// Shut the VM down and terminate the process, optionally writing a minidump
// first. 'siginfo'/'context' (if both non-NULL) describe the crashing
// exception and are embedded in the dump so it records the faulting state.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  HINSTANCE dbghelp;
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  // No dump requested, or check_dump_limit() never opened a dump file:
  // close any handle we do hold and exit immediately.
  if (!dump_core || dumpFile == NULL) {
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
    CloseHandle(dumpFile);
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  // Resolve MiniDumpWriteDump dynamically so the VM has no hard dependency
  // on dbghelp.dll.
  _MiniDumpWriteDump =
      CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                    PMINIDUMP_EXCEPTION_INFORMATION,
                                    PMINIDUMP_USER_STREAM_INFORMATION,
                                    PMINIDUMP_CALLBACK_INFORMATION),
                                    GetProcAddress(dbghelp,
                                    "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
    CloseHandle(dumpFile);
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
    MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);

  // Package the exception details (if supplied) for the dump writer so the
  // minidump identifies the faulting thread and exception record.
  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}
1073 
// Die immediately, no exit hook, no abort hook, no cleanup.
// Passes status -1 to the hard process-exit helper.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}
1078 
1079 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1080 //  * dirent_md.c       1.15 00/02/02
1081 //
1082 // The declarations for DIR and struct dirent are in jvm_win32.h.
1083 
1084 // Caller must have already run dirname through JVM_NativePath, which removes
1085 // duplicate slashes and converts all instances of '/' into '\\'.
1086 
// Open a directory stream for 'dirname' (already normalized by
// JVM_NativePath). Returns NULL and sets errno on failure.
DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");   // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().  We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves exactly enough room for the longest suffix appended below
  // ("\*.*" is four characters, plus the terminating NUL).
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  // Verify the path exists and really is a directory.
  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  // Prime the stream with the first entry. ERROR_FILE_NOT_FOUND means an
  // empty directory, which is not an error: readdir() will just report
  // end-of-stream via the INVALID_HANDLE_VALUE handle.
  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}
1152 
// parameter dbuf unused on Windows
// Return the next directory entry, or NULL at end-of-stream/error. The
// entry handed out is the one pre-fetched by the previous
// FindFirstFile/FindNextFile call; the next one is fetched eagerly here.
struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // Stream exhausted, or opendir() found an empty directory.
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  // Pre-fetch the next entry; on end-of-stream close the find handle so the
  // following call returns NULL.
  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}
1173 
1174 int os::closedir(DIR *dirp) {
1175   assert(dirp != NULL, "just checking");      // hotspot change
1176   if (dirp->handle != INVALID_HANDLE_VALUE) {
1177     if (!FindClose(dirp->handle)) {
1178       errno = EBADF;
1179       return -1;
1180     }
1181     dirp->handle = INVALID_HANDLE_VALUE;
1182   }
1183   free(dirp->path);
1184   free(dirp);
1185   return 0;
1186 }
1187 
1188 // This must be hard coded because it's the system's temporary
1189 // directory not the java application's temp directory, ala java.io.tmpdir.
1190 const char* os::get_temp_directory() {
1191   static char path_buf[MAX_PATH];
1192   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1193     return path_buf;
1194   } else {
1195     path_buf[0] = '\0';
1196     return path_buf;
1197   }
1198 }
1199 
1200 static bool file_exists(const char* filename) {
1201   if (filename == NULL || strlen(filename) == 0) {
1202     return false;
1203   }
1204   return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1205 }
1206 
// Build a library file name ("<dir>\<fname>.dll") into 'buffer' from 'pname'
// (a directory, a path ending in a separator, or a multi-element search
// path) and base name 'fname'. For a search path, the first element whose
// resulting file exists wins. Returns false on buffer overflow or when no
// search-path element produced an existing file.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    // No directory given: just "<fname>.dll".
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    // 'pname' already ends with a separator ("C:" or "...\\").
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // 'pname' is a search path: probe each element in order.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    // Single directory without a trailing separator.
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}
1263 
1264 // Needs to be in os specific directory because windows requires another
1265 // header file <direct.h>
1266 const char* os::get_current_directory(char *buf, size_t buflen) {
1267   int n = static_cast<int>(buflen);
1268   if (buflen > INT_MAX)  n = INT_MAX;
1269   return _getcwd(buf, n);
1270 }
1271 
1272 //-----------------------------------------------------------
1273 // Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  // ntdll.dll is always loaded; GetModuleHandle does not bump a ref count.
  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  // Query its base address and image size.
  if (!GetModuleInformation(GetCurrentProcess(), hmod,
                                          &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  // In range [base, base + SizeOfImage)?
  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif
1297 
// Parameter block for _locate_module_by_addr: 'addr' is the query address;
// 'full_path'/'buflen' describe an optional output buffer for the module's
// path; 'base_addr' receives the base address of the matching module.
struct _modinfo {
  address addr;
  char*   full_path;   // point to a char buffer
  int     buflen;      // size of the buffer
  address base_addr;
};
1304 
1305 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1306                                   address top_address, void * param) {
1307   struct _modinfo *pmod = (struct _modinfo *)param;
1308   if (!pmod) return -1;
1309 
1310   if (base_addr   <= pmod->addr &&
1311       top_address > pmod->addr) {
1312     // if a buffer is provided, copy path name to the buffer
1313     if (pmod->full_path) {
1314       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1315     }
1316     pmod->base_addr = base_addr;
1317     return 1;
1318   }
1319   return 0;
1320 }
1321 
1322 bool os::dll_address_to_library_name(address addr, char* buf,
1323                                      int buflen, int* offset) {
1324   // buf is not optional, but offset is optional
1325   assert(buf != NULL, "sanity check");
1326 
1327 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
1328 //       return the full path to the DLL file, sometimes it returns path
1329 //       to the corresponding PDB file (debug info); sometimes it only
1330 //       returns partial path, which makes life painful.
1331 
1332   struct _modinfo mi;
1333   mi.addr      = addr;
1334   mi.full_path = buf;
1335   mi.buflen    = buflen;
1336   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1337     // buf already contains path name
1338     if (offset) *offset = addr - mi.base_addr;
1339     return true;
1340   }
1341 
1342   buf[0] = '\0';
1343   if (offset) *offset = -1;
1344   return false;
1345 }
1346 
1347 bool os::dll_address_to_function_name(address addr, char *buf,
1348                                       int buflen, int *offset,
1349                                       bool demangle) {
1350   // buf is not optional, but offset is optional
1351   assert(buf != NULL, "sanity check");
1352 
1353   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1354     return true;
1355   }
1356   if (offset != NULL)  *offset  = -1;
1357   buf[0] = '\0';
1358   return false;
1359 }
1360 
1361 // save the start and end address of jvm.dll into param[0] and param[1]
1362 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1363                            address top_address, void * param) {
1364   if (!param) return -1;
1365 
1366   if (base_addr   <= (address)_locate_jvm_dll &&
1367       top_address > (address)_locate_jvm_dll) {
1368     ((address*)param)[0] = base_addr;
1369     ((address*)param)[1] = top_address;
1370     return 1;
1371   }
1372   return 0;
1373 }
1374 
address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  // Lazily locate jvm.dll on first use; the range is cached afterwards.
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}
1388 
1389 // print module info; param is outputStream*
1390 static int _print_module(const char* fname, address base_address,
1391                          address top_address, void* param) {
1392   if (!param) return -1;
1393 
1394   outputStream* st = (outputStream*)param;
1395 
1396   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1397   return 0;
1398 }
1399 
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// On failure returns NULL with an explanatory message in 'ebuf'.
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  // Read the PE headers by hand to extract the target machine type.
  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
     ||
     // Read location of signature
     (sizeof(signature_offset) !=
     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(fd,
     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  // Map COFF machine codes to printable architecture names.
  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
  };
#if (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  // Architecture mismatch: overwrite ebuf with a specific explanation.
  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}
1512 
// List every loaded module (address range and file name) to 'st'.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}
1517 
// Enumerate the modules loaded into the current process and invoke
// callback(filename, base, top, param) for each. Stops as soon as the
// callback returns non-zero and returns that value; otherwise returns 0.
// NOTE(review): only the first MAX_NUM_MODULES (128) modules are visited,
// and 'filename' is a function-local static buffer -- presumably callers
// here are single-threaded; confirm before any concurrent use.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    // Base address and size; zeroed if the query fails so the callback
    // still runs with a well-defined (empty) range.
    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}
1561 
1562 bool os::get_host_name(char* buf, size_t buflen) {
1563   DWORD size = (DWORD)buflen;
1564   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1565 }
1566 
1567 void os::get_summary_os_info(char* buf, size_t buflen) {
1568   stringStream sst(buf, buflen);
1569   os::win32::print_windows_version(&sst);
1570   // chop off newline character
1571   char* nl = strchr(buf, '\n');
1572   if (nl != NULL) *nl = '\0';
1573 }
1574 
1575 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1576   int ret = vsnprintf(buf, len, fmt, args);
1577   // Get the correct buffer size if buf is too small
1578   if (ret < 0) {
1579     return _vscprintf(fmt, args);
1580   }
1581   return ret;
1582 }
1583 
// Last-modification time of 'filename'. The stat() result is only checked
// by the assert, so in product builds a failed stat leaves st.st_mtime
// undefined -- callers presumably pass files known to exist; confirm.
static inline time_t get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
  return st.st_mtime;
}
1590 
1591 int os::compare_file_modified_times(const char* file1, const char* file2) {
1592   time_t t1 = get_mtime(file1);
1593   time_t t2 = get_mtime(file2);
1594   return t1 - t2;
1595 }
1596 
// On Windows the brief OS report is identical to the full one.
void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}
1600 
// Print OS identification (and, in debug builds, the host name) to 'st'.
void os::print_os_info(outputStream* st) {
#ifdef ASSERT
  // Debug builds include the host name to ease triage of test machines.
  char buffer[1024];
  st->print("HostName: ");
  if (get_host_name(buffer, sizeof(buffer))) {
    st->print("%s ", buffer);
  } else {
    st->print("N/A ");
  }
#endif
  st->print("OS:");
  os::win32::print_windows_version(st);
}
1614 
// Identify and print the Windows version to 'st'. The product name is
// derived from the file version of kernel32.dll rather than GetVersionEx,
// which no longer reports the true version from Windows 8.1 onwards.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  // Query the root block for the fixed file info (product version words).
  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  // Encode major.minor as one comparable number, e.g. 6.3 -> 6003.
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
    if (is_workstation) {
      st->print("10");
    } else {
      st->print("Server 2016");
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}
1735 
// Platform-specific extra CPU details: intentionally empty on Windows.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do for now.
}
1739 
1740 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1741   HKEY key;
1742   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1743                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1744   if (status == ERROR_SUCCESS) {
1745     DWORD size = (DWORD)buflen;
1746     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1747     if (status != ERROR_SUCCESS) {
1748         strncpy(buf, "## __CPU__", buflen);
1749     }
1750     RegCloseKey(key);
1751   } else {
1752     // Put generic cpu info to return
1753     strncpy(buf, "## __CPU__", buflen);
1754   }
1755 }
1756 
1757 void os::print_memory_info(outputStream* st) {
1758   st->print("Memory:");
1759   st->print(" %dk page", os::vm_page_size()>>10);
1760 
1761   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1762   // value if total memory is larger than 4GB
1763   MEMORYSTATUSEX ms;
1764   ms.dwLength = sizeof(ms);
1765   GlobalMemoryStatusEx(&ms);
1766 
1767   st->print(", physical %uk", os::physical_memory() >> 10);
1768   st->print("(%uk free)", os::available_memory() >> 10);
1769 
1770   st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1771   st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1772   st->cr();
1773 }
1774 
// Print a human-readable summary of a Windows EXCEPTION_RECORD ('siginfo')
// to 'st': the exception name/code plus, for access violations and in-page
// errors, the access kind and faulting address.
void os::print_siginfo(outputStream *st, const void* siginfo) {
  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");

  char tmp[64];
  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
    strcpy(tmp, "EXCEPTION_??");
  }
  st->print(" %s (0x%x)", tmp, er->ExceptionCode);

  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
       er->NumberParameters >= 2) {
    // ExceptionInformation[0] encodes the access kind, [1] the address.
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    case 8: st->print(", data execution prevention violation at address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else {
    // Other exception codes: dump the raw parameters, if any.
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}
1807 
// Intentionally prints nothing on Windows.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}
1811 
// Cached result of os::jvm_path(); filled in lazily on first call.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      // _access(path, 0) tests for existence; if "<JAVA_HOME>\jre\bin\" does
      // not exist this is a modules image, so overwrite with "\bin\".
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if (buf[0] == '\0') {
    // Normal case: ask the OS for the file name of the loaded jvm module.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  // Cache for subsequent calls; keep the cache NUL-terminated even if
  // truncated by strncpy.
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}
1858 
1859 
// Print the platform's JNI symbol-name prefix: 32-bit Windows __stdcall
// decoration uses a leading underscore; 64-bit uses none.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}
1865 
1866 
// Print the platform's JNI symbol-name suffix: 32-bit __stdcall decoration
// is "@<number of argument bytes>"; 64-bit uses none.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size  * sizeof(int));
#endif
}
1872 
1873 // This method is a copy of JDK's sysGetLastErrorString
1874 // from src/windows/hpi/src/system_md.c
1875 
1876 size_t os::lasterror(char* buf, size_t len) {
1877   DWORD errval;
1878 
1879   if ((errval = GetLastError()) != 0) {
1880     // DOS error
1881     size_t n = (size_t)FormatMessage(
1882                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1883                                      NULL,
1884                                      errval,
1885                                      0,
1886                                      buf,
1887                                      (DWORD)len,
1888                                      NULL);
1889     if (n > 3) {
1890       // Drop final '.', CR, LF
1891       if (buf[n - 1] == '\n') n--;
1892       if (buf[n - 1] == '\r') n--;
1893       if (buf[n - 1] == '.') n--;
1894       buf[n] = '\0';
1895     }
1896     return n;
1897   }
1898 
1899   if (errno != 0) {
1900     // C runtime error that has no corresponding DOS error code
1901     const char* s = os::strerror(errno);
1902     size_t n = strlen(s);
1903     if (n >= len) n = len - 1;
1904     strncpy(buf, s, n);
1905     buf[n] = '\0';
1906     return n;
1907   }
1908 
1909   return 0;
1910 }
1911 
1912 int os::get_last_error() {
1913   DWORD error = GetLastError();
1914   if (error == 0) {
1915     error = errno;
1916   }
1917   return (int)error;
1918 }
1919 
// Create a counting semaphore with initial count 'value'; LONG_MAX gives an
// effectively unbounded maximum count.
WindowsSemaphore::WindowsSemaphore(uint value) {
  _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);

  guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
}
1925 
// Release the kernel semaphore object. NOTE(review): presumably no thread is
// still blocked on the semaphore when the destructor runs - callers must
// ensure this.
WindowsSemaphore::~WindowsSemaphore() {
  ::CloseHandle(_semaphore);
}
1929 
1930 void WindowsSemaphore::signal(uint count) {
1931   if (count > 0) {
1932     BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
1933 
1934     assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
1935   }
1936 }
1937 
// Block until the semaphore count is positive, then decrement it.
void WindowsSemaphore::wait() {
  DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
  assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
  assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
}
1943 
// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// Handler installed for signals registered via sun.misc.Signal: records the
// signal for the signal-dispatcher thread and re-installs itself.
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}
1956 
// Return an opaque pointer to the shared user-signal handler above.
void* os::user_handler() {
  return (void*) UserHandler;
}
1960 
1961 void* os::signal(int signal_number, void* handler) {
1962   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1963     void (*oldHandler)(int) = sigbreakHandler;
1964     sigbreakHandler = (void (*)(int)) handler;
1965     return (void*) oldHandler;
1966   } else {
1967     return (void*)::signal(signal_number, (void (*)(int))handler);
1968   }
1969 }
1970 
// Deliver the given signal to this process via the C runtime.
void os::signal_raise(int signal_number) {
  raise(signal_number);
}
1974 
// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events.  We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    // Dispatch to the handler registered via os::signal(), if any.
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
        sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
    // Fall through: an interactive logoff is treated like close/shutdown.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}
2023 
// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  // NSIG is one past the largest signal number on this platform; the slot
  // NSIG itself is reserved as the signal-thread exit sentinel.
  return NSIG;
}
2032 
// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore used to wake the signal-dispatcher thread when a signal is posted.
static HANDLE sig_sem = NULL;

// Platform-specific part of signal-dispatch initialization: create the
// wakeup semaphore and (unless -Xrs) install the console control handler.
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // NOTE(review): the maximum count of NSIG+1 bounds unconsumed wakeups;
  // ReleaseSemaphore fails beyond that (asserted in os::signal_notify).
  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}
2062 
2063 void os::signal_notify(int signal_number) {
2064   BOOL ret;
2065   if (sig_sem != NULL) {
2066     Atomic::inc(&pending_signals[signal_number]);
2067     ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2068     assert(ret != 0, "ReleaseSemaphore() failed");
2069   }
2070 }
2071 
// Scan pending_signals for a raised signal and atomically consume one
// occurrence, returning its number. If wait_for_signal is false, return -1
// when nothing is pending; otherwise block on sig_sem until a signal is
// posted, honoring the external-suspension protocol while blocked.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS decrement; on success this thread owns one occurrence of signal i.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    // Transition to blocked so safepoints are not held up while we wait.
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        // Re-post the wakeup we consumed so no signal is lost, then suspend.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
2111 
// Non-blocking poll for a pending signal; returns -1 if none is pending.
int os::signal_lookup() {
  return check_pending_signals(false);
}
2115 
// Block until a signal is pending, then consume and return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
2119 
// Implicit OS exception handling

// Redirect execution to 'handler': save the faulting pc in the current
// JavaThread (if any) and rewrite the context's instruction pointer so that
// returning EXCEPTION_CONTINUE_EXECUTION resumes at the handler.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
    JavaThread* thread = (JavaThread*) Thread::current_or_null();
  // Save pc in thread
  #ifdef _M_AMD64
    // Do not blow up if no thread info available.
    if (thread) {
      thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
    }
    // Set pc to handler
    exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
    #else
    // Do not blow up if no thread info available.
    if (thread) {
      thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
    }
    // Set pc to handler
    exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
  #endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}
2145 
2146 
2147 // Used for PostMortemDump
2148 extern "C" void safepoints();
2149 extern "C" void find(int x);
2150 extern "C" void events();
2151 
2152 // According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionally
2154 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2155 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2156 
2157 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2158 
2159 // From "Execution Protection in the Windows Operating System" draft 0.35
2160 // Once a system header becomes available, the "real" define should be
2161 // included or copied here.
2162 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2163 





2164 // Windows Vista/2008 heap corruption check
2165 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2166 
2167 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2168 // C++ compiler contain this error code. Because this is a compiler-generated
2169 // error, the code is not listed in the Win32 API header files.
2170 // The code is actually a cryptic mnemonic device, with the initial "E"
2171 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2172 // ASCII values of "msc".
2173 
2174 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2175 
2176 #define def_excpt(val) { #val, (val) }
2177 
2178 static const struct { char* name; uint number; } exceptlabels[] = {
2179     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2180     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2181     def_excpt(EXCEPTION_BREAKPOINT),
2182     def_excpt(EXCEPTION_SINGLE_STEP),
2183     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2184     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2185     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2186     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2187     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2188     def_excpt(EXCEPTION_FLT_OVERFLOW),
2189     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2190     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2191     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2192     def_excpt(EXCEPTION_INT_OVERFLOW),
2193     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2194     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2195     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2196     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2197     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2198     def_excpt(EXCEPTION_STACK_OVERFLOW),
2199     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2200     def_excpt(EXCEPTION_GUARD_PAGE),
2201     def_excpt(EXCEPTION_INVALID_HANDLE),
2202     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2203     def_excpt(EXCEPTION_HEAP_CORRUPTION)



2204 };
2205 
2206 #undef def_excpt
2207 
2208 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2209   uint code = static_cast<uint>(exception_code);
2210   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2211     if (exceptlabels[i].number == code) {
2212       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2213       return buf;
2214     }
2215   }
2216 
2217   return NULL;
2218 }
2219 
2220 //-----------------------------------------------------------------------------
2221 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2222   // handle exception caused by idiv; should only happen for -MinInt/-1
2223   // (division by zero is handled explicitly)



2224   #ifdef  _M_AMD64
2225   PCONTEXT ctx = exceptionInfo->ContextRecord;
2226   address pc = (address)ctx->Rip;
2227   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2228   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2229   if (pc[0] == 0xF7) {
2230     // set correct result values and continue after idiv instruction
2231     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2232   } else {
2233     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2234   }
2235   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2236   // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2237   // idiv opcode (0xF7).
2238   ctx->Rdx = (DWORD)0;             // remainder
2239   // Continue the execution
2240   #else
2241   PCONTEXT ctx = exceptionInfo->ContextRecord;
2242   address pc = (address)ctx->Eip;
2243   assert(pc[0] == 0xF7, "not an idiv opcode");
2244   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2245   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2246   // set correct result values and continue after idiv instruction
2247   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2248   ctx->Eax = (DWORD)min_jint;      // result
2249   ctx->Edx = (DWORD)0;             // remainder
2250   // Continue the execution
2251   #endif

2252   return EXCEPTION_CONTINUE_EXECUTION;
2253 }
2254 
2255 //-----------------------------------------------------------------------------
2256 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2257   PCONTEXT ctx = exceptionInfo->ContextRecord;
2258 #ifndef  _WIN64
2259   // handle exception caused by native method modifying control word
2260   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2261 
2262   switch (exception_code) {
2263   case EXCEPTION_FLT_DENORMAL_OPERAND:
2264   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2265   case EXCEPTION_FLT_INEXACT_RESULT:
2266   case EXCEPTION_FLT_INVALID_OPERATION:
2267   case EXCEPTION_FLT_OVERFLOW:
2268   case EXCEPTION_FLT_STACK_CHECK:
2269   case EXCEPTION_FLT_UNDERFLOW:
2270     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2271     if (fp_control_word != ctx->FloatSave.ControlWord) {
2272       // Restore FPCW and mask out FLT exceptions
2273       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2274       // Mask out pending FLT exceptions
2275       ctx->FloatSave.StatusWord &=  0xffffff00;
2276       return EXCEPTION_CONTINUE_EXECUTION;
2277     }
2278   }
2279 
2280   if (prev_uef_handler != NULL) {
2281     // We didn't handle this exception so pass it to the previous
2282     // UnhandledExceptionFilter.
2283     return (prev_uef_handler)(exceptionInfo);
2284   }
2285 #else // !_WIN64
2286   // On Windows, the mxcsr control bits are non-volatile across calls
2287   // See also CR 6192333
2288   //
2289   jint MxCsr = INITIAL_MXCSR;
2290   // we can't use StubRoutines::addr_mxcsr_std()
2291   // because in Win64 mxcsr is not saved there
2292   if (MxCsr != ctx->MxCsr) {
2293     ctx->MxCsr = MxCsr;
2294     return EXCEPTION_CONTINUE_EXECUTION;
2295   }
2296 #endif // !_WIN64
2297 
2298   return EXCEPTION_CONTINUE_SEARCH;
2299 }
2300 
// Hand a fatal exception off to the common VM error reporter (produces the
// hs_err file and normally aborts the process).
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError::report_and_die(t, exception_code, addr, siginfo, context);

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}
2308 
2309 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2310         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2311   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2312   address addr = (address) exceptionRecord->ExceptionInformation[1];
2313   if (Interpreter::contains(pc)) {
2314     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2315     if (!fr->is_first_java_frame()) {
2316       // get_frame_at_stack_banging_point() is only called when we
2317       // have well defined stacks so java_sender() calls do not need
2318       // to assert safe_for_sender() first.
2319       *fr = fr->java_sender();
2320     }
2321   } else {
2322     // more complex code with compiled code
2323     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2324     CodeBlob* cb = CodeCache::find_blob(pc);
2325     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2326       // Not sure where the pc points to, fallback to default
2327       // stack overflow handling
2328       return false;
2329     } else {
2330       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2331       // in compiled code, the stack banging is performed just after the return pc
2332       // has been pushed on the stack
2333       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2334       if (!fr->is_java_frame()) {
2335         // See java_sender() comment above.
2336         *fr = fr->java_sender();
2337       }
2338     }
2339   }
2340   assert(fr->is_java_frame(), "Safety check");
2341   return true;
2342 }
2343 
2344 //-----------------------------------------------------------------------------
2345 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2346   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2347   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;










2348   #ifdef _M_AMD64
2349   address pc = (address) exceptionInfo->ContextRecord->Rip;
2350   #else
2351   address pc = (address) exceptionInfo->ContextRecord->Eip;
2352   #endif

2353   Thread* t = Thread::current_or_null_safe();
2354 
2355   // Handle SafeFetch32 and SafeFetchN exceptions.
2356   if (StubRoutines::is_safefetch_fault(pc)) {
2357     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2358   }
2359 
2360 #ifndef _WIN64
2361   // Execution protection violation - win32 running on AMD64 only
2362   // Handled first to avoid misdiagnosis as a "normal" access violation;
2363   // This is safe to do because we have a new/unique ExceptionInformation
2364   // code for this condition.
2365   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2366     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2367     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2368     address addr = (address) exceptionRecord->ExceptionInformation[1];
2369 
2370     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2371       int page_size = os::vm_page_size();
2372 
2373       // Make sure the pc and the faulting address are sane.
2374       //
2375       // If an instruction spans a page boundary, and the page containing
2376       // the beginning of the instruction is executable but the following
2377       // page is not, the pc and the faulting address might be slightly
2378       // different - we still want to unguard the 2nd page in this case.
2379       //
2380       // 15 bytes seems to be a (very) safe value for max instruction size.
2381       bool pc_is_near_addr =
2382         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2383       bool instr_spans_page_boundary =
2384         (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2385                          (intptr_t) page_size) > 0);
2386 
2387       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2388         static volatile address last_addr =
2389           (address) os::non_memory_address_word();
2390 
2391         // In conservative mode, don't unguard unless the address is in the VM
2392         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2393             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2394 
2395           // Set memory to RWX and retry
2396           address page_start =
2397             (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2398           bool res = os::protect_memory((char*) page_start, page_size,
2399                                         os::MEM_PROT_RWX);
2400 
2401           log_debug(os)("Execution protection violation "
2402                         "at " INTPTR_FORMAT
2403                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2404                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2405 
2406           // Set last_addr so if we fault again at the same address, we don't
2407           // end up in an endless loop.
2408           //
2409           // There are two potential complications here.  Two threads trapping
2410           // at the same address at the same time could cause one of the
2411           // threads to think it already unguarded, and abort the VM.  Likely
2412           // very rare.
2413           //
2414           // The other race involves two threads alternately trapping at
2415           // different addresses and failing to unguard the page, resulting in
2416           // an endless loop.  This condition is probably even more unlikely
2417           // than the first.
2418           //
2419           // Although both cases could be avoided by using locks or thread
2420           // local last_addr, these solutions are unnecessary complication:
2421           // this handler is a best-effort safety net, not a complete solution.
2422           // It is disabled by default and should only be used as a workaround
2423           // in case we missed any no-execute-unsafe VM code.
2424 
2425           last_addr = addr;
2426 
2427           return EXCEPTION_CONTINUE_EXECUTION;
2428         }
2429       }
2430 
2431       // Last unguard failed or not unguarding
2432       tty->print_raw_cr("Execution protection violation");
2433       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2434                    exceptionInfo->ContextRecord);
2435       return EXCEPTION_CONTINUE_SEARCH;
2436     }
2437   }
2438 #endif // _WIN64
2439 
2440   // Check to see if we caught the safepoint code in the
2441   // process of write protecting the memory serialization page.
2442   // It write enables the page immediately after protecting it
2443   // so just return.
2444   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2445     if (t != NULL && t->is_Java_thread()) {
2446       JavaThread* thread = (JavaThread*) t;
2447       PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2448       address addr = (address) exceptionRecord->ExceptionInformation[1];
2449       if (os::is_memory_serialize_page(thread, addr)) {
2450         // Block current thread until the memory serialize page permission restored.
2451         os::block_on_serialize_page_trap();
2452         return EXCEPTION_CONTINUE_EXECUTION;
2453       }
2454     }
2455   }
2456 
2457   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2458       VM_Version::is_cpuinfo_segv_addr(pc)) {
2459     // Verify that OS save/restore AVX registers.
2460     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2461   }
2462 
2463   if (t != NULL && t->is_Java_thread()) {
2464     JavaThread* thread = (JavaThread*) t;
2465     bool in_java = thread->thread_state() == _thread_in_Java;
2466 
2467     // Handle potential stack overflows up front.
2468     if (exception_code == EXCEPTION_STACK_OVERFLOW) {





































2469       if (thread->stack_guards_enabled()) {
2470         if (in_java) {
2471           frame fr;
2472           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2473           address addr = (address) exceptionRecord->ExceptionInformation[1];
2474           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2475             assert(fr.is_java_frame(), "Must be a Java frame");
2476             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2477           }
2478         }
2479         // Yellow zone violation.  The o/s has unprotected the first yellow
2480         // zone page for us.  Note:  must call disable_stack_yellow_zone to
2481         // update the enabled status, even if the zone contains only one page.
2482         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2483         thread->disable_stack_yellow_reserved_zone();
2484         // If not in java code, return and hope for the best.
2485         return in_java
2486             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2487             :  EXCEPTION_CONTINUE_EXECUTION;
2488       } else {
2489         // Fatal red zone violation.
2490         thread->disable_stack_red_zone();
2491         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2492         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2493                       exceptionInfo->ContextRecord);
2494         return EXCEPTION_CONTINUE_SEARCH;
2495       }
2496     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2497       // Either stack overflow or null pointer exception.
2498       if (in_java) {
2499         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2500         address addr = (address) exceptionRecord->ExceptionInformation[1];
2501         address stack_end = thread->stack_end();
2502         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2503           // Stack overflow.
2504           assert(!os::uses_stack_guard_pages(),
2505                  "should be caught by red zone code above.");
2506           return Handle_Exception(exceptionInfo,
2507                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2508         }
2509         // Check for safepoint polling and implicit null
2510         // We only expect null pointers in the stubs (vtable)
2511         // the rest are checked explicitly now.
2512         CodeBlob* cb = CodeCache::find_blob(pc);
2513         if (cb != NULL) {
2514           if (os::is_poll_address(addr)) {
2515             address stub = SharedRuntime::get_poll_stub(pc);
2516             return Handle_Exception(exceptionInfo, stub);
2517           }
2518         }
2519         {
2520 #ifdef _WIN64
2521           // If it's a legal stack address map the entire region in
2522           //
2523           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2524           address addr = (address) exceptionRecord->ExceptionInformation[1];
2525           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2526             addr = (address)((uintptr_t)addr &
2527                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2528             os::commit_memory((char *)addr, thread->stack_base() - addr,
2529                               !ExecMem);
2530             return EXCEPTION_CONTINUE_EXECUTION;
2531           } else
2532 #endif
2533           {
2534             // Null pointer exception.





























2535             if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
2536               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2537               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2538             }
2539             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2540                          exceptionInfo->ContextRecord);
2541             return EXCEPTION_CONTINUE_SEARCH;

2542           }
2543         }
2544       }
2545 
2546 #ifdef _WIN64
2547       // Special care for fast JNI field accessors.
2548       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2549       // in and the heap gets shrunk before the field access.
2550       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2551         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2552         if (addr != (address)-1) {
2553           return Handle_Exception(exceptionInfo, addr);
2554         }
2555       }
2556 #endif
2557 
2558       // Stack overflow or null pointer exception in native code.
2559       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2560                    exceptionInfo->ContextRecord);
2561       return EXCEPTION_CONTINUE_SEARCH;
2562     } // /EXCEPTION_ACCESS_VIOLATION
2563     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
















2564 
2565     if (in_java) {
2566       switch (exception_code) {
2567       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2568         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2569 
2570       case EXCEPTION_INT_OVERFLOW:
2571         return Handle_IDiv_Exception(exceptionInfo);
2572 
2573       } // switch
2574     }
2575     if (((thread->thread_state() == _thread_in_Java) ||
2576          (thread->thread_state() == _thread_in_native)) &&
2577          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2578       LONG result=Handle_FLT_Exception(exceptionInfo);
2579       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2580     }
2581   }
2582 
2583   if (exception_code != EXCEPTION_BREAKPOINT) {
2584     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2585                  exceptionInfo->ContextRecord);
2586   }
2587   return EXCEPTION_CONTINUE_SEARCH;
2588 }
2589 
2590 #ifndef _WIN64
2591 // Special care for fast JNI accessors.
2592 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2593 // the heap gets shrunk before the field access.
2594 // Need to install our own structured exception handler since native code may
2595 // install its own.
2596 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2597   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2598   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2599     address pc = (address) exceptionInfo->ContextRecord->Eip;
2600     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2601     if (addr != (address)-1) {
2602       return Handle_Exception(exceptionInfo, addr);
2603     }
2604   }
2605   return EXCEPTION_CONTINUE_SEARCH;
2606 }
2607 
// Wraps a fast Get<Primitive>Field stub call in an SEH __try/__except using
// the filter above. If the stub faults, the filter either resumes execution
// at the stub's slow-case continuation or lets the handler search continue;
// the trailing 'return 0' only runs if the __except body is ever entered.
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
                                                     jobject obj,           \
                                                     jfieldID fieldID) {    \
    __try {                                                                 \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
                                                                 obj,       \
                                                                 fieldID);  \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
                                              _exception_info())) {         \
    }                                                                       \
    return 0;                                                               \
  }

// Instantiate one SEH-guarded wrapper per primitive type.
DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2630 
2631 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2632   switch (type) {
2633   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2634   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2635   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2636   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2637   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2638   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2639   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2640   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2641   default:        ShouldNotReachHere();
2642   }
2643   return (address)-1;
2644 }
2645 #endif
2646 
2647 // Virtual Memory
2648 
// Page size the VM operates with; delegates to the value held by os::win32.
int os::vm_page_size() { return os::win32::vm_page_size(); }
// Granularity at which VirtualAlloc reservations may be placed;
// delegates to the value held by os::win32.
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}
2653 
2654 // Windows large page support is available on Windows 2003. In order to use
2655 // large page memory, the administrator must first assign additional privilege
2656 // to the user:
2657 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2658 //   + select Local Policies -> User Rights Assignment
2659 //   + double click "Lock pages in memory", add users and/or groups
2660 //   + reboot
2661 // Note the above steps are needed for administrator as well, as administrators
2662 // by default do not have the privilege to lock pages in memory.
2663 //
2664 // Note about Windows 2003: although the API supports committing large page
2665 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2666 // scenario, I found through experiment it only uses large page if the entire
2667 // memory region is reserved and committed in a single VirtualAlloc() call.
2668 // This makes Windows large page support more or less like Solaris ISM, in
2669 // that the entire heap must be committed upfront. This probably will change
2670 // in the future, if so the code below needs to be revisited.
2671 
2672 #ifndef MEM_LARGE_PAGES
2673   #define MEM_LARGE_PAGES 0x20000000
2674 #endif
2675 
// Process and token handles opened by request_lock_memory_privilege() while
// large_page_init() runs; closed again by cleanup_after_large_page_init().
static HANDLE    _hProcess;
static HANDLE    _hToken;
2678 
2679 // Container for NUMA node list info
2680 class NUMANodeListHolder {
2681  private:
2682   int *_numa_used_node_list;  // allocated below
2683   int _numa_used_node_count;
2684 
2685   void free_node_list() {
2686     if (_numa_used_node_list != NULL) {
2687       FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2688     }
2689   }
2690 
2691  public:
2692   NUMANodeListHolder() {
2693     _numa_used_node_count = 0;
2694     _numa_used_node_list = NULL;
2695     // do rest of initialization in build routine (after function pointers are set up)
2696   }
2697 
2698   ~NUMANodeListHolder() {
2699     free_node_list();
2700   }
2701 
2702   bool build() {
2703     DWORD_PTR proc_aff_mask;
2704     DWORD_PTR sys_aff_mask;
2705     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2706     ULONG highest_node_number;
2707     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2708     free_node_list();
2709     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2710     for (unsigned int i = 0; i <= highest_node_number; i++) {
2711       ULONGLONG proc_mask_numa_node;
2712       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2713       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2714         _numa_used_node_list[_numa_used_node_count++] = i;
2715       }
2716     }
2717     return (_numa_used_node_count > 1);
2718   }
2719 
2720   int get_count() { return _numa_used_node_count; }
2721   int get_node_list_entry(int n) {
2722     // for indexes out of range, returns -1
2723     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2724   }
2725 
2726 } numa_node_list_holder;
2727 
2728 
2729 
2730 static size_t _large_page_size = 0;
2731 
// Enable the "SeLockMemoryPrivilege" in this process' token, a prerequisite
// for locking pages in memory (see the privilege warning emitted by
// os::large_page_init() below). Leaves _hProcess/_hToken open; the caller
// must invoke cleanup_after_large_page_init() to close them.
// Returns true only if the privilege was confirmed enabled.
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    // Request exactly one privilege: lock pages in memory.
    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}
2756 
2757 static void cleanup_after_large_page_init() {
2758   if (_hProcess) CloseHandle(_hProcess);
2759   _hProcess = NULL;
2760   if (_hToken) CloseHandle(_hToken);
2761   _hToken = NULL;
2762 }
2763 
// Set up NUMA interleaving: align NUMAInterleaveGranularity to the platform
// minimum and build the list of usable NUMA nodes. Returns true only when
// the process spans multiple nodes; warnings are printed only if the user
// explicitly set -XX:+UseNUMAInterleaving on the command line.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
#define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (numa_node_list_holder.build()) {
    // Log the node ids that interleaved allocations will rotate through.
    if (log_is_enabled(Debug, os, cpu)) {
      Log(os, cpu) log;
      log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
      for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
        log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
      }
    }
    success = true;
  } else {
    WARN("Process does not cover multiple NUMA nodes.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}
2794 
2795 // this routine is used whenever we need to reserve a contiguous VA range
2796 // but we need to make separate VirtualAlloc calls for each piece of the range
2797 // Reasons for doing this:
2798 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2799 //  * UseNUMAInterleaving requires a separate node for each piece
// Reserve (and optionally commit, per 'flags') a contiguous 'bytes'-sized
// range by issuing one VirtualAlloc/VirtualAllocExNuma call per chunk.
// A throw-away small-page reservation is used first to discover a legal
// contiguous address range, then released and re-allocated piecewise.
// 'should_inject_error' (debug builds only) forces a mid-stream failure to
// exercise the cleanup path. Returns the base address, or NULL on failure.
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = (char *) align_size_up((size_t)p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t  bytes_remaining = bytes;
  // An overflow of align_size_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection: fail once bytes_remaining drops to
  // a randomly chosen threshold, so failure can occur at any chunk.
  long ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list, round-robin
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        log_develop_debug(pagesize)("Reserving pages individually failed.");
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}
2912 
2913 
2914 
// Probe for large page support: acquire the lock-memory privilege, query the
// hardware's minimum large page size, and honor LargePageSizeInBytes when it
// is a multiple of that minimum. On success, publishes the page sizes in
// _page_sizes; always clears UseLargePages on failure.
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

#define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (request_lock_memory_privilege()) {
    size_t s = GetLargePageMinimum();
    if (s) {
#if defined(IA32) || defined(AMD64)
      // On x86, cap large pages at 4M (see warning text below).
      if (s > 4*M || LargePageSizeInBytes > 4*M) {
        WARN("JVM cannot use large pages bigger than 4mb.");
      } else {
#endif
        if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
          _large_page_size = LargePageSizeInBytes;
        } else {
          _large_page_size = s;
        }
        success = true;
#if defined(IA32) || defined(AMD64)
      }
#endif
    } else {
      WARN("Large page is not supported by the processor.");
    }
  } else {
    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
  }
#undef WARN

  // Publish [large, default, 0]-terminated page size list only when large
  // pages are actually bigger than the default page size.
  const size_t default_page_size = (size_t) vm_page_size();
  if (success && _large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  cleanup_after_large_page_init();
  UseLargePages = success;
}
2959 
2960 // On win32, one cannot release just a part of reserved memory, it's an
2961 // all or nothing deal.  When we split a reservation, we must break the
2962 // reservation into two reservations.
2963 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
2964                                   bool realloc) {
2965   if (size > 0) {
2966     release_memory(base, size);
2967     if (realloc) {
2968       reserve_memory(split, base);
2969     }
2970     if (size != split) {
2971       reserve_memory(size - split, base + split);
2972     }
2973   }
2974 }
2975 
2976 // Multiple threads can race in this code but it's not possible to unmap small sections of
2977 // virtual space to get requested alignment, like posix-like os's.
2978 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
2979 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
2980   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
2981          "Alignment must be a multiple of allocation granularity (page size)");
2982   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
2983 
2984   size_t extra_size = size + alignment;
2985   assert(extra_size >= size, "overflow, size is too large to allow alignment");
2986 
2987   char* aligned_base = NULL;
2988 
2989   do {
2990     char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
2991     if (extra_base == NULL) {
2992       return NULL;
2993     }
2994     // Do manual alignment
2995     aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
2996 
2997     os::release_memory(extra_base, extra_size);
2998 
2999     aligned_base = os::reserve_memory(size, aligned_base);
3000 
3001   } while (aligned_base == NULL);
3002 
3003   return aligned_base;
3004 }
3005 
// Reserve (not commit) 'bytes' of address space, optionally at 'addr'.
// alignment_hint is unused on Windows. With NUMA interleaving (and small
// pages) the reservation is built chunk-by-chunk across nodes.
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}
3036 
3037 // Reserve memory at an arbitrary address, only if that area is
3038 // available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available, so a plain reserve already has "attempt" semantics.
  return reserve_memory(bytes, requested_addr);
}
3044 
// Large page size selected by large_page_init(); 0 if unavailable.
size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

// Large pages here are reserved with execute-capable protections on request
// (see reserve_memory_special), so executable large-page memory is allowed.
bool os::can_execute_large_page_memory() {
  return true;
}
3059 
3060 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3061                                  bool exec) {
3062   assert(UseLargePages, "only for large pages");
3063 
3064   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3065     return NULL; // Fallback to small pages.
3066   }
3067 
3068   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3069   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3070 
3071   // with large pages, there are two cases where we need to use Individual Allocation
3072   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3073   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3074   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3075     log_debug(pagesize)("Reserving large pages individually.");
3076 
3077     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3078     if (p_buf == NULL) {
3079       // give an appropriate warning message
3080       if (UseNUMAInterleaving) {
3081         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3082       }
3083       if (UseLargePagesIndividualAllocation) {
3084         warning("Individually allocated large pages failed, "
3085                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3086       }
3087       return NULL;
3088     }
3089 
3090     return p_buf;
3091 
3092   } else {
3093     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3094 
3095     // normal policy just allocate it all at once
3096     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3097     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3098     if (res != NULL) {
3099       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3100     }
3101 
3102     return res;
3103   }
3104 }
3105 
// Large-page memory is released the same way as ordinary reservations.
bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

// No per-OS statistics are printed on Windows.
void os::print_statistics() {
}

// Emit a diagnostic for a failed commit, including the OS error text.
// Kept out of the failure paths' callers so the (possibly low-resource)
// error case stays small.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}
3122 
// Commit [addr, addr+bytes), upgrading to execute permissions when 'exec'.
// With NUMA interleaving the range may span several VirtualAlloc
// reservations, so the commit is performed one VirtualQuery region at a
// time. Returns false on the first failed commit/protect.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}
3184 
// Convenience overload; the hint has no effect on Windows.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit or die: on failure, warn and abort the VM with an OOM error
// carrying 'mesg'.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}

// Convenience overload; the hint has no effect on Windows.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
3206 
// Decommit [addr, addr+bytes); the address space stays reserved.
bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

// Release a whole reservation. MEM_RELEASE requires size 0; 'bytes' is
// intentionally unused, and 'addr' must be a reservation base address.
bool os::pd_release_memory(char* addr, size_t bytes) {
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

// Stack guard pages are plain committed, non-executable pages on Windows;
// the actual guarding is applied via protect_memory by shared code.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}
3228 
3229 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3230   uint count = 0;
3231   bool ret = false;
3232   size_t bytes_remaining = bytes;
3233   char * next_protect_addr = addr;
3234 
3235   // Use VirtualQuery() to get the chunk size.
3236   while (bytes_remaining) {
3237     MEMORY_BASIC_INFORMATION alloc_info;
3238     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3239       return false;
3240     }
3241 
3242     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3243     // We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
3244     // but we don't distinguish here as both cases are protected by same API.
3245     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3246     warning("Failed protecting pages individually for chunk #%u", count);
3247     if (!ret) {
3248       return false;
3249     }
3250 
3251     bytes_remaining -= bytes_to_protect;
3252     next_protect_addr += bytes_to_protect;
3253     count++;
3254   }
3255   return ret;
3256 }
3257 
// Set protections specified on [addr, addr+bytes). If the range is not yet
// committed it is committed first (Windows can only change protection on
// committed memory); the VM exits on commit failure. With NUMA interleaving
// the range may span multiple allocations and is protected chunk-wise.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  bool ret;
  if (UseNUMAInterleaving) {
    // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
    // so we must protect the chunks individually.
    ret = protect_pages_individually(addr, bytes, p, &old_status);
  } else {
    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
  }
#ifdef ASSERT
  if (!ret) {
    int err = os::get_last_error();
    char buf[256];
    size_t buf_len = os::lasterror(buf, sizeof(buf));
    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          buf_len != 0 ? buf : "<no_error_string>", err);
  }
#endif
  return ret;
}
3305 
3306 bool os::guard_memory(char* addr, size_t bytes) {
3307   DWORD old_status;
3308   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3309 }
3310 
3311 bool os::unguard_memory(char* addr, size_t bytes) {
3312   DWORD old_status;
3313   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3314 }
3315 
// NUMA support: most operations are no-ops on Windows; interleaving (when
// enabled) is handled at allocation time via numa_node_list_holder.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes)    { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
bool os::numa_topology_changed()                       { return false; }
// Report at least one group so callers always see a non-empty topology.
size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id()                            { return 0; }
3323 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3324   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3325     // Provide an answer for UMA systems
3326     ids[0] = 0;
3327     return 1;
3328   } else {
3329     // check for size bigger than actual groups_num
3330     size = MIN2(size, numa_get_groups_num());
3331     for (int i = 0; i < (int)size; i++) {
3332       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3333     }
3334     return size;
3335   }
3336 }
3337 
// Per-page information is not available on Windows; always reports failure.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
3341 
// Page scanning is not supported on Windows; reports the whole range as
// scanned by returning 'end' immediately.
char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  return end;
}
3346 
3347 char* os::non_memory_address_word() {
3348   // Must never look like an address returned by reserve_memory,
3349   // even in its subfields (as defined by the CPU immediate fields,
3350   // if the CPU splits constants across multiple instructions).
3351   return (char*)-1;
3352 }
3353 
3354 #define MAX_ERROR_COUNT 100
3355 #define SYS_THREAD_ERROR 0xffffffffUL
3356 
// Release a thread that was created in the suspended state so it begins
// executing its entry point.
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}
3365 
3366 class HighResolutionInterval : public CHeapObj<mtThread> {
3367   // The default timer resolution seems to be 10 milliseconds.
3368   // (Where is this written down?)
3369   // If someone wants to sleep for only a fraction of the default,
3370   // then we set the timer resolution down to 1 millisecond for
3371   // the duration of their interval.
3372   // We carefully set the resolution back, since otherwise we
3373   // seem to incur an overhead (3%?) that we don't need.
3374   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
3375   // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3376   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3377   // timeBeginPeriod() if the relative error exceeded some threshold.
3378   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3379   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
3380   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3381   // resolution timers running.
3382  private:
3383   jlong resolution;
3384  public:
3385   HighResolutionInterval(jlong ms) {
3386     resolution = ms % 10L;
3387     if (resolution != 0) {
3388       MMRESULT result = timeBeginPeriod(1L);
3389     }
3390   }
3391   ~HighResolutionInterval() {
3392     if (resolution != 0) {
3393       MMRESULT result = timeEndPeriod(1L);
3394     }
3395     resolution = 0L;
3396   }
3397 };
3398 
// Sleep for 'ms' milliseconds.  An interruptable sleep requires a JavaThread
// and is implemented as a wait on the thread's interrupt event, so that
// os::interrupt() can wake it; it returns OS_INTRPT when that happens and
// OS_TIMEOUT otherwise.  A non-interruptable sleep is a plain OS Sleep().
// Intervals larger than MAXDWORD are handled by sleeping in chunks.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
      return res;
    }
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if (!ForceTimeHighResolution) {
      // Temporarily raise the timer resolution if the interval needs it.
      phri = new HighResolutionInterval(ms);
    }
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      // The interrupt event fired: consume it and report interruption.
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}
3447 
// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);  // jlong -> DWORD narrowing is safe: the assert bounds ms below 1000
}
3456 
3457 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3458 void os::infinite_sleep() {
3459   while (true) {    // sleep forever ...
3460     Sleep(100000);  // ... 100 seconds at a time
3461   }
3462 }
3463 
// Function-pointer signature of SwitchToThread().
// NOTE(review): STTSignature appears unused in this chunk; presumably a
// leftover from a dynamic-lookup era -- confirm before removing.
typedef BOOL (WINAPI * STTSignature)(void);

// Yield the remainder of the current time slice to another ready thread.
void os::naked_yield() {
  // Consider passing back the return value from SwitchToThread().
  SwitchToThread();
}
3470 
3471 // Win32 only gives you access to seven real priorities at a time,
3472 // so we compress Java's ten down to seven.  It would be better
3473 // if we dynamically adjusted relative priorities.
3474 
// Default mapping from Java priorities (1..10, plus the internal critical
// priority 11) to Win32 thread priorities; slot 0 is never used.
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};
3489 
// Alternative mapping installed by prio_init() when ThreadPriorityPolicy == 1:
// spreads the Java priorities over a wider range, up to TIME_CRITICAL.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};
3504 
3505 static int prio_init() {
3506   // If ThreadPriorityPolicy is 1, switch tables
3507   if (ThreadPriorityPolicy == 1) {
3508     int i;
3509     for (i = 0; i < CriticalPriority + 1; i++) {
3510       os::java_to_os_priority[i] = prio_policy1[i];
3511     }
3512   }
3513   if (UseCriticalJavaThreadPriority) {
3514     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3515   }
3516   return 0;
3517 }
3518 
3519 OSReturn os::set_native_priority(Thread* thread, int priority) {
3520   if (!UseThreadPriorities) return OS_OK;
3521   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3522   return ret ? OS_OK : OS_ERR;
3523 }
3524 
3525 OSReturn os::get_native_priority(const Thread* const thread,
3526                                  int* priority_ptr) {
3527   if (!UseThreadPriorities) {
3528     *priority_ptr = java_to_os_priority[NormPriority];
3529     return OS_OK;
3530   }
3531   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3532   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3533     assert(false, "GetThreadPriority failed");
3534     return OS_ERR;
3535   }
3536   *priority_ptr = os_prio;
3537   return OS_OK;
3538 }
3539 
3540 
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}  // implemented as a no-op on Windows
3544 
// Deliver an interrupt to 'thread': set the interrupted flag, signal the
// interrupt event that os::sleep() waits on, and unpark both parkers so a
// thread blocked in park() also wakes up.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread ||
         Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();
}
3566 
3567 
// Query (and optionally clear) the interrupted state of 'thread'.  The state
// counts as set only when both the flag is set AND the interrupt event is
// signaled, which prevents reporting (and clearing) a half-delivered
// interrupt -- see bug 6535709 below.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}
3588 
3589 // Get's a pc (hint) for a running thread. Currently used only for profiling.
3590 ExtendedPC os::get_thread_pc(Thread* thread) {
3591   CONTEXT context;
3592   context.ContextFlags = CONTEXT_CONTROL;
3593   HANDLE handle = thread->osthread()->thread_handle();




3594   if (GetThreadContext(handle, &context)) {
3595     #ifdef _M_AMD64
3596       return ExtendedPC((address) context.Rip);
3597     #else
3598       return ExtendedPC((address) context.Eip);
3599     #endif
3600   } else {
3601     return ExtendedPC(NULL);
3602   }

3603 }
3604 
// Id of the calling thread, widened to intx.
// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }
3607 
3608 static int _initial_pid = 0;
3609 
3610 int os::current_process_id() {
3611   return (_initial_pid ? _initial_pid : _getpid());
3612 }
3613 
// Cached host properties; filled in by os::win32::initialize_system_info()
// and the thread bookkeeping code.
int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_windows_server         = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool   os::win32::_has_exit_bug              = true;
3631 
3632 void os::win32::initialize_system_info() {
3633   SYSTEM_INFO si;
3634   GetSystemInfo(&si);
3635   _vm_page_size    = si.dwPageSize;
3636   _vm_allocation_granularity = si.dwAllocationGranularity;
3637   _processor_type  = si.dwProcessorType;
3638   _processor_level = si.wProcessorLevel;
3639   set_processor_count(si.dwNumberOfProcessors);
3640 
3641   MEMORYSTATUSEX ms;
3642   ms.dwLength = sizeof(ms);
3643 
3644   // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3645   // dwMemoryLoad (% of memory in use)
3646   GlobalMemoryStatusEx(&ms);
3647   _physical_memory = ms.ullTotalPhys;
3648 
3649   if (FLAG_IS_DEFAULT(MaxRAM)) {
3650     // Adjust MaxRAM according to the maximum virtual address space available.
3651     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3652   }
3653 
3654   OSVERSIONINFOEX oi;
3655   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3656   GetVersionEx((OSVERSIONINFO*)&oi);
3657   switch (oi.dwPlatformId) {
3658   case VER_PLATFORM_WIN32_NT:
3659     {
3660       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3661       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3662           oi.wProductType == VER_NT_SERVER) {
3663         _is_windows_server = true;
3664       }
3665     }
3666     break;
3667   default: fatal("Unknown platform");
3668   }
3669 
3670   _default_stack_size = os::current_stack_size();
3671   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3672   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3673          "stack size not a multiple of page size");
3674 
3675   initialize_performance_counter();
3676 }
3677 
3678 
// Load a system DLL given by bare file name, searching only the Windows
// system directory and then the Windows directory.  Names containing a path
// component are rejected.  On failure, an explanatory message is written to
// 'ebuf' and NULL is returned.  Note: a truncated path is a hard failure
// (returns NULL immediately); only a failed directory query falls through to
// the next search location.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}
3725 
3726 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3727 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3728 
// InitOnceExecuteOnce callback: performs the one-time initialization of the
// critical section whose address is passed in 'pcrit_sect'.
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
  return TRUE;
}
3733 
// Common exit path for threads and the process.  'what' selects exiting just
// the current thread (EPT_THREAD, via _endthreadex), a normal process exit
// (EPT_PROCESS, via exit(), which runs atexit handlers), or a hard process
// exit (_exit) for any other value.  On OS releases with the exit race bug
// (6573254, see _has_exit_bug above), exiting threads and a process-exiting
// thread are serialized through a critical section so that exit() never runs
// concurrently with _endthreadex().
int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  // Basic approach:
  //  - Each exiting thread registers its intent to exit and then does so.
  //  - A thread trying to terminate the process must wait for all
  //    threads currently exiting to complete their exit.

  if (os::win32::has_exit_bug()) {
    // The array holds handles of the threads that have started exiting by calling
    // _endthreadex().
    // Should be large enough to avoid blocking the exiting thread due to lack of
    // a free slot.
    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
    static int handle_count = 0;

    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
    static CRITICAL_SECTION crit_sect;
    static volatile jint process_exiting = 0;
    int i, j;
    DWORD res;
    HANDLE hproc, hthr;

    // We only attempt to register threads until a process exiting
    // thread manages to set the process_exiting flag. Any threads
    // that come through here after the process_exiting flag is set
    // are unregistered and will be caught in the SuspendThread()
    // infinite loop below.
    bool registered = false;

    // The first thread that reached this point, initializes the critical section.
    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
      if (what != EPT_THREAD) {
        // Atomically set process_exiting before the critical section
        // to increase the visibility between racing threads.
        Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
      }
      EnterCriticalSection(&crit_sect);

      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
        // Remove from the array those handles of the threads that have completed exiting.
        for (i = 0, j = 0; i < handle_count; ++i) {
          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
          if (res == WAIT_TIMEOUT) {
            handles[j++] = handles[i];
          } else {
            if (res == WAIT_FAILED) {
              warning("WaitForSingleObject failed (%u) in %s: %d\n",
                      GetLastError(), __FILE__, __LINE__);
            }
            // Don't keep the handle, if we failed waiting for it.
            CloseHandle(handles[i]);
          }
        }

        // If there's no free slot in the array of the kept handles, we'll have to
        // wait until at least one thread completes exiting.
        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
          // Raise the priority of the oldest exiting thread to increase its chances
          // to complete sooner.
          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
            // One thread finished: drop its slot and shift the rest down.
            i = (res - WAIT_OBJECT_0);
            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
            for (; i < handle_count; ++i) {
              handles[i] = handles[i + 1];
            }
          } else {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Don't keep handles, if we failed waiting for them.
            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
              CloseHandle(handles[i]);
            }
            handle_count = 0;
          }
        }

        // Store a duplicate of the current thread handle in the array of handles.
        hproc = GetCurrentProcess();
        hthr = GetCurrentThread();
        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
          warning("DuplicateHandle failed (%u) in %s: %d\n",
                  GetLastError(), __FILE__, __LINE__);

          // We can't register this thread (no more handles) so this thread
          // may be racing with a thread that is calling exit(). If the thread
          // that is calling exit() has managed to set the process_exiting
          // flag, then this thread will be caught in the SuspendThread()
          // infinite loop below which closes that race. A small timing
          // window remains before the process_exiting flag is set, but it
          // is only exposed when we are out of handles.
        } else {
          ++handle_count;
          registered = true;

          // The current exiting thread has stored its handle in the array, and now
          // should leave the critical section before calling _endthreadex().
        }

      } else if (what != EPT_THREAD && handle_count > 0) {
        jlong start_time, finish_time, timeout_left;
        // Before ending the process, make sure all the threads that had called
        // _endthreadex() completed.

        // Set the priority level of the current thread to the same value as
        // the priority level of exiting threads.
        // This is to ensure it will be given a fair chance to execute if
        // the timeout expires.
        hthr = GetCurrentThread();
        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
        start_time = os::javaTimeNanos();
        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
        // Wait in portions of at most MAXIMUM_WAIT_OBJECTS handles at a time.
        for (i = 0; ; ) {
          int portion_count = handle_count - i;
          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
            portion_count = MAXIMUM_WAIT_OBJECTS;
          }
          for (j = 0; j < portion_count; ++j) {
            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
          }
          timeout_left = (finish_time - start_time) / 1000000L;
          if (timeout_left < 0) {
            timeout_left = 0;
          }
          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Reset portion_count so we close the remaining
            // handles due to this error.
            portion_count = handle_count - i;
          }
          for (j = 0; j < portion_count; ++j) {
            CloseHandle(handles[i + j]);
          }
          if ((i += portion_count) >= handle_count) {
            break;
          }
          start_time = os::javaTimeNanos();
        }
        handle_count = 0;
      }

      LeaveCriticalSection(&crit_sect);
    }

    if (!registered &&
        OrderAccess::load_acquire(&process_exiting) != 0 &&
        process_exiting != (jint)GetCurrentThreadId()) {
      // Some other thread is about to call exit(), so we don't let
      // the current unregistered thread proceed to exit() or _endthreadex()
      while (true) {
        SuspendThread(GetCurrentThread());
        // Avoid busy-wait loop, if SuspendThread() failed.
        Sleep(EXIT_TIMEOUT);
      }
    }
  }

  // We are here if either
  // - there's no 'race at exit' bug on this OS release;
  // - initialization of the critical section failed (unlikely);
  // - the current thread has registered itself and left the critical section;
  // - the process-exiting thread has raised the flag and left the critical section.
  if (what == EPT_THREAD) {
    _endthreadex((unsigned)exit_code);
  } else if (what == EPT_PROCESS) {
    ::exit(exit_code);
  } else {
    _exit(exit_code);
  }

  // Should not reach here
  return exit_code;
}
3914 
3915 #undef EXIT_TIMEOUT
3916 
3917 void os::win32::setmode_streams() {
3918   _setmode(_fileno(stdin), _O_BINARY);
3919   _setmode(_fileno(stdout), _O_BINARY);
3920   _setmode(_fileno(stderr), _O_BINARY);
3921 }
3922 
3923 
3924 bool os::is_debugger_attached() {
3925   return IsDebuggerPresent() ? true : false;
3926 }
3927 
3928 
3929 void os::wait_for_keypress_at_exit(void) {
3930   if (PauseAtExit) {
3931     fprintf(stderr, "Press any key to continue...\n");
3932     fgetc(stdin);
3933   }
3934 }
3935 
3936 
3937 bool os::message_box(const char* title, const char* message) {
3938   int result = MessageBox(NULL, message, title,
3939                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3940   return result == IDYES;
3941 }
3942 
3943 #ifndef PRODUCT
3944 #ifndef _WIN64
3945 // Helpers to check whether NX protection is enabled
// SEH filter used by nx_check_protection(): handles only an access violation
// caused by attempting to execute non-executable memory (a DEP/NX fault);
// everything else continues the normal search for a handler.
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}
3955 
// Probe for DEP/NX: execute a one-byte 'ret' placed on the stack.  If NX is
// active the call faults and the filter above reports it.
void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
3966 #endif // _WIN64
3967 #endif // PRODUCT
3968 
3969 // This is called _before_ the global arguments have been parsed
// Early platform initialization.  Caches the pid, queries system info,
// switches the standard streams to binary mode and captures the main
// process/thread handles.
void os::init(void) {
  // Cache the pid; see os::current_process_id().
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // DuplicateHandle turns the pseudo thread handle into a real one usable
  // from other threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();

  // initialize fast thread access - only used for 32-bit
  win32::initialize_thread_ptr_offset();
}
3993 
3994 // To install functions for atexit processing
extern "C" {
  // atexit() needs a plain C function pointer; this simply forwards to
  // perfMemory_exit().
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
4000 
4001 static jint initSock();
4002 
4003 // this is called _after_ the global arguments have been parsed
4004 jint os::init_2(void) {
4005   // Allocate a single page and mark it as readable for safepoint polling
4006   address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
4007   guarantee(polling_page != NULL, "Reserve Failed for polling page");
4008 
4009   address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
4010   guarantee(return_page != NULL, "Commit Failed for polling page");
4011 
4012   os::set_polling_page(polling_page);
4013   log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
4014 
4015   if (!UseMembar) {
4016     address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
4017     guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
4018 
4019     return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
4020     guarantee(return_page != NULL, "Commit Failed for memory serialize page");
4021 
4022     os::set_memory_serialize_page(mem_serialize_page);
4023     log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
4024   }
4025 
4026   // Setup Windows Exceptions
4027 
4028   // for debugging float code generation bugs
4029   if (ForceFloatExceptions) {
4030 #ifndef  _WIN64
4031     static long fp_control_word = 0;
4032     __asm { fstcw fp_control_word }
4033     // see Intel PPro Manual, Vol. 2, p 7-16
4034     const long precision = 0x20;
4035     const long underflow = 0x10;
4036     const long overflow  = 0x08;
4037     const long zero_div  = 0x04;
4038     const long denorm    = 0x02;
4039     const long invalid   = 0x01;
4040     fp_control_word |= invalid;
4041     __asm { fldcw fp_control_word }
4042 #endif
4043   }
4044 
4045   // If stack_commit_size is 0, windows will reserve the default size,
4046   // but only commit a small portion of it.
4047   size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
4048   size_t default_reserve_size = os::win32::default_stack_size();
4049   size_t actual_reserve_size = stack_commit_size;
4050   if (stack_commit_size < default_reserve_size) {
4051     // If stack_commit_size == 0, we want this too
4052     actual_reserve_size = default_reserve_size;
4053   }
4054 
4055   // Check minimum allowable stack size for thread creation and to initialize
4056   // the java system classes, including StackOverflowError - depends on page
4057   // size.  Add two 4K pages for compiler2 recursion in main thread.
4058   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4059   // class initialization depending on 32 or 64 bit VM.
4060   size_t min_stack_allowed =
4061             (size_t)(JavaThread::stack_guard_zone_size() +
4062                      JavaThread::stack_shadow_zone_size() +
4063                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4064 
4065   min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
4066 
4067   if (actual_reserve_size < min_stack_allowed) {
4068     tty->print_cr("\nThe Java thread stack size specified is too small. "
4069                   "Specify at least %dk",
4070                   min_stack_allowed / K);
4071     return JNI_ERR;
4072   }
4073 
4074   JavaThread::set_stack_size_at_create(stack_commit_size);
4075 
4076   // Calculate theoretical max. size of Threads to guard against artificial
4077   // out-of-memory situations, where all available address-space has been
4078   // reserved by thread stacks.
4079   assert(actual_reserve_size != 0, "Must have a stack");
4080 
4081   // Calculate the thread limit when we should start doing Virtual Memory
4082   // banging. Currently when the threads will have used all but 200Mb of space.
4083   //
4084   // TODO: consider performing a similar calculation for commit size instead
4085   // as reserve size, since on a 64-bit platform we'll run into that more
4086   // often than running out of virtual memory space.  We can use the
4087   // lower value of the two calculations as the os_thread_limit.
4088   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4089   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4090 
4091   // at exit methods are called in the reverse order of their registration.
4092   // there is no limit to the number of functions registered. atexit does
4093   // not set errno.
4094 
4095   if (PerfAllowAtExitRegistration) {
4096     // only register atexit functions if PerfAllowAtExitRegistration is set.
4097     // atexit functions can be delayed until process exit time, which
4098     // can be problematic for embedded VM situations. Embedded VMs should
4099     // call DestroyJavaVM() to assure that VM resources are released.
4100 
4101     // note: perfMemory_exit_helper atexit function may be removed in
4102     // the future if the appropriate cleanup code can be added to the
4103     // VM_Exit VMOperation's doit method.
4104     if (atexit(perfMemory_exit_helper) != 0) {
4105       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4106     }
4107   }
4108 
4109 #ifndef _WIN64
4110   // Print something if NX is enabled (win32 on AMD64)
4111   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4112 #endif
4113 
4114   // initialize thread priority policy
4115   prio_init();
4116 
4117   if (UseNUMA && !ForceNUMA) {
4118     UseNUMA = false; // We don't fully support this yet
4119   }
4120 
4121   if (UseNUMAInterleaving) {
4122     // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4123     bool success = numa_interleaving_init();
4124     if (!success) UseNUMAInterleaving = false;
4125   }
4126 
4127   if (initSock() != JNI_OK) {
4128     return JNI_ERR;
4129   }
4130 
4131   return JNI_OK;
4132 }
4133 
4134 // Mark the polling page as unreadable
4135 void os::make_polling_page_unreadable(void) {
4136   DWORD old_status;
4137   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4138                       PAGE_NOACCESS, &old_status)) {
4139     fatal("Could not disable polling page");
4140   }
4141 }
4142 
4143 // Mark the polling page as readable
4144 void os::make_polling_page_readable(void) {
4145   DWORD old_status;
4146   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4147                       PAGE_READONLY, &old_status)) {
4148     fatal("Could not enable polling page");
4149   }
4150 }
4151 
4152 
4153 int os::stat(const char *path, struct stat *sbuf) {
4154   char pathbuf[MAX_PATH];
4155   if (strlen(path) > MAX_PATH - 1) {
4156     errno = ENAMETOOLONG;
4157     return -1;
4158   }
4159   os::native_path(strcpy(pathbuf, path));
4160   int ret = ::stat(pathbuf, sbuf);
4161   if (sbuf != NULL && UseUTCFileTimestamp) {
4162     // Fix for 6539723.  st_mtime returned from stat() is dependent on
4163     // the system timezone and so can return different values for the
4164     // same file if/when daylight savings time changes.  This adjustment
4165     // makes sure the same timestamp is returned regardless of the TZ.
4166     //
4167     // See:
4168     // http://msdn.microsoft.com/library/
4169     //   default.asp?url=/library/en-us/sysinfo/base/
4170     //   time_zone_information_str.asp
4171     // and
4172     // http://msdn.microsoft.com/library/default.asp?url=
4173     //   /library/en-us/sysinfo/base/settimezoneinformation.asp
4174     //
4175     // NOTE: there is a insidious bug here:  If the timezone is changed
4176     // after the call to stat() but before 'GetTimeZoneInformation()', then
4177     // the adjustment we do here will be wrong and we'll return the wrong
4178     // value (which will likely end up creating an invalid class data
4179     // archive).  Absent a better API for this, or some time zone locking
4180     // mechanism, we'll have to live with this risk.
4181     TIME_ZONE_INFORMATION tz;
4182     DWORD tzid = GetTimeZoneInformation(&tz);
4183     int daylightBias =
4184       (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
4185     sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4186   }
4187   return ret;
4188 }
4189 
4190 
// Combine the two 32-bit halves of a win32 FILETIME into one signed
// 64-bit value (FILETIME counts 100ns intervals).
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4193 
4194 
4195 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4196 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4197 // of a thread.
4198 //
4199 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4200 // the fast estimate available on the platform.
4201 
4202 // current_thread_cpu_time() is not optimized for Windows yet
4203 jlong os::current_thread_cpu_time() {
4204   // return user + sys since the cost is the same
4205   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4206 }
4207 
4208 jlong os::thread_cpu_time(Thread* thread) {
4209   // consistent with what current_thread_cpu_time() returns.
4210   return os::thread_cpu_time(thread, true /* user+sys */);
4211 }
4212 
// CPU time of the calling thread; user+sys or user-only per the flag.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}
4216 
4217 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4218   // This code is copy from clasic VM -> hpi::sysThreadCPUTime
4219   // If this function changes, os::is_thread_cpu_time_supported() should too
4220   FILETIME CreationTime;
4221   FILETIME ExitTime;
4222   FILETIME KernelTime;
4223   FILETIME UserTime;
4224 
4225   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4226                       &ExitTime, &KernelTime, &UserTime) == 0) {
4227     return -1;
4228   } else if (user_sys_cpu_time) {
4229     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4230   } else {
4231     return FT2INT64(UserTime) * 100;
4232   }
4233 }
4234 
4235 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4236   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4237   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4238   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4239   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4240 }
4241 
4242 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4243   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4244   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4245   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4246   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4247 }
4248 
4249 bool os::is_thread_cpu_time_supported() {
4250   // see os::thread_cpu_time
4251   FILETIME CreationTime;
4252   FILETIME ExitTime;
4253   FILETIME KernelTime;
4254   FILETIME UserTime;
4255 
4256   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4257                       &KernelTime, &UserTime) == 0) {
4258     return false;
4259   } else {
4260     return true;
4261   }
4262 }
4263 
4264 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4265 // It does have primitives (PDH API) to get CPU usage and run queue length.
4266 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4267 // If we wanted to implement loadavg on Windows, we have a few options:
4268 //
4269 // a) Query CPU usage and run queue length and "fake" an answer by
4270 //    returning the CPU usage if it's under 100%, and the run queue
4271 //    length otherwise.  It turns out that querying is pretty slow
4272 //    on Windows, on the order of 200 microseconds on a fast machine.
4273 //    Note that on the Windows the CPU usage value is the % usage
4274 //    since the last time the API was called (and the first call
4275 //    returns 100%), so we'd have to deal with that as well.
4276 //
4277 // b) Sample the "fake" answer using a sampling thread and store
4278 //    the answer in a global variable.  The call to loadavg would
4279 //    just return the value of the global, avoiding the slow query.
4280 //
4281 // c) Sample a better answer using exponential decay to smooth the
4282 //    value.  This is basically the algorithm used by UNIX kernels.
4283 //
4284 // Note that sampling thread starvation could affect both (b) and (c).
// Not implemented on Windows (see the discussion above); always fails.
int os::loadavg(double loadavg[], int nelem) {
  return -1;
}
4288 
4289 
4290 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  // Controlled by the DontYieldALot flag (false by default).
  return DontYieldALot;
}
4294 
4295 // This method is a slightly reworked copy of JDK's sysOpen
4296 // from src/windows/hpi/src/sys_api_md.c
4297 
4298 int os::open(const char *path, int oflag, int mode) {
4299   char pathbuf[MAX_PATH];
4300 
4301   if (strlen(path) > MAX_PATH - 1) {
4302     errno = ENAMETOOLONG;
4303     return -1;
4304   }
4305   os::native_path(strcpy(pathbuf, path));
4306   return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4307 }
4308 
// Wrap an already-open file descriptor in a C stream.
FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}
4312 
4313 // Is a (classpath) directory empty?
4314 bool os::dir_is_empty(const char* path) {
4315   WIN32_FIND_DATA fd;
4316   HANDLE f = FindFirstFile(path, &fd);
4317   if (f == INVALID_HANDLE_VALUE) {
4318     return true;
4319   }
4320   FindClose(f);
4321   return false;
4322 }
4323 
4324 // create binary file, rewriting existing file if required
4325 int os::create_binary_file(const char* path, bool rewrite_existing) {
4326   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4327   if (!rewrite_existing) {
4328     oflags |= _O_EXCL;
4329   }
4330   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4331 }
4332 
4333 // return current position of file pointer
jlong os::current_file_offset(int fd) {
  // 64-bit "tell": seek zero bytes from the current position.
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}
4337 
4338 // move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  // Returns the new absolute position, or -1 on failure.
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}
4342 
4343 
// 64-bit lseek with the usual SEEK_SET/SEEK_CUR/SEEK_END semantics.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}
4347 
4348 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4349   OVERLAPPED ov;
4350   DWORD nread;
4351   BOOL result;
4352 
4353   ZeroMemory(&ov, sizeof(ov));
4354   ov.Offset = (DWORD)offset;
4355   ov.OffsetHigh = (DWORD)(offset >> 32);
4356 
4357   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4358 
4359   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4360 
4361   return result ? nread : 0;
4362 }
4363 
4364 
4365 // This method is a slightly reworked copy of JDK's sysNativePath
4366 // from src/windows/hpi/src/path_md.c
4367 
4368 // Convert a pathname to native format.  On win32, this involves forcing all
4369 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4370 // sometimes rejects '/') and removing redundant separators.  The input path is
4371 // assumed to have been converted into the character encoding used by the local
4372 // system.  Because this might be a double-byte encoding, care is taken to
4373 // treat double-byte lead characters correctly.
4374 //
4375 // This procedure modifies the given path in place, as the result is never
4376 // longer than the original.  There is no error return; this operation always
4377 // succeeds.
char * os::native_path(char *path) {
  // In-place rewrite: 'src' scans the original text, 'dst' writes the
  // (never longer) result, 'end' tracks the last legal ending position.
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  // If a drive specifier is found, this will
                       // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier.  This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path").  As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator.  The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\';     // Force first separator to '\\'
    }
  }

  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'. Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      // Collapse any run of separators into the single '\\' just written.
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname.  Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst;  // Path does not denote a root directory, so
                      // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  // Terminate at the last legal ending position computed above; this is
  // what drops trailing spaces and a trailing separator.
  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}
4469 
4470 // This code is a copy of JDK's sysSetLength
4471 // from src/windows/hpi/src/sys_api_md.c
4472 
4473 int os::ftruncate(int fd, jlong length) {
4474   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4475   long high = (long)(length >> 32);
4476   DWORD ret;
4477 
4478   if (h == (HANDLE)(-1)) {
4479     return -1;
4480   }
4481 
4482   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4483   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4484     return -1;
4485   }
4486 
4487   if (::SetEndOfFile(h) == FALSE) {
4488     return -1;
4489   }
4490 
4491   return 0;
4492 }
4493 
// Map a C stream back to its CRT file descriptor.
int os::get_fileno(FILE* fp) {
  return _fileno(fp);
}
4497 
4498 // This code is a copy of JDK's sysSync
4499 // from src/windows/hpi/src/sys_api_md.c
4500 // except for the legacy workaround for a bug in Win 98
4501 
4502 int os::fsync(int fd) {
4503   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4504 
4505   if ((!::FlushFileBuffers(handle)) &&
4506       (GetLastError() != ERROR_ACCESS_DENIED)) {
4507     // from winerror.h
4508     return -1;
4509   }
4510   return 0;
4511 }
4512 
4513 static int nonSeekAvailable(int, long *);
4514 static int stdinAvailable(int, long *);
4515 
4516 #define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
4517 #define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4518 
4519 // This code is a copy of JDK's sysAvailable
4520 // from src/windows/hpi/src/sys_api_md.c
4521 
4522 int os::available(int fd, jlong *bytes) {
4523   jlong cur, end;
4524   struct _stati64 stbuf64;
4525 
4526   if (::_fstati64(fd, &stbuf64) >= 0) {
4527     int mode = stbuf64.st_mode;
4528     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4529       int ret;
4530       long lpbytes;
4531       if (fd == 0) {
4532         ret = stdinAvailable(fd, &lpbytes);
4533       } else {
4534         ret = nonSeekAvailable(fd, &lpbytes);
4535       }
4536       (*bytes) = (jlong)(lpbytes);
4537       return ret;
4538     }
4539     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4540       return FALSE;
4541     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4542       return FALSE;
4543     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4544       return FALSE;
4545     }
4546     *bytes = end - cur;
4547     return TRUE;
4548   } else {
4549     return FALSE;
4550   }
4551 }
4552 
// Acquire the CRT's per-stream lock (pairs with os::funlockfile).
void os::flockfile(FILE* fp) {
  _lock_file(fp);
}
4556 
// Release the CRT's per-stream lock taken by os::flockfile.
void os::funlockfile(FILE* fp) {
  _unlock_file(fp);
}
4560 
4561 // This code is a copy of JDK's nonSeekAvailable
4562 // from src/windows/hpi/src/sys_api_md.c
4563 
4564 static int nonSeekAvailable(int fd, long *pbytes) {
4565   // This is used for available on non-seekable devices
4566   // (like both named and anonymous pipes, such as pipes
4567   //  connected to an exec'd process).
4568   // Standard Input is a special case.
4569   HANDLE han;
4570 
4571   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4572     return FALSE;
4573   }
4574 
4575   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4576     // PeekNamedPipe fails when at EOF.  In that case we
4577     // simply make *pbytes = 0 which is consistent with the
4578     // behavior we get on Solaris when an fd is at EOF.
4579     // The only alternative is to raise an Exception,
4580     // which isn't really warranted.
4581     //
4582     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4583       return FALSE;
4584     }
4585     *pbytes = 0;
4586   }
4587   return TRUE;
4588 }
4589 
4590 #define MAX_INPUT_EVENTS 2000
4591 
4592 // This code is a copy of JDK's stdinAvailable
4593 // from src/windows/hpi/src/sys_api_md.c
4594 
4595 static int stdinAvailable(int fd, long *pbytes) {
4596   HANDLE han;
4597   DWORD numEventsRead = 0;  // Number of events read from buffer
4598   DWORD numEvents = 0;      // Number of events in buffer
4599   DWORD i = 0;              // Loop index
4600   DWORD curLength = 0;      // Position marker
4601   DWORD actualLength = 0;   // Number of bytes readable
4602   BOOL error = FALSE;       // Error holder
4603   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4604 
4605   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4606     return FALSE;
4607   }
4608 
4609   // Construct an array of input records in the console buffer
4610   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4611   if (error == 0) {
4612     return nonSeekAvailable(fd, pbytes);
4613   }
4614 
4615   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4616   if (numEvents > MAX_INPUT_EVENTS) {
4617     numEvents = MAX_INPUT_EVENTS;
4618   }
4619 
4620   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4621   if (lpBuffer == NULL) {
4622     return FALSE;
4623   }
4624 
4625   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4626   if (error == 0) {
4627     os::free(lpBuffer);
4628     return FALSE;
4629   }
4630 
4631   // Examine input records for the number of bytes available
4632   for (i=0; i<numEvents; i++) {
4633     if (lpBuffer[i].EventType == KEY_EVENT) {
4634 
4635       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4636                                       &(lpBuffer[i].Event);
4637       if (keyRecord->bKeyDown == TRUE) {
4638         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4639         curLength++;
4640         if (*keyPressed == '\r') {
4641           actualLength = curLength;
4642         }
4643       }
4644     }
4645   }
4646 
4647   if (lpBuffer != NULL) {
4648     os::free(lpBuffer);
4649   }
4650 
4651   *pbytes = (long) actualLength;
4652   return TRUE;
4653 }
4654 
4655 // Map a block of memory.
4656 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4657                         char *addr, size_t bytes, bool read_only,
4658                         bool allow_exec) {
4659   HANDLE hFile;
4660   char* base;
4661 
4662   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4663                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4664   if (hFile == NULL) {
4665     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4666     return NULL;
4667   }
4668 
4669   if (allow_exec) {
4670     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4671     // unless it comes from a PE image (which the shared archive is not.)
4672     // Even VirtualProtect refuses to give execute access to mapped memory
4673     // that was not previously executable.
4674     //
4675     // Instead, stick the executable region in anonymous memory.  Yuck.
4676     // Penalty is that ~4 pages will not be shareable - in the future
4677     // we might consider DLLizing the shared archive with a proper PE
4678     // header so that mapping executable + sharing is possible.
4679 
4680     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4681                                 PAGE_READWRITE);
4682     if (base == NULL) {
4683       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4684       CloseHandle(hFile);
4685       return NULL;
4686     }
4687 
4688     DWORD bytes_read;
4689     OVERLAPPED overlapped;
4690     overlapped.Offset = (DWORD)file_offset;
4691     overlapped.OffsetHigh = 0;
4692     overlapped.hEvent = NULL;
4693     // ReadFile guarantees that if the return value is true, the requested
4694     // number of bytes were read before returning.
4695     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4696     if (!res) {
4697       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4698       release_memory(base, bytes);
4699       CloseHandle(hFile);
4700       return NULL;
4701     }
4702   } else {
4703     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4704                                     NULL /* file_name */);
4705     if (hMap == NULL) {
4706       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4707       CloseHandle(hFile);
4708       return NULL;
4709     }
4710 
4711     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4712     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4713                                   (DWORD)bytes, addr);
4714     if (base == NULL) {
4715       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4716       CloseHandle(hMap);
4717       CloseHandle(hFile);
4718       return NULL;
4719     }
4720 
4721     if (CloseHandle(hMap) == 0) {
4722       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4723       CloseHandle(hFile);
4724       return base;
4725     }
4726   }
4727 
4728   if (allow_exec) {
4729     DWORD old_protect;
4730     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4731     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4732 
4733     if (!res) {
4734       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4735       // Don't consider this a hard error, on IA32 even if the
4736       // VirtualProtect fails, we should still be able to execute
4737       CloseHandle(hFile);
4738       return base;
4739     }
4740   }
4741 
4742   if (CloseHandle(hFile) == 0) {
4743     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4744     return base;
4745   }
4746 
4747   return base;
4748 }
4749 
4750 
4751 // Remap a block of memory.
4752 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4753                           char *addr, size_t bytes, bool read_only,
4754                           bool allow_exec) {
4755   // This OS does not allow existing memory maps to be remapped so we
4756   // have to unmap the memory before we remap it.
4757   if (!os::unmap_memory(addr, bytes)) {
4758     return NULL;
4759   }
4760 
4761   // There is a very small theoretical window between the unmap_memory()
4762   // call above and the map_memory() call below where a thread in native
4763   // code may be able to access an address that is no longer mapped.
4764 
4765   return os::map_memory(fd, file_name, file_offset, addr, bytes,
4766                         read_only, allow_exec);
4767 }
4768 
4769 
4770 // Unmap a block of memory.
4771 // Returns true=success, otherwise false.
4772 
4773 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4774   MEMORY_BASIC_INFORMATION mem_info;
4775   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4776     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
4777     return false;
4778   }
4779 
4780   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4781   // Instead, executable region was allocated using VirtualAlloc(). See
4782   // pd_map_memory() above.
4783   //
4784   // The following flags should match the 'exec_access' flages used for
4785   // VirtualProtect() in pd_map_memory().
4786   if (mem_info.Protect == PAGE_EXECUTE_READ ||
4787       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4788     return pd_release_memory(addr, bytes);
4789   }
4790 
4791   BOOL result = UnmapViewOfFile(addr);
4792   if (result == 0) {
4793     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
4794     return false;
4795   }
4796   return true;
4797 }
4798 
4799 void os::pause() {
4800   char filename[MAX_PATH];
4801   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4802     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4803   } else {
4804     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4805   }
4806 
4807   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4808   if (fd != -1) {
4809     struct stat buf;
4810     ::close(fd);
4811     while (::stat(filename, &buf) == 0) {
4812       Sleep(100);
4813     }
4814   } else {
4815     jio_fprintf(stderr,
4816                 "Could not open pause file '%s', continuing immediately.\n", filename);
4817   }
4818 }
4819 
os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
  // Crash protection may only be constructed on the watcher thread itself.
  assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
}
4823 
4824 // See the caveats for this class in os_windows.hpp
4825 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back
4826 // into this method and returns false. If no OS EXCEPTION was raised, returns
4827 // true.
4828 // The callback is supposed to provide the method that should be protected.
4829 //
bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
         "crash_protection already set?");

  bool success = true;
  __try {
    // Publish this protection object on the watcher thread so other code
    // can see the thread is running under crash protection, then run the
    // callback inside the SEH guard.
    WatcherThread::watcher_thread()->set_crash_protection(this);
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  // Clear the protection whether or not an OS exception was raised.
  WatcherThread::watcher_thread()->set_crash_protection(NULL);
  return success;
}
4846 
4847 // An Event wraps a win32 "CreateEvent" kernel handle.
4848 //
4849 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
4850 //
4851 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
4852 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
4853 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
4854 //     In addition, an unpark() operation might fetch the handle field, but the
4855 //     event could recycle between the fetch and the SetEvent() operation.
4856 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
4857 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
4858 //     on an stale but recycled handle would be harmless, but in practice this might
4859 //     confuse other non-Sun code, so it's not a viable approach.
4860 //
4861 // 2:  Once a win32 event handle is associated with an Event, it remains associated
4862 //     with the Event.  The event handle is never closed.  This could be construed
4863 //     as handle leakage, but only up to the maximum # of threads that have been extant
4864 //     at any one time.  This shouldn't be an issue, as windows platforms typically
4865 //     permit a process to have hundreds of thousands of open handles.
4866 //
4867 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
4868 //     and release unused handles.
4869 //
4870 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
4871 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
4872 //
4873 // 5.  Use an RCU-like mechanism (Read-Copy Update).
4874 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
4875 //
4876 // We use (2).
4877 //
4878 // TODO-FIXME:
4879 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
4880 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
4881 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
4882 // 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
4883 //     into a single win32 CreateEvent() handle.
4884 //
4885 // Assumption:
4886 //    Only one parker can exist on an event, which is why we allocate
4887 //    them per-thread. Multiple unparkers can coexist.
4888 //
4889 // _Event transitions in park()
4890 //   -1 => -1 : illegal
4891 //    1 =>  0 : pass - return immediately
4892 //    0 => -1 : block; then set _Event to 0 before returning
4893 //
4894 // _Event transitions in unpark()
4895 //    0 => 1 : just return
4896 //    1 => 1 : just return
4897 //   -1 => either 0 or 1; must signal target thread
4898 //         That is, we can safely transition _Event from -1 to either
4899 //         0 or 1.
4900 //
4901 // _Event serves as a restricted-range semaphore.
4902 //   -1 : thread is blocked, i.e. there is a waiter
4903 //    0 : neutral: thread is running or ready,
4904 //        could have been signaled after a wait started
4905 //    1 : signaled - thread is running or ready
4906 //
4907 // Another possible encoding of _Event would be with
4908 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
4909 //
4910 
int os::PlatformEvent::park(jlong Millis) {
  // Timed park: decrement _Event and, if no unpark() permit was pending,
  // block on the win32 event handle for at most Millis milliseconds.
  //
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning
  //
  // Returns OS_OK if a permit was consumed (including a race with a
  // nearly simultaneous unpark), OS_TIMEOUT if the wait expired.

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  // Only 0 (no permit) and 1 (permit available) are legal pre-park states.
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;   // consumed a pending unpark() permit

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  // Reset to the neutral state before returning, per the contract above.
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}
4970 
void os::PlatformEvent::park() {
  // Untimed park: decrement _Event and, if no unpark() permit was pending,
  // block on the win32 event handle until unpark() signals it.
  //
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  // Only 0 (no permit) and 1 (permit available) are legal pre-park states.
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;   // consumed a pending unpark() permit

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // Loop to tolerate spurious wakeups: only exit once unpark() has
  // actually raised _Event out of the parked (-1) state.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case multiple
  // unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}
5004 
void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // xchg returns the previous value; a non-negative value means no thread
  // was parked, so there is no one to signal and we are done.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  // Previous value was -1: a thread is blocked in park(); wake it.
  ::SetEvent(_ParkHandle);
}
5026 
5027 
5028 // JSR166
5029 // -------------------------------------------------------
5030 
5031 // The Windows implementation of Park is very straightforward: Basic
5032 // operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically reuse the event inherited
5034 // from Monitor.
5035 
void Parker::park(bool isAbsolute, jlong time) {
  // JSR166 park. Time encoding (see the decode below):
  //   time <  0                 : don't wait at all
  //   time == 0 && !isAbsolute  : wait indefinitely
  //   isAbsolute                : 'time' is a deadline in milliseconds
  //                               (converted to a relative wait here)
  //   otherwise                 : 'time' is a relative wait in nanoseconds
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) {  // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000;  // Must coarsen from nanos to millis
    if (time == 0) {  // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    // The zero-timeout wait above consumed the event if it was signaled;
    // reset it and return without blocking.
    ResetEvent(_ParkEvent);
    return;
  } else {
    // Move the thread to a blocked state for the duration of the wait so
    // the VM treats it appropriately (e.g. for suspend/resume), then block.
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    thread->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (thread->handle_special_suspend_equivalent_condition()) {
      thread->java_suspend_self();
    }
  }
}
5076 
void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  // Signal the event: wakes a thread blocked in Parker::park(), or makes
  // the next park() return immediately. Multiple unparks collapse into
  // one, since the event stays signaled until park() resets it.
  SetEvent(_ParkEvent);
}
5081 
5082 // Run the specified command in a separate process. Return its exit value,
5083 // or -1 on failure (e.g. can't create a new process).
5084 int os::fork_and_exec(char* cmd) {
5085   STARTUPINFO si;
5086   PROCESS_INFORMATION pi;
5087   DWORD exit_code;
5088 
5089   char * cmd_string;
5090   char * cmd_prefix = "cmd /C ";
5091   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5092   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5093   if (cmd_string == NULL) {
5094     return -1;
5095   }
5096   cmd_string[0] = '\0';
5097   strcat(cmd_string, cmd_prefix);
5098   strcat(cmd_string, cmd);
5099 
5100   // now replace all '\n' with '&'
5101   char * substring = cmd_string;
5102   while ((substring = strchr(substring, '\n')) != NULL) {
5103     substring[0] = '&';
5104     substring++;
5105   }
5106   memset(&si, 0, sizeof(si));
5107   si.cb = sizeof(si);
5108   memset(&pi, 0, sizeof(pi));
5109   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5110                             cmd_string,    // command line
5111                             NULL,   // process security attribute
5112                             NULL,   // thread security attribute
5113                             TRUE,   // inherits system handles
5114                             0,      // no creation flags
5115                             NULL,   // use parent's environment block
5116                             NULL,   // use parent's starting directory
5117                             &si,    // (in) startup information
5118                             &pi);   // (out) process information
5119 
5120   if (rslt) {
5121     // Wait until child process exits.
5122     WaitForSingleObject(pi.hProcess, INFINITE);
5123 
5124     GetExitCodeProcess(pi.hProcess, &exit_code);
5125 
5126     // Close process and thread handles.
5127     CloseHandle(pi.hProcess);
5128     CloseHandle(pi.hThread);
5129   } else {
5130     exit_code = -1;
5131   }
5132 
5133   FREE_C_HEAP_ARRAY(char, cmd_string);
5134   return (int)exit_code;
5135 }
5136 
5137 bool os::find(address addr, outputStream* st) {
5138   int offset = -1;
5139   bool result = false;
5140   char buf[256];
5141   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5142     st->print(PTR_FORMAT " ", addr);
5143     if (strlen(buf) < sizeof(buf) - 1) {
5144       char* p = strrchr(buf, '\\');
5145       if (p) {
5146         st->print("%s", p + 1);
5147       } else {
5148         st->print("%s", buf);
5149       }
5150     } else {
5151         // The library name is probably truncated. Let's omit the library name.
5152         // See also JDK-8147512.
5153     }
5154     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5155       st->print("::%s + 0x%x", buf, offset);
5156     }
5157     st->cr();
5158     result = true;
5159   }
5160   return result;
5161 }
5162 
5163 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5164   DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5165 
5166   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
5167     JavaThread* thread = JavaThread::current();
5168     PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5169     address addr = (address) exceptionRecord->ExceptionInformation[1];
5170 
5171     if (os::is_memory_serialize_page(thread, addr)) {
5172       return EXCEPTION_CONTINUE_EXECUTION;
5173     }
5174   }
5175 
5176   return EXCEPTION_CONTINUE_SEARCH;
5177 }
5178 
// We don't build a headless jre for Windows, so this always reports false.
bool os::is_headless_jre() { return false; }
5181 
5182 static jint initSock() {
5183   WSADATA wsadata;
5184 
5185   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5186     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5187                 ::GetLastError());
5188     return JNI_ERR;
5189   }
5190   return JNI_OK;
5191 }
5192 
// Thin wrapper over Winsock's gethostbyname(); resolves 'name' to a hostent.
struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)gethostbyname(name);
}
5196 
// Close a socket descriptor. On Windows sockets are not plain fds, so
// closesocket() must be used rather than close().
int os::socket_close(int fd) {
  return ::closesocket(fd);
}
5200 
// Create a socket; direct pass-through to the Winsock socket() call.
int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}
5204 
// Connect a socket; direct pass-through to the Winsock connect() call.
int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}
5208 
// Receive from a socket. Winsock takes the length as int, hence the
// narrowing cast of nBytes.
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}
5212 
// Send on a socket. Winsock takes the length as int, hence the
// narrowing cast of nBytes.
int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}
5216 
// On Windows raw_send is identical to send(); the distinction only
// matters on platforms where a signal-safe variant is needed.
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}
5220 
5221 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5222 #if defined(IA32)
5223   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5224 #elif defined (AMD64)
5225   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5226 #endif
5227 
5228 // returns true if thread could be suspended,
5229 // false otherwise
5230 static bool do_suspend(HANDLE* h) {
5231   if (h != NULL) {
5232     if (SuspendThread(*h) != ~0) {
5233       return true;
5234     }
5235   }
5236   return false;
5237 }
5238 
5239 // resume the thread
5240 // calling resume on an active thread is a no-op
5241 static void do_resume(HANDLE* h) {
5242   if (h != NULL) {
5243     ResumeThread(*h);
5244   }
5245 }
5246 
5247 // retrieve a suspend/resume context capable handle
5248 // from the tid. Caller validates handle return value.
5249 void get_thread_handle_for_extended_context(HANDLE* h,
5250                                             OSThread::thread_id_t tid) {
5251   if (h != NULL) {
5252     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5253   }
5254 }
5255 
5256 // Thread sampling implementation
5257 //
5258 void os::SuspendedThreadTask::internal_do_task() {
5259   CONTEXT    ctxt;
5260   HANDLE     h = NULL;
5261 
5262   // get context capable handle for thread
5263   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5264 
5265   // sanity
5266   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5267     return;
5268   }
5269 
5270   // suspend the thread
5271   if (do_suspend(&h)) {
5272     ctxt.ContextFlags = sampling_context_flags;
5273     // get thread context
5274     GetThreadContext(h, &ctxt);
5275     SuspendedThreadTaskContext context(_thread, &ctxt);
5276     // pass context to Thread Sampling impl
5277     do_task(context);
5278     // resume thread
5279     do_resume(&h);
5280   }
5281 
5282   // close handle
5283   CloseHandle(h);
5284 }
5285 
// Appends a "do you want to debug?" prompt to 'buf' (which already holds
// an error description), shows it in a message box, and triggers a
// breakpoint if the user accepts so a debugger can attach.
// Note: always returns false — either the user declined, or the
// breakpoint path resets 'yes' before returning.
bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];   // append after the existing message

  jio_snprintf(p, buflen-len,
             "\n\n"
             "Do you want to debug the problem?\n\n"
             "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
             "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
             "Otherwise, select 'No' to abort...",
             os::current_process_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
    // exception. If VM is running inside a debugger, the debugger will
    // catch the exception. Otherwise, the breakpoint exception will reach
    // the default windows exception handler, which can spawn a debugger and
    // automatically attach to the dying VM.
    os::breakpoint();
    yes = false;
  }
  return yes;
}
5311 
// Returns a module handle for the current process image
// (GetModuleHandle(NULL)), opaque to the caller as void*.
void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}
5315 
5316 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5317 // which is used to find statically linked in agents.
5318 // Additionally for windows, takes into account __stdcall names.
5319 // Parameters:
5320 //            sym_name: Symbol in library we are looking for
5321 //            lib_name: Name of library to look in, NULL for shared libs.
5322 //            is_absolute_path == true if lib_name is absolute path to agent
5323 //                                     such as "C:/a/b/L.dll"
5324 //            == false if only the base name of the library is passed in
5325 //               such as "L"
5326 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5327                                     bool is_absolute_path) {
5328   char *agent_entry_name;
5329   size_t len;
5330   size_t name_len;
5331   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5332   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5333   const char *start;
5334 
5335   if (lib_name != NULL) {
5336     len = name_len = strlen(lib_name);
5337     if (is_absolute_path) {
5338       // Need to strip path, prefix and suffix
5339       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5340         lib_name = ++start;
5341       } else {
5342         // Need to check for drive prefix
5343         if ((start = strchr(lib_name, ':')) != NULL) {
5344           lib_name = ++start;
5345         }
5346       }
5347       if (len <= (prefix_len + suffix_len)) {
5348         return NULL;
5349       }
5350       lib_name += prefix_len;
5351       name_len = strlen(lib_name) - suffix_len;
5352     }
5353   }
5354   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5355   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5356   if (agent_entry_name == NULL) {
5357     return NULL;
5358   }
5359   if (lib_name != NULL) {
5360     const char *p = strrchr(sym_name, '@');
5361     if (p != NULL && p != sym_name) {
5362       // sym_name == _Agent_OnLoad@XX
5363       strncpy(agent_entry_name, sym_name, (p - sym_name));
5364       agent_entry_name[(p-sym_name)] = '\0';
5365       // agent_entry_name == _Agent_OnLoad
5366       strcat(agent_entry_name, "_");
5367       strncat(agent_entry_name, lib_name, name_len);
5368       strcat(agent_entry_name, p);
5369       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5370     } else {
5371       strcpy(agent_entry_name, sym_name);
5372       strcat(agent_entry_name, "_");
5373       strncat(agent_entry_name, lib_name, name_len);
5374     }
5375   } else {
5376     strcpy(agent_entry_name, sym_name);
5377   }
5378   return agent_entry_name;
5379 }
5380 
5381 #ifndef PRODUCT
5382 
5383 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5384 // contiguous memory block at a particular address.
5385 // The test first tries to find a good approximate address to allocate at by using the same
5386 // method to allocate some memory at any address. The test then tries to allocate memory in
5387 // the vicinity (not directly after it to avoid possible by-chance use of that location)
5388 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of
5389 // the previously allocated memory is available for allocation. The only actual failure
5390 // that is reported is when the test tries to allocate at a particular location but gets a
5391 // different valid one. A NULL return value at this point is not considered an error but may
5392 // be legitimate.
5393 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5394 void TestReserveMemorySpecial_test() {
5395   if (!UseLargePages) {
5396     if (VerboseInternalVMTests) {
5397       tty->print("Skipping test because large pages are disabled");
5398     }
5399     return;
5400   }
5401   // save current value of globals
5402   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5403   bool old_use_numa_interleaving = UseNUMAInterleaving;
5404 
5405   // set globals to make sure we hit the correct code path
5406   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5407 
5408   // do an allocation at an address selected by the OS to get a good one.
5409   const size_t large_allocation_size = os::large_page_size() * 4;
5410   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5411   if (result == NULL) {
5412     if (VerboseInternalVMTests) {
5413       tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5414                           large_allocation_size);
5415     }
5416   } else {
5417     os::release_memory_special(result, large_allocation_size);
5418 
5419     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5420     // we managed to get it once.
5421     const size_t expected_allocation_size = os::large_page_size();
5422     char* expected_location = result + os::large_page_size();
5423     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5424     if (actual_location == NULL) {
5425       if (VerboseInternalVMTests) {
5426         tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5427                             expected_location, large_allocation_size);
5428       }
5429     } else {
5430       // release memory
5431       os::release_memory_special(actual_location, expected_allocation_size);
5432       // only now check, after releasing any memory to avoid any leaks.
5433       assert(actual_location == expected_location,
5434              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5435              expected_location, expected_allocation_size, actual_location);
5436     }
5437   }
5438 
5439   // restore globals
5440   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5441   UseNUMAInterleaving = old_use_numa_interleaving;
5442 }
5443 #endif // PRODUCT
5444 
5445 /*
5446   All the defined signal names for Windows.
5447 
5448   NOTE that not all of these names are accepted by FindSignal!
5449 
5450   For various reasons some of these may be rejected at runtime.
5451 
5452   Here are the names currently accepted by a user of sun.misc.Signal with
5453   1.4.1 (ignoring potential interaction with use of chaining, etc):
5454 
5455      (LIST TBD)
5456 
5457 */
5458 int os::get_signal_number(const char* name) {
5459   static const struct {
5460     char* name;
5461     int   number;
5462   } siglabels [] =
5463     // derived from version 6.0 VC98/include/signal.h
5464   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5465   "FPE",        SIGFPE,         // floating point exception
5466   "SEGV",       SIGSEGV,        // segment violation
5467   "INT",        SIGINT,         // interrupt
5468   "TERM",       SIGTERM,        // software term signal from kill
5469   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5470   "ILL",        SIGILL};        // illegal instruction
5471   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5472     if (strcmp(name, siglabels[i].name) == 0) {
5473       return siglabels[i].number;
5474     }
5475   }
5476   return -1;
5477 }
5478 
5479 // Fast current thread access
5480 
5481 int os::win32::_thread_ptr_offset = 0;
5482 
// No-op function; exists only as the java_call_t target passed to
// os_exception_wrapper() by initialize_thread_ptr_offset() below.
static void call_wrapper_dummy() {}
5484 
// We need to call the os_exception_wrapper once so that it sets
// up the offset from FS of the thread pointer.
void os::win32::initialize_thread_ptr_offset() {
  // The dummy call does nothing; the wrapper's side effect of recording
  // _thread_ptr_offset is all we want here.
  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
                           NULL, NULL, NULL, NULL);
}
--- EOF ---