1 /*
   2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent
  26 #define _WIN32_WINNT 0x500
  27 
  28 // no precompiled headers
  29 #include "classfile/classLoader.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/disassembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "jvm_windows.h"
  38 #include "memory/allocation.inline.hpp"
  39 #include "memory/filemap.hpp"
  40 #include "mutex_windows.inline.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "os_share_windows.hpp"
  43 #include "prims/jniFastGetField.hpp"
  44 #include "prims/jvm.h"
  45 #include "prims/jvm_misc.hpp"
  46 #include "runtime/arguments.hpp"
  47 #include "runtime/extendedPC.hpp"
  48 #include "runtime/globals.hpp"
  49 #include "runtime/interfaceSupport.hpp"
  50 #include "runtime/java.hpp"
  51 #include "runtime/javaCalls.hpp"
  52 #include "runtime/mutexLocker.hpp"
  53 #include "runtime/objectMonitor.hpp"
  54 #include "runtime/osThread.hpp"
  55 #include "runtime/perfMemory.hpp"
  56 #include "runtime/sharedRuntime.hpp"
  57 #include "runtime/statSampler.hpp"
  58 #include "runtime/stubRoutines.hpp"
  59 #include "runtime/thread.inline.hpp"
  60 #include "runtime/threadCritical.hpp"
  61 #include "runtime/timer.hpp"
  62 #include "services/attachListener.hpp"
  63 #include "services/memTracker.hpp"
  64 #include "services/runtimeService.hpp"
  65 #include "utilities/decoder.hpp"
  66 #include "utilities/defaultStream.hpp"
  67 #include "utilities/events.hpp"
  68 #include "utilities/growableArray.hpp"
  69 #include "utilities/vmError.hpp"
  70 
  71 #ifdef _DEBUG
  72 #include <crtdbg.h>
  73 #endif
  74 
  75 
  76 #include <windows.h>
  77 #include <sys/types.h>
  78 #include <sys/stat.h>
  79 #include <sys/timeb.h>
  80 #include <objidl.h>
  81 #include <shlobj.h>
  82 
  83 #include <malloc.h>
  84 #include <signal.h>
  85 #include <direct.h>
  86 #include <errno.h>
  87 #include <fcntl.h>
  88 #include <io.h>
  89 #include <process.h>              // For _beginthreadex(), _endthreadex()
  90 #include <imagehlp.h>             // For os::dll_address_to_function_name
/* for enumerating DLL modules */
  92 #include <vdmdbg.h>
  93 
  94 // for timer info max values which include all bits
  95 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
  96 
// For DLL loading / load-error detection.
// Offsets into the PE/COFF file format: the DOS header stores the offset of
// the PE signature at 0x3c, and the signature itself is 4 bytes long.
  99 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 100 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 101 
 102 static HANDLE main_process;
 103 static HANDLE main_thread;
 104 static int    main_thread_id;
 105 
 106 static FILETIME process_creation_time;
 107 static FILETIME process_exit_time;
 108 static FILETIME process_user_time;
 109 static FILETIME process_kernel_time;
 110 
 111 #ifdef _M_IA64
 112 #define __CPU__ ia64
 113 #elif _M_AMD64
 114 #define __CPU__ amd64
 115 #else
 116 #define __CPU__ i486
 117 #endif
 118 
 119 // save DLL module handle, used by GetModuleFileName
 120 
 121 HINSTANCE vm_lib_handle;
 122 
 123 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 124   switch (reason) {
 125     case DLL_PROCESS_ATTACH:
 126       vm_lib_handle = hinst;
 127       if(ForceTimeHighResolution)
 128         timeBeginPeriod(1L);
 129       break;
 130     case DLL_PROCESS_DETACH:
 131       if(ForceTimeHighResolution)
 132         timeEndPeriod(1L);
 133       break;
 134     default:
 135       break;
 136   }
 137   return true;
 138 }
 139 
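// Convert a FILETIME (a 64-bit count of 100-nanosecond ticks) to seconds as a double.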
 140 static inline double fileTimeAsDouble(FILETIME* time) {
 141   const double high  = (double) ((unsigned int) ~0);
 142   const double split = 10000000.0;
 143   double result = (time->dwLowDateTime / split) +
 144                    time->dwHighDateTime * (high/split);
 145   return result;
 146 }
 147 
 148 // Implementation of os
 149 
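// Read an environment variable into the supplied buffer. Returns true only if
// the variable exists and its value fits in the buffer.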
bool os::getenv(const char* name, char* buffer, int len) {
  int result = GetEnvironmentVariable(name, buffer, len);
  return result > 0 && result < len;
}
 154 
 155 
 156 // No setuid programs under Windows.
 157 bool os::have_special_privileges() {
 158   return false;
 159 }
 160 
 161 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.
// On Windows it currently does nothing.
 165 void os::run_periodic_checks() {
 166   return;
 167 }
 168 
 169 // previous UnhandledExceptionFilter, if there is one
 170 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 171 
 172 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 173 void os::init_system_properties_values() {
 174   /* sysclasspath, java_home, dll_dir */
 175   {
 176       char *home_path;
 177       char *dll_path;
 178       char *pslash;
 179       char *bin = "\\bin";
 180       char home_dir[MAX_PATH];
 181 
 182       if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
 183           os::jvm_path(home_dir, sizeof(home_dir));
 184           // Found the full path to jvm.dll.
 185           // Now cut the path to <java_home>/jre if we can.
 186           *(strrchr(home_dir, '\\')) = '\0';  /* get rid of \jvm.dll */
 187           pslash = strrchr(home_dir, '\\');
 188           if (pslash != NULL) {
 189               *pslash = '\0';                 /* get rid of \{client|server} */
 190               pslash = strrchr(home_dir, '\\');
 191               if (pslash != NULL)
 192                   *pslash = '\0';             /* get rid of \bin */
 193           }
 194       }
 195 
 196       home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 197       if (home_path == NULL)
 198           return;
 199       strcpy(home_path, home_dir);
 200       Arguments::set_java_home(home_path);
 201 
 202       dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal);
 203       if (dll_path == NULL)
 204           return;
 205       strcpy(dll_path, home_dir);
 206       strcat(dll_path, bin);
 207       Arguments::set_dll_dir(dll_path);
 208 
 209       if (!set_boot_path('\\', ';'))
 210           return;
 211   }
 212 
 213   /* library_path */
 214   #define EXT_DIR "\\lib\\ext"
 215   #define BIN_DIR "\\bin"
 216   #define PACKAGE_DIR "\\Sun\\Java"
 217   {
 218     /* Win32 library search order (See the documentation for LoadLibrary):
 219      *
     * 1. The directory from which the application is loaded.
 221      * 2. The system wide Java Extensions directory (Java only)
 222      * 3. System directory (GetSystemDirectory)
 223      * 4. Windows directory (GetWindowsDirectory)
 224      * 5. The PATH environment variable
 225      * 6. The current directory
 226      */
 227 
 228     char *library_path;
 229     char tmp[MAX_PATH];
 230     char *path_str = ::getenv("PATH");
 231 
 232     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 233         sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 234 
 235     library_path[0] = '\0';
 236 
 237     GetModuleFileName(NULL, tmp, sizeof(tmp));
 238     *(strrchr(tmp, '\\')) = '\0';
 239     strcat(library_path, tmp);
 240 
 241     GetWindowsDirectory(tmp, sizeof(tmp));
 242     strcat(library_path, ";");
 243     strcat(library_path, tmp);
 244     strcat(library_path, PACKAGE_DIR BIN_DIR);
 245 
 246     GetSystemDirectory(tmp, sizeof(tmp));
 247     strcat(library_path, ";");
 248     strcat(library_path, tmp);
 249 
 250     GetWindowsDirectory(tmp, sizeof(tmp));
 251     strcat(library_path, ";");
 252     strcat(library_path, tmp);
 253 
 254     if (path_str) {
 255         strcat(library_path, ";");
 256         strcat(library_path, path_str);
 257     }
 258 
 259     strcat(library_path, ";.");
 260 
 261     Arguments::set_library_path(library_path);
 262     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 263   }
 264 
 265   /* Default extensions directory */
 266   {
 267     char path[MAX_PATH];
 268     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 269     GetWindowsDirectory(path, MAX_PATH);
 270     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 271         path, PACKAGE_DIR, EXT_DIR);
 272     Arguments::set_ext_dirs(buf);
 273   }
 274   #undef EXT_DIR
 275   #undef BIN_DIR
 276   #undef PACKAGE_DIR
 277 
 278   /* Default endorsed standards directory. */
 279   {
 280     #define ENDORSED_DIR "\\lib\\endorsed"
 281     size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
 282     char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
 283     sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
 284     Arguments::set_endorsed_dirs(buf);
 285     #undef ENDORSED_DIR
 286   }
 287 
 288 #ifndef _WIN64
 289   // set our UnhandledExceptionFilter and save any previous one
 290   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 291 #endif
 292 
 293   // Done
 294   return;
 295 }
 296 
 297 void os::breakpoint() {
 298   DebugBreak();
 299 }
 300 
 301 // Invoked from the BREAKPOINT Macro
 302 extern "C" void breakpoint() {
 303   os::breakpoint();
 304 }
 305 
 306 /*
 307  * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 308  * So far, this method is only used by Native Memory Tracking, which is
 309  * only supported on Windows XP or later.
 310  */
 311 address os::get_caller_pc(int n) {
 312 #ifdef _NMT_NOINLINE_
 313   n ++;
 314 #endif
 315   address pc;
 316   if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
 317     return pc;
 318   }
 319   return NULL;
 320 }
 321 
 322 
 323 // os::current_stack_base()
 324 //
 325 //   Returns the base of the stack, which is the stack's
 326 //   starting address.  This function must be called
 327 //   while running on the stack of the thread being queried.
 328 
 329 address os::current_stack_base() {
 330   MEMORY_BASIC_INFORMATION minfo;
 331   address stack_bottom;
 332   size_t stack_size;
 333 
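  // Query the memory region containing a local variable (&minfo); since this
  // code runs on the thread being queried, that region is part of its stack.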
 334   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 335   stack_bottom =  (address)minfo.AllocationBase;
 336   stack_size = minfo.RegionSize;
 337 
 338   // Add up the sizes of all the regions with the same
 339   // AllocationBase.
 340   while( 1 )
 341   {
 342     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 343     if ( stack_bottom == (address)minfo.AllocationBase )
 344       stack_size += minfo.RegionSize;
 345     else
 346       break;
 347   }
 348 
 349 #ifdef _M_IA64
 350   // IA64 has memory and register stacks
 351   //
 352   // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
 353   // at thread creation (1MB backing store growing upwards, 1MB memory stack
 354   // growing downwards, 2MB summed up)
 355   //
 356   // ...
 357   // ------- top of stack (high address) -----
 358   // |
 359   // |      1MB
 360   // |      Backing Store (Register Stack)
 361   // |
 362   // |         / \
 363   // |          |
 364   // |          |
 365   // |          |
 366   // ------------------------ stack base -----
 367   // |      1MB
 368   // |      Memory Stack
 369   // |
 370   // |          |
 371   // |          |
 372   // |          |
 373   // |         \ /
 374   // |
 375   // ----- bottom of stack (low address) -----
 376   // ...
 377 
 378   stack_size = stack_size / 2;
 379 #endif
 380   return stack_bottom + stack_size;
 381 }
 382 
 383 size_t os::current_stack_size() {
 384   size_t sz;
 385   MEMORY_BASIC_INFORMATION minfo;
 386   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 387   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 388   return sz;
 389 }
 390 
 391 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 392   const struct tm* time_struct_ptr = localtime(clock);
 393   if (time_struct_ptr != NULL) {
 394     *res = *time_struct_ptr;
 395     return res;
 396   }
 397   return NULL;
 398 }
 399 
 400 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 401 
 402 // Thread start routine for all new Java threads
 403 static unsigned __stdcall java_start(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance or
  // from different JVM instances. The benefit is especially noticeable on
  // processors with hyperthreading technology.
 409   static int counter = 0;
 410   int pid = os::current_process_id();
 411   _alloca(((pid ^ counter++) & 7) * 128);
 412 
 413   OSThread* osthr = thread->osthread();
 414   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 415 
 416   if (UseNUMA) {
 417     int lgrp_id = os::numa_get_group_id();
 418     if (lgrp_id != -1) {
 419       thread->set_lgrp_id(lgrp_id);
 420     }
 421   }
 422 
 423 
  // Install a win32 structured exception handler around every thread created
  // by the VM, so the VM can generate an error dump when an exception occurs
  // in a non-Java thread (e.g. the VM thread).
 427   __try {
 428      thread->run();
 429   } __except(topLevelExceptionFilter(
 430              (_EXCEPTION_POINTERS*)_exception_info())) {
 431       // Nothing to do.
 432   }
 433 
 434   // One less thread is executing
 435   // When the VMThread gets here, the main thread may have already exited
 436   // which frees the CodeHeap containing the Atomic::add code
 437   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 438     Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
 439   }
 440 
 441   return 0;
 442 }
 443 
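// Wrap an existing Win32 thread in a new OSThread object; used both for
// threads that attach to the VM and for the primordial (main) thread.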
 444 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) {
 445   // Allocate the OSThread object
 446   OSThread* osthread = new OSThread(NULL, NULL);
 447   if (osthread == NULL) return NULL;
 448 
 449   // Initialize support for Java interrupts
 450   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 451   if (interrupt_event == NULL) {
 452     delete osthread;
 453     return NULL;
 454   }
 455   osthread->set_interrupt_event(interrupt_event);
 456 
 457   // Store info on the Win32 thread into the OSThread
 458   osthread->set_thread_handle(thread_handle);
 459   osthread->set_thread_id(thread_id);
 460 
 461   if (UseNUMA) {
 462     int lgrp_id = os::numa_get_group_id();
 463     if (lgrp_id != -1) {
 464       thread->set_lgrp_id(lgrp_id);
 465     }
 466   }
 467 
 468   // Initial thread state is INITIALIZED, not SUSPENDED
 469   osthread->set_state(INITIALIZED);
 470 
 471   return osthread;
 472 }
 473 
 474 
 475 bool os::create_attached_thread(JavaThread* thread) {
 476 #ifdef ASSERT
 477   thread->verify_not_published();
 478 #endif
 479   HANDLE thread_h;
 480   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 481                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 482     fatal("DuplicateHandle failed\n");
 483   }
 484   OSThread* osthread = create_os_thread(thread, thread_h,
 485                                         (int)current_thread_id());
 486   if (osthread == NULL) {
 487      return false;
 488   }
 489 
 490   // Initial thread state is RUNNABLE
 491   osthread->set_state(RUNNABLE);
 492 
 493   thread->set_osthread(osthread);
 494   return true;
 495 }
 496 
 497 bool os::create_main_thread(JavaThread* thread) {
 498 #ifdef ASSERT
 499   thread->verify_not_published();
 500 #endif
 501   if (_starting_thread == NULL) {
 502     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 503      if (_starting_thread == NULL) {
 504         return false;
 505      }
 506   }
 507 
  // The primordial thread is runnable from the start
 509   _starting_thread->set_state(RUNNABLE);
 510 
 511   thread->set_osthread(_starting_thread);
 512   return true;
 513 }
 514 
 515 // Allocate and initialize a new OSThread
 516 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 517   unsigned thread_id;
 518 
 519   // Allocate the OSThread object
 520   OSThread* osthread = new OSThread(NULL, NULL);
 521   if (osthread == NULL) {
 522     return false;
 523   }
 524 
 525   // Initialize support for Java interrupts
 526   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 527   if (interrupt_event == NULL) {
 528     delete osthread;
    return false;
 530   }
 531   osthread->set_interrupt_event(interrupt_event);
 532   osthread->set_interrupted(false);
 533 
 534   thread->set_osthread(osthread);
 535 
 536   if (stack_size == 0) {
 537     switch (thr_type) {
 538     case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
 540       if (JavaThread::stack_size_at_create() > 0)
 541         stack_size = JavaThread::stack_size_at_create();
 542       break;
 543     case os::compiler_thread:
 544       if (CompilerThreadStackSize > 0) {
 545         stack_size = (size_t)(CompilerThreadStackSize * K);
 546         break;
 547       } // else fall through:
 548         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 549     case os::vm_thread:
 550     case os::pgc_thread:
 551     case os::cgc_thread:
 552     case os::watcher_thread:
 553       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 554       break;
 555     }
 556   }
 557 
 558   // Create the Win32 thread
 559   //
  // Contrary to what the MSDN documentation says, "stack_size" in
  // _beginthreadex() does not specify the stack size. Instead, it specifies
  // the size of the initially committed space. The stack size is determined
  // by the PE header in the executable. If the committed "stack_size" is
  // larger than the default value in the PE header, the stack is rounded up
  // to the nearest multiple of 1MB. For example, if the launcher has a
  // default stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all; it only affects the initial
  // commitment. On the other hand, specifying a 'stack_size' larger than the
  // default value may cause a significant increase in memory usage, because
  // not only is the stack space rounded up to a multiple of 1MB, but the
  // entire space is also committed upfront.
  //
  // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that treats 'stack_size' as the stack size. However,
  // we are not supposed to call CreateThread() directly according to the MSDN
  // documentation, because the JVM uses the C runtime library. The good news
  // is that the flag appears to work with _beginthreadex() as well.
 578 
 579 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
 580 #define STACK_SIZE_PARAM_IS_A_RESERVATION  (0x10000)
 581 #endif
 582 
 583   HANDLE thread_handle =
 584     (HANDLE)_beginthreadex(NULL,
 585                            (unsigned)stack_size,
 586                            (unsigned (__stdcall *)(void*)) java_start,
 587                            thread,
 588                            CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
 589                            &thread_id);
 590   if (thread_handle == NULL) {
 591     // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
 592     // without the flag.
 593     thread_handle =
 594     (HANDLE)_beginthreadex(NULL,
 595                            (unsigned)stack_size,
 596                            (unsigned (__stdcall *)(void*)) java_start,
 597                            thread,
 598                            CREATE_SUSPENDED,
 599                            &thread_id);
 600   }
 601   if (thread_handle == NULL) {
 602     // Need to clean up stuff we've allocated so far
 603     CloseHandle(osthread->interrupt_event());
 604     thread->set_osthread(NULL);
 605     delete osthread;
    return false;
 607   }
 608 
 609   Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
 610 
 611   // Store info on the Win32 thread into the OSThread
 612   osthread->set_thread_handle(thread_handle);
 613   osthread->set_thread_id(thread_id);
 614 
 615   // Initial thread state is INITIALIZED, not SUSPENDED
 616   osthread->set_state(INITIALIZED);
 617 
 618   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 619   return true;
 620 }
 621 
 622 
 623 // Free Win32 resources related to the OSThread
 624 void os::free_thread(OSThread* osthread) {
 625   assert(osthread != NULL, "osthread not set");
 626   CloseHandle(osthread->thread_handle());
 627   CloseHandle(osthread->interrupt_event());
 628   delete osthread;
 629 }
 630 
 631 
 632 static int    has_performance_count = 0;
 633 static jlong first_filetime;
 634 static jlong initial_performance_count;
 635 static jlong performance_frequency;
 636 
 637 
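// Combine the two 32-bit halves of a LARGE_INTEGER into a single jlong.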
 638 jlong as_long(LARGE_INTEGER x) {
 639   jlong result = 0; // initialization to avoid warning
 640   set_high(&result, x.HighPart);
 641   set_low(&result,  x.LowPart);
 642   return result;
 643 }
 644 
 645 
 646 jlong os::elapsed_counter() {
 647   LARGE_INTEGER count;
 648   if (has_performance_count) {
 649     QueryPerformanceCounter(&count);
 650     return as_long(count) - initial_performance_count;
 651   } else {
 652     FILETIME wt;
 653     GetSystemTimeAsFileTime(&wt);
 654     return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime);
 655   }
 656 }
 657 
 658 
 659 jlong os::elapsed_frequency() {
 660   if (has_performance_count) {
 661     return performance_frequency;
 662   } else {
   // a FILETIME value is the number of 100-nanosecond intervals since January 1, 1601.
 664    return 10000000;
 665   }
 666 }
 667 
 668 
 669 julong os::available_memory() {
 670   return win32::available_memory();
 671 }
 672 
 673 julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
  // incorrect value if total memory is larger than 4GB
 676   MEMORYSTATUSEX ms;
 677   ms.dwLength = sizeof(ms);
 678   GlobalMemoryStatusEx(&ms);
 679 
 680   return (julong)ms.ullAvailPhys;
 681 }
 682 
 683 julong os::physical_memory() {
 684   return win32::physical_memory();
 685 }
 686 
 687 bool os::has_allocatable_memory_limit(julong* limit) {
 688   MEMORYSTATUSEX ms;
 689   ms.dwLength = sizeof(ms);
 690   GlobalMemoryStatusEx(&ms);
 691 #ifdef _LP64
 692   *limit = (julong)ms.ullAvailVirtual;
 693   return true;
 694 #else
 695   // Limit to 1400m because of the 2gb address space wall
 696   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 697   return true;
 698 #endif
 699 }
 700 
 701 // VC6 lacks DWORD_PTR
 702 #if _MSC_VER < 1300
 703 typedef UINT_PTR DWORD_PTR;
 704 #endif
 705 
 706 int os::active_processor_count() {
 707   DWORD_PTR lpProcessAffinityMask = 0;
 708   DWORD_PTR lpSystemAffinityMask = 0;
 709   int proc_count = processor_count();
 710   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 711       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // The number of active processors is the number of set bits in the process affinity mask
 713     int bitcount = 0;
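    // Each iteration clears the lowest set bit, so the loop runs once per set bit.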
 714     while (lpProcessAffinityMask != 0) {
 715       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 716       bitcount++;
 717     }
 718     return bitcount;
 719   } else {
 720     return proc_count;
 721   }
 722 }
 723 
 724 void os::set_native_thread_name(const char *name) {
 725   // Not yet implemented.
 726   return;
 727 }
 728 
 729 bool os::distribute_processes(uint length, uint* distribution) {
 730   // Not yet implemented.
 731   return false;
 732 }
 733 
 734 bool os::bind_to_processor(uint processor_id) {
 735   // Not yet implemented.
 736   return false;
 737 }
 738 
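// Record whether a high-resolution performance counter is available and cache
// its frequency and starting value; otherwise fall back to FILETIME-based time.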
 739 static void initialize_performance_counter() {
 740   LARGE_INTEGER count;
 741   if (QueryPerformanceFrequency(&count)) {
 742     has_performance_count = 1;
 743     performance_frequency = as_long(count);
 744     QueryPerformanceCounter(&count);
 745     initial_performance_count = as_long(count);
 746   } else {
 747     has_performance_count = 0;
 748     FILETIME wt;
 749     GetSystemTimeAsFileTime(&wt);
 750     first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 751   }
 752 }
 753 
 754 
 755 double os::elapsedTime() {
 756   return (double) elapsed_counter() / (double) elapsed_frequency();
 757 }
 758 
 759 
 760 // Windows format:
 761 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 762 // Java format:
 763 //   Java standards require the number of milliseconds since 1/1/1970
 764 
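// The offset between the two epochs is the number of 100-nanosecond intervals
// from 1601-01-01 to 1970-01-01: 134,774 days * 86,400 s/day * 10^7 =
// 116,444,736,000,000,000.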
 765 // Constant offset - calculated using offset()
 766 static jlong  _offset   = 116444736000000000;
 767 // Fake time counter for reproducible results when debugging
 768 static jlong  fake_time = 0;
 769 
 770 #ifdef ASSERT
 771 // Just to be safe, recalculate the offset in debug mode
 772 static jlong _calculated_offset = 0;
 773 static int   _has_calculated_offset = 0;
 774 
 775 jlong offset() {
 776   if (_has_calculated_offset) return _calculated_offset;
 777   SYSTEMTIME java_origin;
 778   java_origin.wYear          = 1970;
 779   java_origin.wMonth         = 1;
 780   java_origin.wDayOfWeek     = 0; // ignored
 781   java_origin.wDay           = 1;
 782   java_origin.wHour          = 0;
 783   java_origin.wMinute        = 0;
 784   java_origin.wSecond        = 0;
 785   java_origin.wMilliseconds  = 0;
 786   FILETIME jot;
 787   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 788     fatal(err_msg("Error = %d\nWindows error", GetLastError()));
 789   }
 790   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 791   _has_calculated_offset = 1;
 792   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 793   return _calculated_offset;
 794 }
 795 #else
 796 jlong offset() {
 797   return _offset;
 798 }
 799 #endif
 800 
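// Convert a FILETIME (100-ns ticks since 1601) to Java time (ms since 1970).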
 801 jlong windows_to_java_time(FILETIME wt) {
 802   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 803   return (a - offset()) / 10000;
 804 }
 805 
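// Convert Java time (ms since 1970) back to a FILETIME.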
 806 FILETIME java_to_windows_time(jlong l) {
 807   jlong a = (l * 10000) + offset();
 808   FILETIME result;
 809   result.dwHighDateTime = high(a);
 810   result.dwLowDateTime  = low(a);
 811   return result;
 812 }
 813 
 814 bool os::supports_vtime() { return true; }
 815 bool os::enable_vtime() { return false; }
 816 bool os::vtime_enabled() { return false; }
 817 
 818 double os::elapsedVTime() {
 819   FILETIME created;
 820   FILETIME exited;
 821   FILETIME kernel;
 822   FILETIME user;
 823   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 824     // the resolution of windows_to_java_time() should be sufficient (ms)
 825     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 826   } else {
 827     return elapsedTime();
 828   }
 829 }
 830 
 831 jlong os::javaTimeMillis() {
 832   if (UseFakeTimers) {
 833     return fake_time++;
 834   } else {
 835     FILETIME wt;
 836     GetSystemTimeAsFileTime(&wt);
 837     return windows_to_java_time(wt);
 838   }
 839 }
 840 
 841 jlong os::javaTimeNanos() {
 842   if (!has_performance_count) {
 843     return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do.
 844   } else {
 845     LARGE_INTEGER current_count;
 846     QueryPerformanceCounter(&current_count);
 847     double current = as_long(current_count);
 848     double freq = performance_frequency;
 849     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 850     return time;
 851   }
 852 }
 853 
 854 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 855   if (!has_performance_count) {
    // javaTimeMillis() doesn't have much precision,
    // but it is not going to wrap -- so all 64 bits
 858     info_ptr->max_value = ALL_64_BITS;
 859 
 860     // this is a wall clock timer, so may skip
 861     info_ptr->may_skip_backward = true;
 862     info_ptr->may_skip_forward = true;
 863   } else {
 864     jlong freq = performance_frequency;
 865     if (freq < NANOSECS_PER_SEC) {
 866       // the performance counter is 64 bits and we will
 867       // be multiplying it -- so no wrap in 64 bits
 868       info_ptr->max_value = ALL_64_BITS;
 869     } else if (freq > NANOSECS_PER_SEC) {
 870       // use the max value the counter can reach to
 871       // determine the max value which could be returned
 872       julong max_counter = (julong)ALL_64_BITS;
 873       info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 874     } else {
 875       // the performance counter is 64 bits and we will
 876       // be using it directly -- so no wrap in 64 bits
 877       info_ptr->max_value = ALL_64_BITS;
 878     }
 879 
 880     // using a counter, so no skipping
 881     info_ptr->may_skip_backward = false;
 882     info_ptr->may_skip_forward = false;
 883   }
 884   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 885 }
 886 
 887 char* os::local_time_string(char *buf, size_t buflen) {
 888   SYSTEMTIME st;
 889   GetLocalTime(&st);
 890   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 891                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 892   return buf;
 893 }
 894 
 895 bool os::getTimesSecs(double* process_real_time,
 896                      double* process_user_time,
 897                      double* process_system_time) {
 898   HANDLE h_process = GetCurrentProcess();
 899   FILETIME create_time, exit_time, kernel_time, user_time;
 900   BOOL result = GetProcessTimes(h_process,
 901                                &create_time,
 902                                &exit_time,
 903                                &kernel_time,
 904                                &user_time);
 905   if (result != 0) {
 906     FILETIME wt;
 907     GetSystemTimeAsFileTime(&wt);
 908     jlong rtc_millis = windows_to_java_time(wt);
 909     jlong user_millis = windows_to_java_time(user_time);
 910     jlong system_millis = windows_to_java_time(kernel_time);
 911     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 912     *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
 913     *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
 914     return true;
 915   } else {
 916     return false;
 917   }
 918 }
 919 
 920 void os::shutdown() {
 921 
 922   // allow PerfMemory to attempt cleanup of any persistent resources
 923   perfMemory_exit();
 924 
 925   // flush buffered output, finish log files
 926   ostream_abort();
 927 
 928   // Check for abort hook
 929   abort_hook_t abort_hook = Arguments::abort_hook();
 930   if (abort_hook != NULL) {
 931     abort_hook();
 932   }
 933 }
 934 
 935 
 936 static BOOL  (WINAPI *_MiniDumpWriteDump)  ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
 937                                             PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION);
 938 
 939 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
 940   HINSTANCE dbghelp;
 941   EXCEPTION_POINTERS ep;
 942   MINIDUMP_EXCEPTION_INFORMATION mei;
 943   MINIDUMP_EXCEPTION_INFORMATION* pmei;
 944 
 945   HANDLE hProcess = GetCurrentProcess();
 946   DWORD processId = GetCurrentProcessId();
 947   HANDLE dumpFile;
 948   MINIDUMP_TYPE dumpType;
 949   static const char* cwd;
 950 
// Default is to always create a dump for debug builds; on product builds, only dump on server versions of Windows.
 952 #ifndef ASSERT
  // If running on a client version of Windows and the user has not explicitly enabled dumping
 954   if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
 955     VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
 956     return;
    // If running on a server version of Windows and the user has explicitly disabled dumping
 958   } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
 959     VMError::report_coredump_status("Minidump has been disabled from the command line", false);
 960     return;
 961   }
 962 #else
 963   if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
 964     VMError::report_coredump_status("Minidump has been disabled from the command line", false);
 965     return;
 966   }
 967 #endif
 968 
 969   dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
 970 
 971   if (dbghelp == NULL) {
 972     VMError::report_coredump_status("Failed to load dbghelp.dll", false);
 973     return;
 974   }
 975 
 976   _MiniDumpWriteDump = CAST_TO_FN_PTR(
 977     BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
 978     PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
 979     GetProcAddress(dbghelp, "MiniDumpWriteDump"));
 980 
 981   if (_MiniDumpWriteDump == NULL) {
 982     VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
 983     return;
 984   }
 985 
 986   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);
 987 
// Older versions of dbghelp.h don't contain all the dump types we want;
// dbghelp.h with API_VERSION_NUMBER 11 or higher contains the ones we want.
 990 #if API_VERSION_NUMBER >= 11
 991   dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
 992     MiniDumpWithUnloadedModules);
 993 #endif
 994 
 995   cwd = get_current_directory(NULL, 0);
 996   jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp",cwd, current_process_id());
 997   dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
 998 
 999   if (dumpFile == INVALID_HANDLE_VALUE) {
1000     VMError::report_coredump_status("Failed to create file for dumping", false);
1001     return;
1002   }
1003   if (exceptionRecord != NULL && contextRecord != NULL) {
1004     ep.ContextRecord = (PCONTEXT) contextRecord;
1005     ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;
1006 
1007     mei.ThreadId = GetCurrentThreadId();
1008     mei.ExceptionPointers = &ep;
1009     pmei = &mei;
1010   } else {
1011     pmei = NULL;
1012   }
1013 
1014 
  // Older versions of dbghelp.dll (e.g. the one shipped with Win2003) may not support all
  // the dump types we really want. If the first call fails, fall back to just MiniDumpWithFullMemory.
1017   if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
1018       _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
1019         DWORD error = GetLastError();
1020         LPTSTR msgbuf = NULL;
1021 
1022         if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
1023                       FORMAT_MESSAGE_FROM_SYSTEM |
1024                       FORMAT_MESSAGE_IGNORE_INSERTS,
1025                       NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {
1026 
1027           jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
1028           LocalFree(msgbuf);
1029         } else {
1030           // Call to FormatMessage failed, just include the result from GetLastError
1031           jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
1032         }
1033         VMError::report_coredump_status(buffer, false);
1034   } else {
1035     VMError::report_coredump_status(buffer, true);
1036   }
1037 
1038   CloseHandle(dumpFile);
1039 }
1040 
1041 
1042 
1043 void os::abort(bool dump_core)
1044 {
1045   os::shutdown();
1046   // no core dump on Windows
1047   ::exit(1);
1048 }
1049 
1050 // Die immediately, no exit hook, no abort hook, no cleanup.
1051 void os::die() {
1052   _exit(-1);
1053 }
1054 
1055 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1056 //  * dirent_md.c       1.15 00/02/02
1057 //
1058 // The declarations for DIR and struct dirent are in jvm_win32.h.
1059 
1060 /* Caller must have already run dirname through JVM_NativePath, which removes
1061    duplicate slashes and converts all instances of '/' into '\\'. */
1062 
1063 DIR *
1064 os::opendir(const char *dirname)
1065 {
1066     assert(dirname != NULL, "just checking");   // hotspot change
1067     DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1068     DWORD fattr;                                // hotspot change
1069     char alt_dirname[4] = { 0, 0, 0, 0 };
1070 
1071     if (dirp == 0) {
1072         errno = ENOMEM;
1073         return 0;
1074     }
1075 
1076     /*
1077      * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1078      * as a directory in FindFirstFile().  We detect this case here and
1079      * prepend the current drive name.
1080      */
1081     if (dirname[1] == '\0' && dirname[0] == '\\') {
1082         alt_dirname[0] = _getdrive() + 'A' - 1;
1083         alt_dirname[1] = ':';
1084         alt_dirname[2] = '\\';
1085         alt_dirname[3] = '\0';
1086         dirname = alt_dirname;
1087     }
1088 
1089     dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1090     if (dirp->path == 0) {
1091         free(dirp, mtInternal);
1092         errno = ENOMEM;
1093         return 0;
1094     }
1095     strcpy(dirp->path, dirname);
1096 
1097     fattr = GetFileAttributes(dirp->path);
1098     if (fattr == 0xffffffff) {
1099         free(dirp->path, mtInternal);
1100         free(dirp, mtInternal);
1101         errno = ENOENT;
1102         return 0;
1103     } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1104         free(dirp->path, mtInternal);
1105         free(dirp, mtInternal);
1106         errno = ENOTDIR;
1107         return 0;
1108     }
1109 
1110     /* Append "*.*", or possibly "\\*.*", to path */
1111     if (dirp->path[1] == ':'
1112         && (dirp->path[2] == '\0'
1113             || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1114         /* No '\\' needed for cases like "Z:" or "Z:\" */
1115         strcat(dirp->path, "*.*");
1116     } else {
1117         strcat(dirp->path, "\\*.*");
1118     }
1119 
1120     dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1121     if (dirp->handle == INVALID_HANDLE_VALUE) {
1122         if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1123             free(dirp->path, mtInternal);
1124             free(dirp, mtInternal);
1125             errno = EACCES;
1126             return 0;
1127         }
1128     }
1129     return dirp;
1130 }
1131 
1132 /* parameter dbuf unused on Windows */
1133 
1134 struct dirent *
1135 os::readdir(DIR *dirp, dirent *dbuf)
1136 {
1137     assert(dirp != NULL, "just checking");      // hotspot change
1138     if (dirp->handle == INVALID_HANDLE_VALUE) {
1139         return 0;
1140     }
1141 
1142     strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1143 
1144     if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1145         if (GetLastError() == ERROR_INVALID_HANDLE) {
1146             errno = EBADF;
1147             return 0;
1148         }
1149         FindClose(dirp->handle);
1150         dirp->handle = INVALID_HANDLE_VALUE;
1151     }
1152 
1153     return &dirp->dirent;
1154 }
1155 
1156 int
1157 os::closedir(DIR *dirp)
1158 {
1159     assert(dirp != NULL, "just checking");      // hotspot change
1160     if (dirp->handle != INVALID_HANDLE_VALUE) {
1161         if (!FindClose(dirp->handle)) {
1162             errno = EBADF;
1163             return -1;
1164         }
1165         dirp->handle = INVALID_HANDLE_VALUE;
1166     }
1167     free(dirp->path, mtInternal);
1168     free(dirp, mtInternal);
1169     return 0;
1170 }
1171 
// This must be hard coded because it's the system's temporary
// directory, not the Java application's temp directory (java.io.tmpdir).
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    path_buf[0] = '\0';
    return path_buf;
  }
}
1183 
1184 static bool file_exists(const char* filename) {
1185   if (filename == NULL || strlen(filename) == 0) {
1186     return false;
1187   }
1188   return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1189 }
1190 
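// Build the full path of a DLL named <fname> into the buffer. pname may be a
// single directory or a search path; in the latter case the first element that
// actually contains <fname>.dll wins. Returns false if the buffer is too small.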
1191 bool os::dll_build_name(char *buffer, size_t buflen,
1192                         const char* pname, const char* fname) {
1193   bool retval = false;
1194   const size_t pnamelen = pname ? strlen(pname) : 0;
1195   const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1196 
1197   // Return error on buffer overflow.
1198   if (pnamelen + strlen(fname) + 10 > buflen) {
1199     return retval;
1200   }
1201 
1202   if (pnamelen == 0) {
1203     jio_snprintf(buffer, buflen, "%s.dll", fname);
1204     retval = true;
1205   } else if (c == ':' || c == '\\') {
1206     jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1207     retval = true;
1208   } else if (strchr(pname, *os::path_separator()) != NULL) {
1209     int n;
1210     char** pelements = split_path(pname, &n);
1211     if (pelements == NULL) {
1212       return false;
1213     }
1214     for (int i = 0 ; i < n ; i++) {
1215       char* path = pelements[i];
1216       // Really shouldn't be NULL, but check can't hurt
1217       size_t plen = (path == NULL) ? 0 : strlen(path);
1218       if (plen == 0) {
1219         continue; // skip the empty path values
1220       }
1221       const char lastchar = path[plen - 1];
1222       if (lastchar == ':' || lastchar == '\\') {
1223         jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1224       } else {
1225         jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1226       }
1227       if (file_exists(buffer)) {
1228         retval = true;
1229         break;
1230       }
1231     }
1232     // release the storage
1233     for (int i = 0 ; i < n ; i++) {
1234       if (pelements[i] != NULL) {
1235         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1236       }
1237     }
1238     if (pelements != NULL) {
1239       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1240     }
1241   } else {
1242     jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1243     retval = true;
1244   }
1245   return retval;
1246 }
1247 
1248 // Needs to be in os specific directory because windows requires another
1249 // header file <direct.h>
1250 const char* os::get_current_directory(char *buf, size_t buflen) {
1251   int n = static_cast<int>(buflen);
1252   if (buflen > INT_MAX)  n = INT_MAX;
1253   return _getcwd(buf, n);
1254 }
1255 
1256 //-----------------------------------------------------------
1257 // Helper functions for fatal error handler
1258 #ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
1261 //
1262 static bool _addr_in_ntdll( address addr )
1263 {
1264   HMODULE hmod;
1265   MODULEINFO minfo;
1266 
1267   hmod = GetModuleHandle("NTDLL.DLL");
1268   if ( hmod == NULL ) return false;
1269   if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
1270                                &minfo, sizeof(MODULEINFO)) )
1271     return false;
1272 
1273   if ( (addr >= minfo.lpBaseOfDll) &&
1274        (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
1275     return true;
1276   else
1277     return false;
1278 }
1279 #endif
1280 
1281 
1282 // Enumerate all modules for a given process ID
1283 //
// Notice that Windows 95/98/Me and Windows NT/2000/XP have
// different APIs for doing this. We use PSAPI.DLL on NT-based
// Windows and ToolHelp on 95/98/Me.
1287 
1288 // Callback function that is called by enumerate_modules() on
1289 // every DLL module.
1290 // Input parameters:
1291 //    int       pid,
1292 //    char*     module_file_name,
1293 //    address   module_base_addr,
1294 //    unsigned  module_size,
1295 //    void*     param
1296 typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);
1297 
1298 // enumerate_modules for Windows NT, using PSAPI
1299 static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
1300 {
1301   HANDLE   hProcess ;
1302 
1303 # define MAX_NUM_MODULES 128
1304   HMODULE     modules[MAX_NUM_MODULES];
1305   static char filename[ MAX_PATH ];
1306   int         result = 0;
1307 
1308   if (!os::PSApiDll::PSApiAvailable()) {
1309     return 0;
1310   }
1311 
1312   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1313                          FALSE, pid ) ;
1314   if (hProcess == NULL) return 0;
1315 
1316   DWORD size_needed;
1317   if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
1318                            sizeof(modules), &size_needed)) {
1319       CloseHandle( hProcess );
1320       return 0;
1321   }
1322 
1323   // number of modules that are currently loaded
1324   int num_modules = size_needed / sizeof(HMODULE);
1325 
1326   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1327     // Get Full pathname:
1328     if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
1329                              filename, sizeof(filename))) {
1330         filename[0] = '\0';
1331     }
1332 
1333     MODULEINFO modinfo;
1334     if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
1335                                &modinfo, sizeof(modinfo))) {
1336         modinfo.lpBaseOfDll = NULL;
1337         modinfo.SizeOfImage = 0;
1338     }
1339 
1340     // Invoke callback function
1341     result = func(pid, filename, (address)modinfo.lpBaseOfDll,
1342                   modinfo.SizeOfImage, param);
1343     if (result) break;
1344   }
1345 
1346   CloseHandle( hProcess ) ;
1347   return result;
1348 }
1349 
1350 
1351 // enumerate_modules for Windows 95/98/ME, using TOOLHELP
1352 static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
1353 {
1354   HANDLE                hSnapShot ;
1355   static MODULEENTRY32  modentry ;
1356   int                   result = 0;
1357 
1358   if (!os::Kernel32Dll::HelpToolsAvailable()) {
1359     return 0;
1360   }
1361 
1362   // Get a handle to a Toolhelp snapshot of the system
1363   hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
1364   if( hSnapShot == INVALID_HANDLE_VALUE ) {
1365       return FALSE ;
1366   }
1367 
1368   // iterate through all modules
1369   modentry.dwSize = sizeof(MODULEENTRY32) ;
1370   bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;
1371 
1372   while( not_done ) {
1373     // invoke the callback
1374     result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
1375                 modentry.modBaseSize, param);
1376     if (result) break;
1377 
1378     modentry.dwSize = sizeof(MODULEENTRY32) ;
1379     not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
1380   }
1381 
1382   CloseHandle(hSnapShot);
1383   return result;
1384 }
1385 
1386 int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
1387 {
1388   // Get current process ID if caller doesn't provide it.
1389   if (!pid) pid = os::current_process_id();
1390 
1391   if (os::win32::is_nt()) return _enumerate_modules_winnt  (pid, func, param);
1392   else                    return _enumerate_modules_windows(pid, func, param);
1393 }
1394 
1395 struct _modinfo {
1396    address addr;
1397    char*   full_path;   // point to a char buffer
1398    int     buflen;      // size of the buffer
1399    address base_addr;
1400 };
1401 
1402 static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
1403                                   unsigned size, void * param) {
1404    struct _modinfo *pmod = (struct _modinfo *)param;
1405    if (!pmod) return -1;
1406 
1407    if (base_addr     <= pmod->addr &&
1408        base_addr+size > pmod->addr) {
1409      // if a buffer is provided, copy path name to the buffer
1410      if (pmod->full_path) {
1411        jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1412      }
1413      pmod->base_addr = base_addr;
1414      return 1;
1415    }
1416    return 0;
1417 }
1418 
1419 bool os::dll_address_to_library_name(address addr, char* buf,
1420                                      int buflen, int* offset) {
1421   // buf is not optional, but offset is optional
1422   assert(buf != NULL, "sanity check");
1423 
// NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
//       return the full path to the DLL file; sometimes it returns the path
//       to the corresponding PDB file (debug info), and sometimes it only
//       returns a partial path, which makes life painful.
1428 
1429   struct _modinfo mi;
1430   mi.addr      = addr;
1431   mi.full_path = buf;
1432   mi.buflen    = buflen;
1433   int pid = os::current_process_id();
1434   if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
1435     // buf already contains path name
1436     if (offset) *offset = addr - mi.base_addr;
1437     return true;
1438   }
1439 
1440   buf[0] = '\0';
1441   if (offset) *offset = -1;
1442   return false;
1443 }
1444 
1445 bool os::dll_address_to_function_name(address addr, char *buf,
1446                                       int buflen, int *offset) {
1447   // buf is not optional, but offset is optional
1448   assert(buf != NULL, "sanity check");
1449 
1450   if (Decoder::decode(addr, buf, buflen, offset)) {
1451     return true;
1452   }
1453   if (offset != NULL)  *offset  = -1;
1454   buf[0] = '\0';
1455   return false;
1456 }
1457 
1458 // save the start and end address of jvm.dll into param[0] and param[1]
1459 static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
1460                     unsigned size, void * param) {
1461    if (!param) return -1;
1462 
1463    if (base_addr     <= (address)_locate_jvm_dll &&
1464        base_addr+size > (address)_locate_jvm_dll) {
1465          ((address*)param)[0] = base_addr;
1466          ((address*)param)[1] = base_addr + size;
1467          return 1;
1468    }
1469    return 0;
1470 }
1471 
1472 address vm_lib_location[2];    // start and end address of jvm.dll
1473 
1474 // check if addr is inside jvm.dll
1475 bool os::address_is_in_vm(address addr) {
1476   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1477     int pid = os::current_process_id();
1478     if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
1479       assert(false, "Can't find jvm module.");
1480       return false;
1481     }
1482   }
1483 
1484   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1485 }
1486 
1487 // print module info; param is outputStream*
1488 static int _print_module(int pid, char* fname, address base,
1489                          unsigned size, void* param) {
1490    if (!param) return -1;
1491 
1492    outputStream* st = (outputStream*)param;
1493 
1494    address end_addr = base + size;
1495    st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
1496    return 0;
1497 }
1498 
// Loads a .dll/.so and, in case of error, checks whether the .dll/.so was
// built for the same architecture that HotSpot is running on.
1502 void * os::dll_load(const char *name, char *ebuf, int ebuflen)
1503 {
1504   void * result = LoadLibrary(name);
1505   if (result != NULL)
1506   {
1507     return result;
1508   }
1509 
1510   DWORD errcode = GetLastError();
1511   if (errcode == ERROR_MOD_NOT_FOUND) {
1512     strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
1513     ebuf[ebuflen-1]='\0';
1514     return NULL;
1515   }
1516 
  // Parse the DLL below.
  // If we can read the DLL info and find that the DLL was built for an
  // architecture other than the one HotSpot is running on, then print
  // "DLL was built for a different architecture" into the buffer;
  // otherwise call os::lasterror to obtain the system error message.
1522 
1523   // Read system error message into ebuf
1524   // It may or may not be overwritten below (in the for loop and just above)
1525   lasterror(ebuf, (size_t) ebuflen);
1526   ebuf[ebuflen-1]='\0';
1527   int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
1528   if (file_descriptor<0)
1529   {
1530     return NULL;
1531   }
1532 
1533   uint32_t signature_offset;
1534   uint16_t lib_arch=0;
1535   bool failed_to_get_lib_arch=
1536   (
    // Go to position 0x3c in the DLL
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read the location of the signature
    (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    // Go to the COFF File Header in the DLL,
    // located after the "signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    // Read the field that contains the code of the architecture
    // that the DLL was built for
    (sizeof(lib_arch)!=
      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
1553   );
1554 
1555   ::close(file_descriptor);
1556   if (failed_to_get_lib_arch)
1557   {
1558     // file i/o error - report os::lasterror(...) msg
1559     return NULL;
1560   }
1561 
1562   typedef struct
1563   {
1564     uint16_t arch_code;
1565     char* arch_name;
1566   } arch_t;
1567 
1568   static const arch_t arch_array[]={
1569     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1570     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1571     {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
1572   };
1573   #if   (defined _M_IA64)
1574     static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
1575   #elif (defined _M_AMD64)
1576     static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
1577   #elif (defined _M_IX86)
1578     static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
1579   #else
1580     #error Method os::dll_load requires that one of following \
1581            is defined :_M_IA64,_M_AMD64 or _M_IX86
1582   #endif
1583 
1584 
  // Obtain strings for the printf operation:
  // lib_arch_str contains the platform this .dll was built for,
  // running_arch_str contains the platform HotSpot was built for.
1588   char *running_arch_str=NULL,*lib_arch_str=NULL;
1589   for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
1590   {
1591     if (lib_arch==arch_array[i].arch_code)
1592       lib_arch_str=arch_array[i].arch_name;
1593     if (running_arch==arch_array[i].arch_code)
1594       running_arch_str=arch_array[i].arch_name;
1595   }
1596 
  assert(running_arch_str,
    "Didn't find running architecture code in arch_array");
1599 
  // If the architecture is right
  // but some other error took place - report the os::lasterror(...) msg
1602   if (lib_arch == running_arch)
1603   {
1604     return NULL;
1605   }
1606 
  if (lib_arch_str != NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
      "Can't load %s-bit .dll on a %s-bit platform",
      lib_arch_str, running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was built for
    ::_snprintf(ebuf, ebuflen-1,
      "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
      lib_arch, running_arch_str);
  }
1620 
1621   return NULL;
1622 }
1623 
1624 
1625 void os::print_dll_info(outputStream *st) {
  int pid = os::current_process_id();
  st->print_cr("Dynamic libraries:");
  enumerate_modules(pid, _print_module, (void *)st);
1629 }
1630 
1631 void os::print_os_info_brief(outputStream* st) {
1632   os::print_os_info(st);
1633 }
1634 
1635 void os::print_os_info(outputStream* st) {
1636   st->print("OS:");
1637 
1638   os::win32::print_windows_version(st);
1639 }
1640 
1641 void os::win32::print_windows_version(outputStream* st) {
1642   OSVERSIONINFOEX osvi;
1643   SYSTEM_INFO si;
1644 
1645   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1646   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1647 
1648   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1649     st->print_cr("N/A");
1650     return;
1651   }
1652 
1653   int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
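  // Encodes major.minor as major*1000 + minor, e.g. Windows 7 (6.1) -> 6001,
  // Windows XP (5.1) -> 5001.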
1654 
1655   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1656   if (os_vers >= 5002) {
    // Retrieve SYSTEM_INFO from a GetNativeSystemInfo call so that we can
    // find out whether we are running on a 64-bit processor or not.
1659     if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
1660       os::Kernel32Dll::GetNativeSystemInfo(&si);
1661     } else {
1662       GetSystemInfo(&si);
1663     }
1664   }
1665 
1666   if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
1667     switch (os_vers) {
1668     case 3051: st->print(" Windows NT 3.51"); break;
1669     case 4000: st->print(" Windows NT 4.0"); break;
1670     case 5000: st->print(" Windows 2000"); break;
1671     case 5001: st->print(" Windows XP"); break;
1672     case 5002:
1673       if (osvi.wProductType == VER_NT_WORKSTATION &&
1674           si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1675         st->print(" Windows XP x64 Edition");
1676       } else {
1677         st->print(" Windows Server 2003 family");
1678       }
1679       break;
1680 
1681     case 6000:
1682       if (osvi.wProductType == VER_NT_WORKSTATION) {
1683         st->print(" Windows Vista");
1684       } else {
1685         st->print(" Windows Server 2008");
1686       }
1687       break;
1688 
1689     case 6001:
1690       if (osvi.wProductType == VER_NT_WORKSTATION) {
1691         st->print(" Windows 7");
1692       } else {
1693         st->print(" Windows Server 2008 R2");
1694       }
1695       break;
1696 
1697     case 6002:
1698       if (osvi.wProductType == VER_NT_WORKSTATION) {
1699         st->print(" Windows 8");
1700       } else {
1701         st->print(" Windows Server 2012");
1702       }
1703       break;
1704 
1705     case 6003:
1706       if (osvi.wProductType == VER_NT_WORKSTATION) {
1707         st->print(" Windows 8.1");
1708       } else {
1709         st->print(" Windows Server 2012 R2");
1710       }
1711       break;
1712 
1713     default: // future os
1714       // Unrecognized windows, print out its major and minor versions
1715       st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1716     }
1717   } else {
1718     switch (os_vers) {
1719     case 4000: st->print(" Windows 95"); break;
1720     case 4010: st->print(" Windows 98"); break;
1721     case 4090: st->print(" Windows Me"); break;
1722     default: // future windows, print out its major and minor versions
1723       st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1724     }
1725   }
1726 
1727   if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1728     st->print(" , 64 bit");
1729   }
1730 
1731   st->print(" Build %d", osvi.dwBuildNumber);
1732   st->print(" %s", osvi.szCSDVersion);           // service pack
1733   st->cr();
1734 }
1735 
1736 void os::pd_print_cpu_info(outputStream* st) {
1737   // Nothing to do for now.
1738 }
1739 
1740 void os::print_memory_info(outputStream* st) {
1741   st->print("Memory:");
1742   st->print(" %dk page", os::vm_page_size()>>10);
1743 
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
  // incorrect value if total memory is larger than 4GB
1746   MEMORYSTATUSEX ms;
1747   ms.dwLength = sizeof(ms);
1748   GlobalMemoryStatusEx(&ms);
1749 
1750   st->print(", physical %uk", os::physical_memory() >> 10);
1751   st->print("(%uk free)", os::available_memory() >> 10);
1752 
1753   st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1754   st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1755   st->cr();
1756 }
1757 
1758 void os::print_siginfo(outputStream *st, void *siginfo) {
1759   EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
1760   st->print("siginfo:");
1761   st->print(" ExceptionCode=0x%x", er->ExceptionCode);
1762 
1763   if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
1764       er->NumberParameters >= 2) {
1765       switch (er->ExceptionInformation[0]) {
1766       case 0: st->print(", reading address"); break;
1767       case 1: st->print(", writing address"); break;
1768       default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1769                             er->ExceptionInformation[0]);
1770       }
1771       st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1772   } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
1773              er->NumberParameters >= 2 && UseSharedSpaces) {
1774     FileMapInfo* mapinfo = FileMapInfo::current_info();
1775     if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
1776       st->print("\n\nError accessing class data sharing archive."       \
1777                 " Mapped file inaccessible during execution, "          \
1778                 " possible disk/network problem.");
1779     }
1780   } else {
1781     int num = er->NumberParameters;
1782     if (num > 0) {
1783       st->print(", ExceptionInformation=");
1784       for (int i = 0; i < num; i++) {
1785         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1786       }
1787     }
1788   }
1789   st->cr();
1790 }
1791 
1792 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1793   // do nothing
1794 }
1795 
1796 static char saved_jvm_path[MAX_PATH] = {0};
1797 
1798 // Find the full path to the current module, jvm.dll
1799 void os::jvm_path(char *buf, jint buflen) {
1800   // Error checking.
1801   if (buflen < MAX_PATH) {
1802     assert(false, "must use a large-enough buffer");
1803     buf[0] = '\0';
1804     return;
1805   }
1806   // Lazy resolve the path to current module.
1807   if (saved_jvm_path[0] != 0) {
1808     strcpy(buf, saved_jvm_path);
1809     return;
1810   }
1811 
1812   buf[0] = '\0';
1813   if (Arguments::created_by_gamma_launcher()) {
     // Support for the gamma launcher. Check for a
     // JAVA_HOME environment variable
     // and fix up the path so it looks like
     // jvm.dll is installed there (append a fake suffix
     // hotspot\jvm.dll).
1819      char* java_home_var = ::getenv("JAVA_HOME");
1820      if (java_home_var != NULL && java_home_var[0] != 0) {
1821 
1822         strncpy(buf, java_home_var, buflen);
1823 
1824         // determine if this is a legacy image or modules image
1825         // modules image doesn't have "jre" subdirectory
1826         size_t len = strlen(buf);
1827         char* jrebin_p = buf + len;
1828         jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1829         if (0 != _access(buf, 0)) {
1830           jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1831         }
1832         len = strlen(buf);
1833         jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1834      }
1835   }
1836 
  if (buf[0] == '\0') {
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
1840   strcpy(saved_jvm_path, buf);
1841 }
1842 
1843 
1844 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1845 #ifndef _WIN64
1846   st->print("_");
1847 #endif
1848 }
1849 
1850 
1851 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1852 #ifndef _WIN64
1853   st->print("@%d", args_size  * sizeof(int));
1854 #endif
1855 }
1856 
1857 // This method is a copy of JDK's sysGetLastErrorString
1858 // from src/windows/hpi/src/system_md.c
1859 
1860 size_t os::lasterror(char* buf, size_t len) {
1861   DWORD errval;
1862 
1863   if ((errval = GetLastError()) != 0) {
1864     // DOS error
1865     size_t n = (size_t)FormatMessage(
1866           FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1867           NULL,
1868           errval,
1869           0,
1870           buf,
1871           (DWORD)len,
1872           NULL);
1873     if (n > 3) {
1874       // Drop final '.', CR, LF
1875       if (buf[n - 1] == '\n') n--;
1876       if (buf[n - 1] == '\r') n--;
1877       if (buf[n - 1] == '.') n--;
1878       buf[n] = '\0';
1879     }
1880     return n;
1881   }
1882 
1883   if (errno != 0) {
1884     // C runtime error that has no corresponding DOS error code
1885     const char* s = strerror(errno);
1886     size_t n = strlen(s);
1887     if (n >= len) n = len - 1;
1888     strncpy(buf, s, n);
1889     buf[n] = '\0';
1890     return n;
1891   }
1892 
1893   return 0;
1894 }
1895 
1896 int os::get_last_error() {
1897   DWORD error = GetLastError();
1898   if (error == 0)
1899     error = errno;
1900   return (int)error;
1901 }
1902 
1903 // sun.misc.Signal
1904 // NOTE that this is a workaround for an apparent kernel bug where if
1905 // a signal handler for SIGBREAK is installed then that signal handler
1906 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1907 // See bug 4416763.
1908 static void (*sigbreakHandler)(int) = NULL;
1909 
1910 static void UserHandler(int sig, void *siginfo, void *context) {
1911   os::signal_notify(sig);
1912   // We need to reinstate the signal handler each time...
1913   os::signal(sig, (void*)UserHandler);
1914 }
1915 
1916 void* os::user_handler() {
1917   return (void*) UserHandler;
1918 }
1919 
1920 void* os::signal(int signal_number, void* handler) {
1921   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1922     void (*oldHandler)(int) = sigbreakHandler;
1923     sigbreakHandler = (void (*)(int)) handler;
1924     return (void*) oldHandler;
1925   } else {
1926     return (void*)::signal(signal_number, (void (*)(int))handler);
1927   }
1928 }
1929 
1930 void os::signal_raise(int signal_number) {
1931   raise(signal_number);
1932 }
1933 
1934 // The Win32 C runtime library maps all console control events other than ^C
1935 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
1936 // logoff, and shutdown events.  We therefore install our own console handler
1937 // that raises SIGTERM for the latter cases.
1938 //
1939 static BOOL WINAPI consoleHandler(DWORD event) {
1940   switch(event) {
1941     case CTRL_C_EVENT:
1942       if (is_error_reported()) {
1943         // Ctrl-C is pressed during error reporting, likely because the error
1944         // handler fails to abort. Let VM die immediately.
1945         os::die();
1946       }
1947 
1948       os::signal_raise(SIGINT);
1949       return TRUE;
1950       break;
1951     case CTRL_BREAK_EVENT:
1952       if (sigbreakHandler != NULL) {
1953         (*sigbreakHandler)(SIGBREAK);
1954       }
1955       return TRUE;
1956       break;
1957     case CTRL_LOGOFF_EVENT: {
1958       // Don't terminate JVM if it is running in a non-interactive session,
1959       // such as a service process.
1960       USEROBJECTFLAGS flags;
1961       HANDLE handle = GetProcessWindowStation();
1962       if (handle != NULL &&
1963           GetUserObjectInformation(handle, UOI_FLAGS, &flags,
1964             sizeof( USEROBJECTFLAGS), NULL)) {
        // If it is a non-interactive session, let the next handler
        // deal with it.
1967         if ((flags.dwFlags & WSF_VISIBLE) == 0) {
1968           return FALSE;
1969         }
1970       }
1971     }
1972     case CTRL_CLOSE_EVENT:
1973     case CTRL_SHUTDOWN_EVENT:
1974       os::signal_raise(SIGTERM);
1975       return TRUE;
1976       break;
1977     default:
1978       break;
1979   }
1980   return FALSE;
1981 }
1982 
1983 /*
1984  * The following code is moved from os.cpp for making this
1985  * code platform specific, which it is by its very nature.
1986  */
1987 
1988 // Return maximum OS signal used + 1 for internal use only
1989 // Used as exit signal for signal_thread
int os::sigexitnum_pd() {
1991   return NSIG;
1992 }
1993 
1994 // a counter for each possible signal value, including signal_thread exit signal
1995 static volatile jint pending_signals[NSIG+1] = { 0 };
1996 static HANDLE sig_sem = NULL;
1997 
1998 void os::signal_init_pd() {
1999   // Initialize signal structures
2000   memset((void*)pending_signals, 0, sizeof(pending_signals));
2001 
2002   sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
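  // sig_sem is released once per pending signal in os::signal_notify() and
  // waited on by the signal dispatcher in check_pending_signals() below.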
2003 
2004   // Programs embedding the VM do not want it to attempt to receive
2005   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2006   // shutdown hooks mechanism introduced in 1.3.  For example, when
2007   // the VM is run as part of a Windows NT service (i.e., a servlet
2008   // engine in a web server), the correct behavior is for any console
2009   // control handler to return FALSE, not TRUE, because the OS's
2010   // "final" handler for such events allows the process to continue if
2011   // it is a service (while terminating it if it is not a service).
2012   // To make this behavior uniform and the mechanism simpler, we
2013   // completely disable the VM's usage of these console events if -Xrs
2014   // (=ReduceSignalUsage) is specified.  This means, for example, that
2015   // the CTRL-BREAK thread dump mechanism is also disabled in this
2016   // case.  See bugs 4323062, 4345157, and related bugs.
2017 
2018   if (!ReduceSignalUsage) {
2019     // Add a CTRL-C handler
2020     SetConsoleCtrlHandler(consoleHandler, TRUE);
2021   }
2022 }
2023 
2024 void os::signal_notify(int signal_number) {
2025   BOOL ret;
2026   if (sig_sem != NULL) {
2027     Atomic::inc(&pending_signals[signal_number]);
2028     ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2029     assert(ret != 0, "ReleaseSemaphore() failed");
2030   }
2031 }
2032 
2033 static int check_pending_signals(bool wait_for_signal) {
2034   DWORD ret;
2035   while (true) {
2036     for (int i = 0; i < NSIG + 1; i++) {
2037       jint n = pending_signals[i];
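      // Atomically claim one pending occurrence of signal i: only decrement
      // the counter if it still holds the value we just read.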
2038       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2039         return i;
2040       }
2041     }
2042     if (!wait_for_signal) {
2043       return -1;
2044     }
2045 
2046     JavaThread *thread = JavaThread::current();
2047 
2048     ThreadBlockInVM tbivm(thread);
2049 
2050     bool threadIsSuspended;
2051     do {
2052       thread->set_suspend_equivalent();
2053       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2054       ret = ::WaitForSingleObject(sig_sem, INFINITE);
2055       assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
2056 
2057       // were we externally suspended while we were waiting?
2058       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2059       if (threadIsSuspended) {
2060         //
2061         // The semaphore has been incremented, but while we were waiting
2062         // another thread suspended us. We don't want to continue running
2063         // while suspended because that would surprise the thread that
2064         // suspended us.
2065         //
2066         ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2067         assert(ret != 0, "ReleaseSemaphore() failed");
2068 
2069         thread->java_suspend_self();
2070       }
2071     } while (threadIsSuspended);
2072   }
2073 }
2074 
2075 int os::signal_lookup() {
2076   return check_pending_signals(false);
2077 }
2078 
2079 int os::signal_wait() {
2080   return check_pending_signals(true);
2081 }
2082 
2083 // Implicit OS exception handling
2084 
2085 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
2086   JavaThread* thread = JavaThread::current();
2087   // Save pc in thread
2088 #ifdef _M_IA64
2089   // Do not blow up if no thread info available.
2090   if (thread) {
2091     // Saving PRECISE pc (with slot information) in thread.
2092     uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
2093     // Convert precise PC into "Unix" format
2094     precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
2095     thread->set_saved_exception_pc((address)precise_pc);
2096   }
2097   // Set pc to handler
2098   exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
2099   // Clear out psr.ri (= Restart Instruction) in order to continue
2100   // at the beginning of the target bundle.
2101   exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
2102   assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
2103 #elif _M_AMD64
2104   // Do not blow up if no thread info available.
2105   if (thread) {
2106     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2107   }
2108   // Set pc to handler
2109   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2110 #else
2111   // Do not blow up if no thread info available.
2112   if (thread) {
2113     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2114   }
2115   // Set pc to handler
2116   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2117 #endif
2118 
2119   // Continue the execution
2120   return EXCEPTION_CONTINUE_EXECUTION;
2121 }
2122 
2123 
2124 // Used for PostMortemDump
2125 extern "C" void safepoints();
2126 extern "C" void find(int x);
2127 extern "C" void events();
2128 
// According to the Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real-world experience shows that occasionally
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2133 
2134 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2135 
2136 // From "Execution Protection in the Windows Operating System" draft 0.35
2137 // Once a system header becomes available, the "real" define should be
2138 // included or copied here.
2139 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2140 
2141 // Handle NAT Bit consumption on IA64.
2142 #ifdef _M_IA64
2143 #define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
2144 #endif
2145 
2146 // Windows Vista/2008 heap corruption check
2147 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2148 
2149 #define def_excpt(val) #val, val
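// def_excpt(X) expands to the pair  "X", X  and is used to build the
// exceptlabels table below.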
2150 
2151 struct siglabel {
2152   char *name;
2153   int   number;
2154 };
2155 
2156 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2157 // C++ compiler contain this error code. Because this is a compiler-generated
2158 // error, the code is not listed in the Win32 API header files.
2159 // The code is actually a cryptic mnemonic device, with the initial "E"
2160 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2161 // ASCII values of "msc".
2162 
2163 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2164 
2165 
2166 struct siglabel exceptlabels[] = {
2167     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2168     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2169     def_excpt(EXCEPTION_BREAKPOINT),
2170     def_excpt(EXCEPTION_SINGLE_STEP),
2171     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2172     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2173     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2174     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2175     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2176     def_excpt(EXCEPTION_FLT_OVERFLOW),
2177     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2178     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2179     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2180     def_excpt(EXCEPTION_INT_OVERFLOW),
2181     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2182     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2183     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2184     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2185     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2186     def_excpt(EXCEPTION_STACK_OVERFLOW),
2187     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2188     def_excpt(EXCEPTION_GUARD_PAGE),
2189     def_excpt(EXCEPTION_INVALID_HANDLE),
2190     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2191     def_excpt(EXCEPTION_HEAP_CORRUPTION),
2192 #ifdef _M_IA64
2193     def_excpt(EXCEPTION_REG_NAT_CONSUMPTION),
2194 #endif
2195     NULL, 0
2196 };
2197 
2198 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2199   for (int i = 0; exceptlabels[i].name != NULL; i++) {
2200     if (exceptlabels[i].number == exception_code) {
2201        jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2202        return buf;
2203     }
2204   }
2205 
2206   return NULL;
2207 }
2208 
2209 //-----------------------------------------------------------------------------
2210 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2211   // handle exception caused by idiv; should only happen for -MinInt/-1
2212   // (division by zero is handled explicitly)
2213 #ifdef _M_IA64
2214   assert(0, "Fix Handle_IDiv_Exception");
2215 #elif _M_AMD64
2216   PCONTEXT ctx = exceptionInfo->ContextRecord;
2217   address pc = (address)ctx->Rip;
2218   assert(pc[0] == 0xF7, "not an idiv opcode");
2219   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2220   assert(ctx->Rax == min_jint, "unexpected idiv exception");
2221   // set correct result values and continue after idiv instruction
  ctx->Rip = (DWORD64)pc + 2;      // idiv reg, reg  is 2 bytes; avoid truncating the 64-bit pc
2223   ctx->Rax = (DWORD)min_jint;      // result
2224   ctx->Rdx = (DWORD)0;             // remainder
2225   // Continue the execution
2226 #else
2227   PCONTEXT ctx = exceptionInfo->ContextRecord;
2228   address pc = (address)ctx->Eip;
2229   assert(pc[0] == 0xF7, "not an idiv opcode");
2230   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2231   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2232   // set correct result values and continue after idiv instruction
2233   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2234   ctx->Eax = (DWORD)min_jint;      // result
2235   ctx->Edx = (DWORD)0;             // remainder
2236   // Continue the execution
2237 #endif
2238   return EXCEPTION_CONTINUE_EXECUTION;
2239 }
2240 
2241 //-----------------------------------------------------------------------------
2242 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2243 #ifndef  _WIN64
2244   // handle exception caused by native method modifying control word
2245   PCONTEXT ctx = exceptionInfo->ContextRecord;
2246   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2247 
2248   switch (exception_code) {
2249     case EXCEPTION_FLT_DENORMAL_OPERAND:
2250     case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2251     case EXCEPTION_FLT_INEXACT_RESULT:
2252     case EXCEPTION_FLT_INVALID_OPERATION:
2253     case EXCEPTION_FLT_OVERFLOW:
2254     case EXCEPTION_FLT_STACK_CHECK:
2255     case EXCEPTION_FLT_UNDERFLOW:
2256       jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2257       if (fp_control_word != ctx->FloatSave.ControlWord) {
2258         // Restore FPCW and mask out FLT exceptions
2259         ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2260         // Mask out pending FLT exceptions
2261         ctx->FloatSave.StatusWord &=  0xffffff00;
2262         return EXCEPTION_CONTINUE_EXECUTION;
2263       }
2264   }
2265 
2266   if (prev_uef_handler != NULL) {
2267     // We didn't handle this exception so pass it to the previous
2268     // UnhandledExceptionFilter.
2269     return (prev_uef_handler)(exceptionInfo);
2270   }
2271 #else // !_WIN64
2272 /*
2273   On Windows, the mxcsr control bits are non-volatile across calls
2274   See also CR 6192333
2275 */
2276    PCONTEXT ctx = exceptionInfo->ContextRecord;
2277    jint MxCsr = INITIAL_MXCSR;
2278    // we can't use StubRoutines::addr_mxcsr_std()
2279    // because in Win64 mxcsr is not saved there
2280    if (MxCsr != ctx->MxCsr) {
2281      ctx->MxCsr = MxCsr;
2282      return EXCEPTION_CONTINUE_EXECUTION;
2283    }
2284 #endif // !_WIN64
2285 
2286   return EXCEPTION_CONTINUE_SEARCH;
2287 }
2288 
// Fatal error reporting is single threaded so we can make this a
// static and preallocated.  If it's MAX_PATH or longer (and thus cannot be
// stored with a terminating NUL), silently ignore it.
static char saved_error_file[MAX_PATH] = {0};

void os::set_error_file(const char *logfile) {
  if (strlen(logfile) < MAX_PATH) {
    strncpy(saved_error_file, logfile, MAX_PATH);
  }
}
2299 
2300 static inline void report_error(Thread* t, DWORD exception_code,
2301                                 address addr, void* siginfo, void* context) {
2302   VMError err(t, exception_code, addr, siginfo, context);
2303   err.report_and_die();
2304 
2305   // If UseOsErrorReporting, this will return here and save the error file
2306   // somewhere where we can find it in the minidump.
2307 }
2308 
2309 //-----------------------------------------------------------------------------
2310 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2311   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2312   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2313 #ifdef _M_IA64
2314   // On Itanium, we need the "precise pc", which has the slot number coded
2315   // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
2316   address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
2317   // Convert the pc to "Unix format", which has the slot number coded
2318   // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
2319   // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
2320   // information is saved in the Unix format.
2321   address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
2322 #elif _M_AMD64
2323   address pc = (address) exceptionInfo->ContextRecord->Rip;
2324 #else
2325   address pc = (address) exceptionInfo->ContextRecord->Eip;
2326 #endif
2327   Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady
2328 
2329   // Handle SafeFetch32 and SafeFetchN exceptions.
2330   if (StubRoutines::is_safefetch_fault(pc)) {
2331     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2332   }
2333 
2334 #ifndef _WIN64
2335   // Execution protection violation - win32 running on AMD64 only
2336   // Handled first to avoid misdiagnosis as a "normal" access violation;
2337   // This is safe to do because we have a new/unique ExceptionInformation
2338   // code for this condition.
2339   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2340     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2341     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2342     address addr = (address) exceptionRecord->ExceptionInformation[1];
2343 
2344     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2345       int page_size = os::vm_page_size();
2346 
2347       // Make sure the pc and the faulting address are sane.
2348       //
2349       // If an instruction spans a page boundary, and the page containing
2350       // the beginning of the instruction is executable but the following
2351       // page is not, the pc and the faulting address might be slightly
2352       // different - we still want to unguard the 2nd page in this case.
2353       //
2354       // 15 bytes seems to be a (very) safe value for max instruction size.
2355       bool pc_is_near_addr =
2356         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2357       bool instr_spans_page_boundary =
2358         (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2359                          (intptr_t) page_size) > 0);
2360 
2361       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2362         static volatile address last_addr =
2363           (address) os::non_memory_address_word();
2364 
2365         // In conservative mode, don't unguard unless the address is in the VM
2366         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2367             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2368 
2369           // Set memory to RWX and retry
2370           address page_start =
2371             (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2372           bool res = os::protect_memory((char*) page_start, page_size,
2373                                         os::MEM_PROT_RWX);
2374 
2375           if (PrintMiscellaneous && Verbose) {
2376             char buf[256];
2377             jio_snprintf(buf, sizeof(buf), "Execution protection violation "
2378                          "at " INTPTR_FORMAT
2379                          ", unguarding " INTPTR_FORMAT ": %s", addr,
2380                          page_start, (res ? "success" : strerror(errno)));
2381             tty->print_raw_cr(buf);
2382           }
2383 
2384           // Set last_addr so if we fault again at the same address, we don't
2385           // end up in an endless loop.
2386           //
2387           // There are two potential complications here.  Two threads trapping
2388           // at the same address at the same time could cause one of the
2389           // threads to think it already unguarded, and abort the VM.  Likely
2390           // very rare.
2391           //
2392           // The other race involves two threads alternately trapping at
2393           // different addresses and failing to unguard the page, resulting in
2394           // an endless loop.  This condition is probably even more unlikely
2395           // than the first.
2396           //
2397           // Although both cases could be avoided by using locks or thread
2398           // local last_addr, these solutions are unnecessary complication:
2399           // this handler is a best-effort safety net, not a complete solution.
2400           // It is disabled by default and should only be used as a workaround
2401           // in case we missed any no-execute-unsafe VM code.
2402 
2403           last_addr = addr;
2404 
2405           return EXCEPTION_CONTINUE_EXECUTION;
2406         }
2407       }
2408 
2409       // Last unguard failed or not unguarding
2410       tty->print_raw_cr("Execution protection violation");
2411       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2412                    exceptionInfo->ContextRecord);
2413       return EXCEPTION_CONTINUE_SEARCH;
2414     }
2415   }
2416 #endif // _WIN64
2417 
2418   // Check to see if we caught the safepoint code in the
2419   // process of write protecting the memory serialization page.
2420   // It write enables the page immediately after protecting it
2421   // so just return.
2422   if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
2423     JavaThread* thread = (JavaThread*) t;
2424     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2425     address addr = (address) exceptionRecord->ExceptionInformation[1];
2426     if ( os::is_memory_serialize_page(thread, addr) ) {
2427       // Block current thread until the memory serialize page permission restored.
2428       os::block_on_serialize_page_trap();
2429       return EXCEPTION_CONTINUE_EXECUTION;
2430     }
2431   }
2432 
2433   if (t != NULL && t->is_Java_thread()) {
2434     JavaThread* thread = (JavaThread*) t;
2435     bool in_java = thread->thread_state() == _thread_in_Java;
2436 
2437     // Handle potential stack overflows up front.
2438     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2439       if (os::uses_stack_guard_pages()) {
2440 #ifdef _M_IA64
2441         // Use guard page for register stack.
2442         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2443         address addr = (address) exceptionRecord->ExceptionInformation[1];
2444         // Check for a register stack overflow on Itanium
2445         if (thread->addr_inside_register_stack_red_zone(addr)) {
2446           // Fatal red zone violation happens if the Java program
2447           // catches a StackOverflow error and does so much processing
2448           // that it runs beyond the unprotected yellow guard zone. As
2449           // a result, we are out of here.
2450           fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
2451         } else if(thread->addr_inside_register_stack(addr)) {
2452           // Disable the yellow zone which sets the state that
2453           // we've got a stack overflow problem.
2454           if (thread->stack_yellow_zone_enabled()) {
2455             thread->disable_stack_yellow_zone();
2456           }
2457           // Give us some room to process the exception.
2458           thread->disable_register_stack_guard();
2459           // Tracing with +Verbose.
2460           if (Verbose) {
2461             tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
2462             tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
2463             tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
2464             tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
2465                           thread->register_stack_base(),
2466                           thread->register_stack_base() + thread->stack_size());
2467           }
2468 
2469           // Reguard the permanent register stack red zone just to be sure.
2470           // We saw Windows silently disabling this without telling us.
2471           thread->enable_register_stack_red_zone();
2472 
2473           return Handle_Exception(exceptionInfo,
2474             SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2475         }
2476 #endif
2477         if (thread->stack_yellow_zone_enabled()) {
2478           // Yellow zone violation.  The o/s has unprotected the first yellow
2479           // zone page for us.  Note:  must call disable_stack_yellow_zone to
2480           // update the enabled status, even if the zone contains only one page.
2481           thread->disable_stack_yellow_zone();
2482           // If not in java code, return and hope for the best.
2483           return in_java ? Handle_Exception(exceptionInfo,
2484             SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2485             :  EXCEPTION_CONTINUE_EXECUTION;
2486         } else {
2487           // Fatal red zone violation.
2488           thread->disable_stack_red_zone();
2489           tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2490           report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2491                        exceptionInfo->ContextRecord);
2492           return EXCEPTION_CONTINUE_SEARCH;
2493         }
2494       } else if (in_java) {
2495         // JVM-managed guard pages cannot be used on win95/98.  The o/s provides
2496         // a one-time-only guard page, which it has released to us.  The next
2497         // stack overflow on this thread will result in an ACCESS_VIOLATION.
2498         return Handle_Exception(exceptionInfo,
2499           SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2500       } else {
2501         // Can only return and hope for the best.  Further stack growth will
2502         // result in an ACCESS_VIOLATION.
2503         return EXCEPTION_CONTINUE_EXECUTION;
2504       }
2505     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2506       // Either stack overflow or null pointer exception.
2507       if (in_java) {
2508         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2509         address addr = (address) exceptionRecord->ExceptionInformation[1];
2510         address stack_end = thread->stack_base() - thread->stack_size();
2511         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2512           // Stack overflow.
2513           assert(!os::uses_stack_guard_pages(),
2514             "should be caught by red zone code above.");
2515           return Handle_Exception(exceptionInfo,
2516             SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2517         }
2518         //
2519         // Check for safepoint polling and implicit null
2520         // We only expect null pointers in the stubs (vtable)
2521         // the rest are checked explicitly now.
2522         //
2523         CodeBlob* cb = CodeCache::find_blob(pc);
2524         if (cb != NULL) {
2525           if (os::is_poll_address(addr)) {
2526             address stub = SharedRuntime::get_poll_stub(pc);
2527             return Handle_Exception(exceptionInfo, stub);
2528           }
2529         }
2530         {
2531 #ifdef _WIN64
2532           //
2533           // If it's a legal stack address map the entire region in
2534           //
2535           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2536           address addr = (address) exceptionRecord->ExceptionInformation[1];
2537           if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
2538                   addr = (address)((uintptr_t)addr &
2539                          (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2540                   os::commit_memory((char *)addr, thread->stack_base() - addr,
2541                                     !ExecMem);
2542                   return EXCEPTION_CONTINUE_EXECUTION;
2543           }
2544           else
2545 #endif
2546           {
2547             // Null pointer exception.
2548 #ifdef _M_IA64
2549             // Process implicit null checks in compiled code. Note: Implicit null checks
2550             // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
2551             if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
2552               CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
2553               // Handle implicit null check in UEP method entry
2554               if (cb && (cb->is_frame_complete_at(pc) ||
2555                          (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
2556                 if (Verbose) {
2557                   intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
2558                   tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
2559                   tty->print_cr("      to addr " INTPTR_FORMAT, addr);
2560                   tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
2561                                 *(bundle_start + 1), *bundle_start);
2562                 }
2563                 return Handle_Exception(exceptionInfo,
2564                   SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
2565               }
2566             }
2567 
2568             // Implicit null checks were processed above.  Hence, we should not reach
2569             // here in the usual case => die!
2570             if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
2571             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2572                          exceptionInfo->ContextRecord);
2573             return EXCEPTION_CONTINUE_SEARCH;
2574 
2575 #else // !IA64
2576 
2577             // Windows 98 reports faulting addresses incorrectly
2578             if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
2579                 !os::win32::is_nt()) {
2580               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2581               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2582             }
2583             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2584                          exceptionInfo->ContextRecord);
2585             return EXCEPTION_CONTINUE_SEARCH;
2586 #endif
2587           }
2588         }
2589       }
2590 
2591 #ifdef _WIN64
2592       // Special care for fast JNI field accessors.
2593       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2594       // in and the heap gets shrunk before the field access.
2595       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2596         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2597         if (addr != (address)-1) {
2598           return Handle_Exception(exceptionInfo, addr);
2599         }
2600       }
2601 #endif
2602 
2603       // Stack overflow or null pointer exception in native code.
2604       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2605                    exceptionInfo->ContextRecord);
2606       return EXCEPTION_CONTINUE_SEARCH;
2607     } // /EXCEPTION_ACCESS_VIOLATION
2608     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2609 #if defined _M_IA64
2610     else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2611               exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2612       M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
2613 
      // Compiled method patched to be non-entrant? The following conditions must apply:
      // 1. must be the first instruction in the bundle
      // 2. must be a break instruction with the appropriate code
      if ((((uint64_t) pc & 0x0F) == 0) &&
          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
2619         return Handle_Exception(exceptionInfo,
2620                                 (address)SharedRuntime::get_handle_wrong_method_stub());
2621       }
2622     } // /EXCEPTION_ILLEGAL_INSTRUCTION
2623 #endif
2624 
2625 
2626     if (in_java) {
2627       switch (exception_code) {
2628       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2629         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2630 
2631       case EXCEPTION_INT_OVERFLOW:
2632         return Handle_IDiv_Exception(exceptionInfo);
2633 
2634       } // switch
2635     }
2636     if (((thread->thread_state() == _thread_in_Java) ||
2637         (thread->thread_state() == _thread_in_native)) &&
2638         exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
2639     {
      LONG result = Handle_FLT_Exception(exceptionInfo);
      if (result == EXCEPTION_CONTINUE_EXECUTION) return result;
2642     }
2643   }
2644 
2645   if (exception_code != EXCEPTION_BREAKPOINT) {
2646     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2647                  exceptionInfo->ContextRecord);
2648   }
2649   return EXCEPTION_CONTINUE_SEARCH;
2650 }
2651 
2652 #ifndef _WIN64
2653 // Special care for fast JNI accessors.
2654 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2655 // the heap gets shrunk before the field access.
2656 // Need to install our own structured exception handler since native code may
2657 // install its own.
2658 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2659   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2660   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2661     address pc = (address) exceptionInfo->ContextRecord->Eip;
2662     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2663     if (addr != (address)-1) {
2664       return Handle_Exception(exceptionInfo, addr);
2665     }
2666   }
2667   return EXCEPTION_CONTINUE_SEARCH;
2668 }
2669 
2670 #define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
2671 Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
2672   __try { \
2673     return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
2674   } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
2675   } \
2676   return 0; \
2677 }
2678 
2679 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2680 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2681 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2682 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2683 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2684 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2685 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2686 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2687 
2688 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2689   switch (type) {
2690     case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2691     case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2692     case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2693     case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2694     case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2695     case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2696     case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2697     case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2698     default:        ShouldNotReachHere();
2699   }
2700   return (address)-1;
2701 }
2702 #endif
2703 
2704 #ifndef PRODUCT
2705 void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
2706   // Install a win32 structured exception handler around the test
2707   // function call so the VM can generate an error dump if needed.
2708   __try {
2709     (*funcPtr)();
2710   } __except(topLevelExceptionFilter(
2711              (_EXCEPTION_POINTERS*)_exception_info())) {
2712     // Nothing to do.
2713   }
2714 }
2715 #endif
2716 
2717 // Virtual Memory
2718 
2719 int os::vm_page_size() { return os::win32::vm_page_size(); }
2720 int os::vm_allocation_granularity() {
2721   return os::win32::vm_allocation_granularity();
2722 }
2723 
2724 // Windows large page support is available on Windows 2003. In order to use
2725 // large page memory, the administrator must first assign additional privilege
2726 // to the user:
2727 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2728 //   + select Local Policies -> User Rights Assignment
2729 //   + double click "Lock pages in memory", add users and/or groups
2730 //   + reboot
2731 // Note the above steps are needed for administrator as well, as administrators
2732 // by default do not have the privilege to lock pages in memory.
2733 //
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment that it only uses large pages if the
// entire memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future; if so, the code below needs to be revisited.
2741 
2742 #ifndef MEM_LARGE_PAGES
2743 #define MEM_LARGE_PAGES 0x20000000
2744 #endif
2745 
2746 static HANDLE    _hProcess;
2747 static HANDLE    _hToken;
2748 
2749 // Container for NUMA node list info
2750 class NUMANodeListHolder {
2751 private:
2752   int *_numa_used_node_list;  // allocated below
2753   int _numa_used_node_count;
2754 
2755   void free_node_list() {
2756     if (_numa_used_node_list != NULL) {
2757       FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
2758     }
2759   }
2760 
2761 public:
2762   NUMANodeListHolder() {
2763     _numa_used_node_count = 0;
2764     _numa_used_node_list = NULL;
2765     // do rest of initialization in build routine (after function pointers are set up)
2766   }
2767 
2768   ~NUMANodeListHolder() {
2769     free_node_list();
2770   }
2771 
2772   bool build() {
2773     DWORD_PTR proc_aff_mask;
2774     DWORD_PTR sys_aff_mask;
2775     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2776     ULONG highest_node_number;
2777     if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
2778     free_node_list();
2779     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2780     for (unsigned int i = 0; i <= highest_node_number; i++) {
2781       ULONGLONG proc_mask_numa_node;
2782       if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2783       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2784         _numa_used_node_list[_numa_used_node_count++] = i;
2785       }
2786     }
2787     return (_numa_used_node_count > 1);
2788   }
2789 
2790   int get_count() {return _numa_used_node_count;}
2791   int get_node_list_entry(int n) {
2792     // for indexes out of range, returns -1
2793     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2794   }
2795 
2796 } numa_node_list_holder;
2797 
2798 
2799 
2800 static size_t _large_page_size = 0;
2801 
2802 static bool resolve_functions_for_large_page_init() {
2803   return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
2804     os::Advapi32Dll::AdvapiAvailable();
2805 }
2806 
2807 static bool request_lock_memory_privilege() {
2808   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2809                                 os::current_process_id());
2810 
2811   LUID luid;
2812   if (_hProcess != NULL &&
2813       os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2814       os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2815 
2816     TOKEN_PRIVILEGES tp;
2817     tp.PrivilegeCount = 1;
2818     tp.Privileges[0].Luid = luid;
2819     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2820 
2821     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2822     // privilege. Check GetLastError() too. See MSDN document.
2823     if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2824         (GetLastError() == ERROR_SUCCESS)) {
2825       return true;
2826     }
2827   }
2828 
2829   return false;
2830 }
2831 
2832 static void cleanup_after_large_page_init() {
2833   if (_hProcess) CloseHandle(_hProcess);
2834   _hProcess = NULL;
2835   if (_hToken) CloseHandle(_hToken);
2836   _hToken = NULL;
2837 }
2838 
2839 static bool numa_interleaving_init() {
2840   bool success = false;
2841   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2842 
2843   // print a warning if UseNUMAInterleaving flag is specified on command line
2844   bool warn_on_failure = use_numa_interleaving_specified;
2845 # define WARN(msg) if (warn_on_failure) { warning(msg); }
2846 
2847   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2848   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2849   NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2850 
2851   if (os::Kernel32Dll::NumaCallsAvailable()) {
2852     if (numa_node_list_holder.build()) {
2853       if (PrintMiscellaneous && Verbose) {
2854         tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2855         for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2856           tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
2857         }
2858         tty->print("\n");
2859       }
2860       success = true;
2861     } else {
2862       WARN("Process does not cover multiple NUMA nodes.");
2863     }
2864   } else {
2865     WARN("NUMA Interleaving is not supported by the operating system.");
2866   }
2867   if (!success) {
2868     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2869   }
2870   return success;
2871 #undef WARN
2872 }
2873 
// This routine is used whenever we need to reserve a contiguous VA range
// but must make separate VirtualAlloc calls for each piece of the range.
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
2879 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
2880                                          bool should_inject_error=false) {
2881   char * p_buf;
2882   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2883   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2884   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2885 
  // First reserve enough address space in advance, since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large-page commits, but WS2003 does not allow reserving large-page space;
  // so we just use 4K pages for the reserve, which gives us a legal contiguous
  // address space. Then we deallocate that reservation and re-allocate
  // using large pages.
2892   const size_t size_of_reserve = bytes + chunk_size;
2893   if (bytes > size_of_reserve) {
2894     // Overflowed.
2895     return NULL;
2896   }
2897   p_buf = (char *) VirtualAlloc(addr,
2898                                 size_of_reserve,  // size of Reserve
2899                                 MEM_RESERVE,
2900                                 PAGE_READWRITE);
2901   // If reservation failed, return NULL
2902   if (p_buf == NULL) return NULL;
2903   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
2904   os::release_memory(p_buf, bytes + chunk_size);
2905 
  // We still need to round up to a page boundary (in case we are using large pages),
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with the page size);
  // instead we handle this in the bytes_to_rq computation below.
2909   p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2910 
2911   // now go through and allocate one chunk at a time until all bytes are
2912   // allocated
2913   size_t  bytes_remaining = bytes;
2914   // An overflow of align_size_up() would have been caught above
2915   // in the calculation of size_of_reserve.
2916   char * next_alloc_addr = p_buf;
2917   HANDLE hProc = GetCurrentProcess();
2918 
2919 #ifdef ASSERT
2920   // Variable for the failure injection
2921   long ran_num = os::random();
2922   size_t fail_after = ran_num % bytes;
2923 #endif
2924 
2925   int count=0;
2926   while (bytes_remaining) {
2927     // select bytes_to_rq to get to the next chunk_size boundary
2928 
2929     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2930     // Note allocate and commit
2931     char * p_new;
2932 
2933 #ifdef ASSERT
2934     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2935 #else
2936     const bool inject_error_now = false;
2937 #endif
2938 
2939     if (inject_error_now) {
2940       p_new = NULL;
2941     } else {
2942       if (!UseNUMAInterleaving) {
2943         p_new = (char *) VirtualAlloc(next_alloc_addr,
2944                                       bytes_to_rq,
2945                                       flags,
2946                                       prot);
2947       } else {
2948         // get the next node to use from the used_node_list
2949         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2950         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2951         p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
2952                                                             next_alloc_addr,
2953                                                             bytes_to_rq,
2954                                                             flags,
2955                                                             prot,
2956                                                             node);
2957       }
2958     }
2959 
2960     if (p_new == NULL) {
2961       // Free any allocated pages
2962       if (next_alloc_addr > p_buf) {
2963         // Some memory was committed so release it.
2964         size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // needs to create a dummy 'reserve' record to match
        // the release.
2968         MemTracker::record_virtual_memory_reserve((address)p_buf,
2969           bytes_to_release, mtNone, CALLER_PC);
2970         os::release_memory(p_buf, bytes_to_release);
2971       }
2972 #ifdef ASSERT
2973       if (should_inject_error) {
2974         if (TracePageSizes && Verbose) {
2975           tty->print_cr("Reserving pages individually failed.");
2976         }
2977       }
2978 #endif
2979       return NULL;
2980     }
2981 
2982     bytes_remaining -= bytes_to_rq;
2983     next_alloc_addr += bytes_to_rq;
2984     count++;
2985   }
2986   // Although the memory is allocated individually, it is returned as one.
2987   // NMT records it as one block.
2988   address pc = CALLER_PC;
2989   if ((flags & MEM_COMMIT) != 0) {
2990     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
2991   } else {
2992     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
2993   }
2994 
2995   // made it this far, success
2996   return p_buf;
2997 }
2998 
2999 
3000 
3001 void os::large_page_init() {
3002   if (!UseLargePages) return;
3003 
3004   // print a warning if any large page related flag is specified on command line
3005   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3006                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3007   bool success = false;
3008 
3009 # define WARN(msg) if (warn_on_failure) { warning(msg); }
3010   if (resolve_functions_for_large_page_init()) {
3011     if (request_lock_memory_privilege()) {
3012       size_t s = os::Kernel32Dll::GetLargePageMinimum();
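      // GetLargePageMinimum() returns 0 if the processor does not support
      // large pages.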
3013       if (s) {
3014 #if defined(IA32) || defined(AMD64)
3015         if (s > 4*M || LargePageSizeInBytes > 4*M) {
3016           WARN("JVM cannot use large pages bigger than 4mb.");
3017         } else {
3018 #endif
3019           if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
3020             _large_page_size = LargePageSizeInBytes;
3021           } else {
3022             _large_page_size = s;
3023           }
3024           success = true;
3025 #if defined(IA32) || defined(AMD64)
3026         }
3027 #endif
3028       } else {
3029         WARN("Large page is not supported by the processor.");
3030       }
3031     } else {
3032       WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3033     }
3034   } else {
3035     WARN("Large page is not supported by the operating system.");
3036   }
3037 #undef WARN
3038 
3039   const size_t default_page_size = (size_t) vm_page_size();
3040   if (success && _large_page_size > default_page_size) {
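    // Publish the supported page sizes, largest first; the list is
    // zero-terminated.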
3041     _page_sizes[0] = _large_page_size;
3042     _page_sizes[1] = default_page_size;
3043     _page_sizes[2] = 0;
3044   }
3045 
3046   cleanup_after_large_page_init();
3047   UseLargePages = success;
3048 }
3049 
3050 // On win32, one cannot release just a part of reserved memory, it's an
3051 // all or nothing deal.  When we split a reservation, we must break the
3052 // reservation into two reservations.
3053 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3054                               bool realloc) {
3055   if (size > 0) {
3056     release_memory(base, size);
3057     if (realloc) {
3058       reserve_memory(split, base);
3059     }
3060     if (size != split) {
3061       reserve_memory(size - split, base + split);
3062     }
3063   }
3064 }
3065 
// Multiple threads can race in this code, but it is not possible to unmap
// small sections of virtual space to get the requested alignment, as on
// POSIX-like OSes.
// Windows prevents multiple threads from remapping over each other, so this
// loop is thread-safe.
3069 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
3070   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3071       "Alignment must be a multiple of allocation granularity (page size)");
3072   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3073 
3074   size_t extra_size = size + alignment;
3075   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3076 
3077   char* aligned_base = NULL;
3078 
3079   do {
3080     char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
3081     if (extra_base == NULL) {
3082       return NULL;
3083     }
3084     // Do manual alignment
3085     aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3086 
3087     os::release_memory(extra_base, extra_size);
3088 
3089     aligned_base = os::reserve_memory(size, aligned_base);
3090 
3091   } while (aligned_base == NULL);
3092 
3093   return aligned_base;
3094 }
3095 
3096 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3097   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3098          "reserve alignment");
3099   assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
3100   char* res;
3101   // note that if UseLargePages is on, all the areas that require interleaving
3102   // will go thru reserve_memory_special rather than thru here.
3103   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3104   if (!use_individual) {
3105     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3106   } else {
3107     elapsedTimer reserveTimer;
3108     if( Verbose && PrintMiscellaneous ) reserveTimer.start();
3109     // in numa interleaving, we have to allocate pages individually
3110     // (well really chunks of NUMAInterleaveGranularity size)
3111     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3112     if (res == NULL) {
3113       warning("NUMA page allocation failed");
3114     }
3115     if( Verbose && PrintMiscellaneous ) {
3116       reserveTimer.stop();
3117       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3118                     reserveTimer.milliseconds(), reserveTimer.ticks());
3119     }
3120   }
3121   assert(res == NULL || addr == NULL || addr == res,
3122          "Unexpected address from reserve.");
3123 
3124   return res;
3125 }
3126 
3127 // Reserve memory at an arbitrary address, only if that area is
3128 // available (and not reserved for something else).
3129 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
3132   return reserve_memory(bytes, requested_addr);
3133 }
3134 
3135 size_t os::large_page_size() {
3136   return _large_page_size;
3137 }
3138 
3139 bool os::can_commit_large_page_memory() {
3140   // Windows only uses large page memory when the entire region is reserved
3141   // and committed in a single VirtualAlloc() call. This may change in the
3142   // future, but with Windows 2003 it's not possible to commit on demand.
3143   return false;
3144 }
3145 
3146 bool os::can_execute_large_page_memory() {
3147   return true;
3148 }
3149 
3150 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
3151   assert(UseLargePages, "only for large pages");
3152 
3153   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3154     return NULL; // Fallback to small pages.
3155   }
3156 
3157   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3158   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3159 
3160   // with large pages, there are two cases where we need to use Individual Allocation
3161   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3162   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3163   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3164     if (TracePageSizes && Verbose) {
3165        tty->print_cr("Reserving large pages individually.");
3166     }
3167     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3168     if (p_buf == NULL) {
3169       // give an appropriate warning message
3170       if (UseNUMAInterleaving) {
3171         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3172       }
3173       if (UseLargePagesIndividualAllocation) {
3174         warning("Individually allocated large pages failed, "
3175                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3176       }
3177       return NULL;
3178     }
3179 
3180     return p_buf;
3181 
3182   } else {
3183     if (TracePageSizes && Verbose) {
3184        tty->print_cr("Reserving large pages in a single large chunk.");
3185     }
3186     // normal policy just allocate it all at once
3187     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3188     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3189     if (res != NULL) {
3190       address pc = CALLER_PC;
3191       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
3192     }
3193 
3194     return res;
3195   }
3196 }
3197 
3198 bool os::release_memory_special(char* base, size_t bytes) {
3199   assert(base != NULL, "Sanity check");
3200   return release_memory(base, bytes);
3201 }
3202 
3203 void os::print_statistics() {
3204 }
3205 
3206 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3207   int err = os::get_last_error();
3208   char buf[256];
3209   size_t buf_len = os::lasterror(buf, sizeof(buf));
3210   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3211           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3212           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3213 }
3214 
3215 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3216   if (bytes == 0) {
3217     // Don't bother the OS with noops.
3218     return true;
3219   }
3220   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3221   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3222   // Don't attempt to print anything if the OS call fails. We're
3223   // probably low on resources, so the print itself may cause crashes.
3224 
3225   // unless we have NUMAInterleaving enabled, the range of a commit
3226   // is always within a reserve covered by a single VirtualAlloc
3227   // in that case we can just do a single commit for the requested size
3228   if (!UseNUMAInterleaving) {
3229     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3230       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3231       return false;
3232     }
3233     if (exec) {
3234       DWORD oldprot;
3235       // Windows doc says to use VirtualProtect to get execute permissions
3236       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3237         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3238         return false;
3239       }
3240     }
3241     return true;
3242   } else {
3243 
3244     // when NUMAInterleaving is enabled, the commit might cover a range that
3245     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3246     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3247     // returns represents the number of bytes that can be committed in one step.
3248     size_t bytes_remaining = bytes;
3249     char * next_alloc_addr = addr;
3250     while (bytes_remaining > 0) {
3251       MEMORY_BASIC_INFORMATION alloc_info;
3252       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3253       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3254       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3255                        PAGE_READWRITE) == NULL) {
3256         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3257                                             exec);)
3258         return false;
3259       }
3260       if (exec) {
3261         DWORD oldprot;
3262         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3263                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3264           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3265                                               exec);)
3266           return false;
3267         }
3268       }
3269       bytes_remaining -= bytes_to_rq;
3270       next_alloc_addr += bytes_to_rq;
3271     }
3272   }
3273   // if we made it this far, return true
3274   return true;
3275 }
3276 
3277 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3278                        bool exec) {
3279   // alignment_hint is ignored on this OS
3280   return pd_commit_memory(addr, size, exec);
3281 }
3282 
3283 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3284                                   const char* mesg) {
3285   assert(mesg != NULL, "mesg must be specified");
3286   if (!pd_commit_memory(addr, size, exec)) {
3287     warn_fail_commit_memory(addr, size, exec);
3288     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
3289   }
3290 }
3291 
3292 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3293                                   size_t alignment_hint, bool exec,
3294                                   const char* mesg) {
3295   // alignment_hint is ignored on this OS
3296   pd_commit_memory_or_exit(addr, size, exec, mesg);
3297 }
3298 
3299 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3300   if (bytes == 0) {
3301     // Don't bother the OS with noops.
3302     return true;
3303   }
3304   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3305   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3306   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3307 }
3308 
3309 bool os::pd_release_memory(char* addr, size_t bytes) {
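  // VirtualFree with MEM_RELEASE requires dwSize == 0 and addr must be the
  // base address returned by the original VirtualAlloc reservation.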
3310   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3311 }
3312 
3313 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3314   return os::commit_memory(addr, size, !ExecMem);
3315 }
3316 
3317 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3318   return os::uncommit_memory(addr, size);
3319 }
3320 
3321 // Set protections specified
3322 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3323                         bool is_committed) {
3324   unsigned int p = 0;
3325   switch (prot) {
3326   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3327   case MEM_PROT_READ: p = PAGE_READONLY; break;
3328   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3329   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3330   default:
3331     ShouldNotReachHere();
3332   }
3333 
3334   DWORD old_status;
3335 
  // Strangely enough, on Win32 one can change protection only for committed
  // memory. Not a big deal anyway, as bytes is less than or equal to 64K.
3338   if (!is_committed) {
3339     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3340                           "cannot commit protection page");
3341   }
  // One cannot use os::guard_memory() here, as on Win32 guard pages
  // have different (one-shot) semantics. From MSDN on PAGE_GUARD:
3344   //
3345   // Pages in the region become guard pages. Any attempt to access a guard page
3346   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3347   // the guard page status. Guard pages thus act as a one-time access alarm.
3348   return VirtualProtect(addr, bytes, p, &old_status) != 0;
3349 }
3350 
3351 bool os::guard_memory(char* addr, size_t bytes) {
3352   DWORD old_status;
3353   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3354 }
3355 
3356 bool os::unguard_memory(char* addr, size_t bytes) {
3357   DWORD old_status;
3358   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3359 }
3360 
3361 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3362 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3363 void os::numa_make_global(char *addr, size_t bytes)    { }
3364 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3365 bool os::numa_topology_changed()                       { return false; }
3366 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3367 int os::numa_get_group_id()                            { return 0; }
3368 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3369   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3370     // Provide an answer for UMA systems
3371     ids[0] = 0;
3372     return 1;
3373   } else {
3374     // check for size bigger than actual groups_num
3375     size = MIN2(size, numa_get_groups_num());
3376     for (int i = 0; i < (int)size; i++) {
3377       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3378     }
3379     return size;
3380   }
3381 }
3382 
3383 bool os::get_page_info(char *start, page_info* info) {
3384   return false;
3385 }
3386 
3387 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
3388   return end;
3389 }
3390 
3391 char* os::non_memory_address_word() {
3392   // Must never look like an address returned by reserve_memory,
3393   // even in its subfields (as defined by the CPU immediate fields,
3394   // if the CPU splits constants across multiple instructions).
3395   return (char*)-1;
3396 }
3397 
3398 #define MAX_ERROR_COUNT 100
3399 #define SYS_THREAD_ERROR 0xffffffffUL
3400 
3401 void os::pd_start_thread(Thread* thread) {
3402   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3403   // Returns previous suspend state:
3404   // 0:  Thread was not suspended
3405   // 1:  Thread is running now
3406   // >1: Thread is still suspended.
3407   assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
3408 }
3409 
3410 class HighResolutionInterval : public CHeapObj<mtThread> {
3411   // The default timer resolution seems to be 10 milliseconds.
3412   // (Where is this written down?)
3413   // If someone wants to sleep for only a fraction of the default,
3414   // then we set the timer resolution down to 1 millisecond for
3415   // the duration of their interval.
3416   // We carefully set the resolution back, since otherwise we
3417   // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3420   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3421   // timeBeginPeriod() if the relative error exceeded some threshold.
3422   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3423   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
3424   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3425   // resolution timers running.
3426 private:
3427     jlong resolution;
3428 public:
3429   HighResolutionInterval(jlong ms) {
3430     resolution = ms % 10L;
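    // Only raise the timer resolution when ms is not a multiple of the
    // ~10 ms default timer period (see the class comment above).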
3431     if (resolution != 0) {
3432       MMRESULT result = timeBeginPeriod(1L);
3433     }
3434   }
3435   ~HighResolutionInterval() {
3436     if (resolution != 0) {
3437       MMRESULT result = timeEndPeriod(1L);
3438     }
3439     resolution = 0L;
3440   }
3441 };
3442 
3443 int os::sleep(Thread* thread, jlong ms, bool interruptable) {
3444   jlong limit = (jlong) MAXDWORD;
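  // The Win32 wait/sleep primitives take a DWORD millisecond timeout, so
  // split sleeps longer than MAXDWORD ms into multiple chunks.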
3445 
3446   while(ms > limit) {
3447     int res;
3448     if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
3449       return res;
3450     ms -= limit;
3451   }
3452 
3453   assert(thread == Thread::current(),  "thread consistency check");
3454   OSThread* osthread = thread->osthread();
3455   OSThreadWaitState osts(osthread, false /* not Object.wait() */);
3456   int result;
3457   if (interruptable) {
3458     assert(thread->is_Java_thread(), "must be java thread");
3459     JavaThread *jt = (JavaThread *) thread;
3460     ThreadBlockInVM tbivm(jt);
3461 
3462     jt->set_suspend_equivalent();
3463     // cleared by handle_special_suspend_equivalent_condition() or
3464     // java_suspend_self() via check_and_wait_while_suspended()
3465 
3466     HANDLE events[1];
3467     events[0] = osthread->interrupt_event();
3468     HighResolutionInterval *phri=NULL;
3469     if(!ForceTimeHighResolution)
3470       phri = new HighResolutionInterval( ms );
3471     if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
3472       result = OS_TIMEOUT;
3473     } else {
3474       ResetEvent(osthread->interrupt_event());
3475       osthread->set_interrupted(false);
3476       result = OS_INTRPT;
3477     }
3478     delete phri; //if it is NULL, harmless
3479 
3480     // were we externally suspended while we were waiting?
3481     jt->check_and_wait_while_suspended();
3482   } else {
3483     assert(!thread->is_Java_thread(), "must not be java thread");
3484     Sleep((long) ms);
3485     result = OS_TIMEOUT;
3486   }
3487   return result;
3488 }
3489 
3490 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3491 void os::infinite_sleep() {
3492   while (true) {    // sleep forever ...
3493     Sleep(100000);  // ... 100 seconds at a time
3494   }
3495 }
3496 
3497 typedef BOOL (WINAPI * STTSignature)(void) ;
3498 
3499 os::YieldResult os::NakedYield() {
3500   // Use either SwitchToThread() or Sleep(0)
3501   // Consider passing back the return value from SwitchToThread().
3502   if (os::Kernel32Dll::SwitchToThreadAvailable()) {
3503     return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
3504   } else {
3505     Sleep(0);
3506   }
3507   return os::YIELD_UNKNOWN ;
3508 }
3509 
3510 void os::yield() {  os::NakedYield(); }
3511 
3512 void os::yield_all(int attempts) {
3513   // Yields to all threads, including threads with lower priorities
3514   Sleep(1);
3515 }
3516 
3517 // Win32 only gives you access to seven real priorities at a time,
3518 // so we compress Java's ten down to seven.  It would be better
3519 // if we dynamically adjusted relative priorities.
3520 
3521 int os::java_to_os_priority[CriticalPriority + 1] = {
3522   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3523   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3524   THREAD_PRIORITY_LOWEST,                       // 2
3525   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3526   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3527   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3528   THREAD_PRIORITY_NORMAL,                       // 6
3529   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3530   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3531   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3532   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3533   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3534 };
3535 
3536 int prio_policy1[CriticalPriority + 1] = {
3537   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3538   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3539   THREAD_PRIORITY_LOWEST,                       // 2
3540   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3541   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3542   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3543   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3544   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3545   THREAD_PRIORITY_HIGHEST,                      // 8
3546   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3547   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3548   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3549 };
3550 
3551 static int prio_init() {
3552   // If ThreadPriorityPolicy is 1, switch tables
3553   if (ThreadPriorityPolicy == 1) {
3554     int i;
3555     for (i = 0; i < CriticalPriority + 1; i++) {
3556       os::java_to_os_priority[i] = prio_policy1[i];
3557     }
3558   }
3559   if (UseCriticalJavaThreadPriority) {
3560     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
3561   }
3562   return 0;
3563 }
3564 
3565 OSReturn os::set_native_priority(Thread* thread, int priority) {
3566   if (!UseThreadPriorities) return OS_OK;
3567   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3568   return ret ? OS_OK : OS_ERR;
3569 }
3570 
3571 OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
3572   if ( !UseThreadPriorities ) {
3573     *priority_ptr = java_to_os_priority[NormPriority];
3574     return OS_OK;
3575   }
3576   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3577   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3578     assert(false, "GetThreadPriority failed");
3579     return OS_ERR;
3580   }
3581   *priority_ptr = os_prio;
3582   return OS_OK;
3583 }
3584 
3585 
3586 // Hint to the underlying OS that a task switch would not be good.
3587 // Void return because it's a hint and can fail.
3588 void os::hint_no_preempt() {}
3589 
3590 void os::interrupt(Thread* thread) {
3591   assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3592          "possibility of dangling Thread pointer");
3593 
3594   OSThread* osthread = thread->osthread();
3595   osthread->set_interrupted(true);
3596   // More than one thread can get here with the same value of osthread,
3597   // resulting in multiple notifications.  We do, however, want the store
3598   // to interrupted() to be visible to other threads before we post
3599   // the interrupt event.
3600   OrderAccess::release();
3601   SetEvent(osthread->interrupt_event());
3602   // For JSR166:  unpark after setting status
3603   if (thread->is_Java_thread())
3604     ((JavaThread*)thread)->parker()->unpark();
3605 
3606   ParkEvent * ev = thread->_ParkEvent ;
3607   if (ev != NULL) ev->unpark() ;
3608 
3609 }
3610 
3611 
3612 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3613   assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3614          "possibility of dangling Thread pointer");
3615 
3616   OSThread* osthread = thread->osthread();
3617   bool interrupted = osthread->interrupted();
3618   // There is no synchronization between the setting of the interrupt
3619   // and it being cleared here. It is critical - see 6535709 - that
3620   // we only clear the interrupt state, and reset the interrupt event,
3621   // if we are going to report that we were indeed interrupted - else
3622   // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3623   // depending on the timing
3624   if (interrupted && clear_interrupted) {
3625     osthread->set_interrupted(false);
3626     ResetEvent(osthread->interrupt_event());
3627   } // Otherwise leave the interrupted state alone
3628 
3629   return interrupted;
3630 }
3631 
// Gets a pc (hint) for a running thread. Currently used only for profiling.
3633 ExtendedPC os::get_thread_pc(Thread* thread) {
3634   CONTEXT context;
3635   context.ContextFlags = CONTEXT_CONTROL;
3636   HANDLE handle = thread->osthread()->thread_handle();
3637 #ifdef _M_IA64
3638   assert(0, "Fix get_thread_pc");
3639   return ExtendedPC(NULL);
3640 #else
3641   if (GetThreadContext(handle, &context)) {
3642 #ifdef _M_AMD64
3643     return ExtendedPC((address) context.Rip);
3644 #else
3645     return ExtendedPC((address) context.Eip);
3646 #endif
3647   } else {
3648     return ExtendedPC(NULL);
3649   }
3650 #endif
3651 }
3652 
3653 // GetCurrentThreadId() returns DWORD
3654 intx os::current_thread_id()          { return GetCurrentThreadId(); }
3655 
3656 static int _initial_pid = 0;
3657 
3658 int os::current_process_id()
3659 {
3660   return (_initial_pid ? _initial_pid : _getpid());
3661 }
3662 
3663 int    os::win32::_vm_page_size       = 0;
3664 int    os::win32::_vm_allocation_granularity = 0;
3665 int    os::win32::_processor_type     = 0;
3666 // Processor level is not available on non-NT systems, use vm_version instead
3667 int    os::win32::_processor_level    = 0;
3668 julong os::win32::_physical_memory    = 0;
3669 size_t os::win32::_default_stack_size = 0;
3670 
3671          intx os::win32::_os_thread_limit    = 0;
3672 volatile intx os::win32::_os_thread_count    = 0;
3673 
3674 bool   os::win32::_is_nt              = false;
3675 bool   os::win32::_is_windows_2003    = false;
3676 bool   os::win32::_is_windows_server  = false;
3677 
3678 void os::win32::initialize_system_info() {
3679   SYSTEM_INFO si;
3680   GetSystemInfo(&si);
3681   _vm_page_size    = si.dwPageSize;
3682   _vm_allocation_granularity = si.dwAllocationGranularity;
3683   _processor_type  = si.dwProcessorType;
3684   _processor_level = si.wProcessorLevel;
3685   set_processor_count(si.dwNumberOfProcessors);
3686 
3687   MEMORYSTATUSEX ms;
3688   ms.dwLength = sizeof(ms);
3689 
  // GlobalMemoryStatusEx also returns ullAvailPhys (free physical memory bytes),
  // ullTotalVirtual, ullAvailVirtual, and dwMemoryLoad (% of memory in use)
3692   GlobalMemoryStatusEx(&ms);
3693   _physical_memory = ms.ullTotalPhys;
3694 
3695   OSVERSIONINFOEX oi;
3696   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3697   GetVersionEx((OSVERSIONINFO*)&oi);
3698   switch(oi.dwPlatformId) {
3699     case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
3700     case VER_PLATFORM_WIN32_NT:
3701       _is_nt = true;
3702       {
3703         int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
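        // 5002 corresponds to version 5.2, i.e. Windows Server 2003
        // (and Windows XP Professional x64 Edition).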
3704         if (os_vers == 5002) {
3705           _is_windows_2003 = true;
3706         }
3707         if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3708           oi.wProductType == VER_NT_SERVER) {
3709             _is_windows_server = true;
3710         }
3711       }
3712       break;
3713     default: fatal("Unknown platform");
3714   }
3715 
3716   _default_stack_size = os::current_stack_size();
3717   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3718   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3719     "stack size not a multiple of page size");
3720 
3721   initialize_performance_counter();
3722 
  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system if the VM issues thread operations at
  // too high a frequency, e.g., such as changing the priorities.
  // The value 6000 seems to work well - no deadlocks have been noticed on
  // the test programs that we have seen experience this problem.
3728   if (!os::win32::is_nt()) {
3729     StarvationMonitorInterval = 6000;
3730   }
3731 }
3732 
3733 
3734 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) {
3735   char path[MAX_PATH];
3736   DWORD size;
3737   DWORD pathLen = (DWORD)sizeof(path);
3738   HINSTANCE result = NULL;
3739 
3740   // only allow library name without path component
3741   assert(strchr(name, '\\') == NULL, "path not allowed");
3742   assert(strchr(name, ':') == NULL, "path not allowed");
3743   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3744     jio_snprintf(ebuf, ebuflen,
3745       "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3746     return NULL;
3747   }
3748 
3749   // search system directory
3750   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3751     strcat(path, "\\");
3752     strcat(path, name);
3753     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3754       return result;
3755     }
3756   }
3757 
3758   // try Windows directory
3759   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3760     strcat(path, "\\");
3761     strcat(path, name);
3762     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3763       return result;
3764     }
3765   }
3766 
3767   jio_snprintf(ebuf, ebuflen,
3768     "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3769   return NULL;
3770 }
3771 
3772 void os::win32::setmode_streams() {
3773   _setmode(_fileno(stdin), _O_BINARY);
3774   _setmode(_fileno(stdout), _O_BINARY);
3775   _setmode(_fileno(stderr), _O_BINARY);
3776 }
3777 
3778 
3779 bool os::is_debugger_attached() {
3780   return IsDebuggerPresent() ? true : false;
3781 }
3782 
3783 
3784 void os::wait_for_keypress_at_exit(void) {
3785   if (PauseAtExit) {
3786     fprintf(stderr, "Press any key to continue...\n");
3787     fgetc(stdin);
3788   }
3789 }
3790 
3791 
3792 int os::message_box(const char* title, const char* message) {
3793   int result = MessageBox(NULL, message, title,
3794                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3795   return result == IDYES;
3796 }
3797 
3798 int os::allocate_thread_local_storage() {
3799   return TlsAlloc();
3800 }
3801 
3802 
3803 void os::free_thread_local_storage(int index) {
3804   TlsFree(index);
3805 }
3806 
3807 
3808 void os::thread_local_storage_at_put(int index, void* value) {
3809   TlsSetValue(index, value);
3810   assert(thread_local_storage_at(index) == value, "Just checking");
3811 }
3812 
3813 
3814 void* os::thread_local_storage_at(int index) {
3815   return TlsGetValue(index);
3816 }
3817 
3818 
3819 #ifndef PRODUCT
3820 #ifndef _WIN64
3821 // Helpers to check whether NX protection is enabled
3822 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
3823   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
3824       pex->ExceptionRecord->NumberParameters > 0 &&
3825       pex->ExceptionRecord->ExceptionInformation[0] ==
3826       EXCEPTION_INFO_EXEC_VIOLATION) {
3827     return EXCEPTION_EXECUTE_HANDLER;
3828   }
3829   return EXCEPTION_CONTINUE_SEARCH;
3830 }
3831 
3832 void nx_check_protection() {
3833   // If NX is enabled we'll get an exception calling into code on the stack
3834   char code[] = { (char)0xC3 }; // ret
3835   void *code_ptr = (void *)code;
3836   __try {
3837     __asm call code_ptr
3838   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
3839     tty->print_raw_cr("NX protection detected.");
3840   }
3841 }
3842 #endif // _WIN64
3843 #endif // PRODUCT
3844 
3845 // this is called _before_ the global arguments have been parsed
3846 void os::init(void) {
3847   _initial_pid = _getpid();
3848 
3849   init_random(1234567);
3850 
3851   win32::initialize_system_info();
3852   win32::setmode_streams();
3853   init_page_sizes((size_t) win32::vm_page_size());
3854 
3855   // For better scalability on MP systems (must be called after initialize_system_info)
3856 #ifndef PRODUCT
3857   if (is_MP()) {
3858     NoYieldsInMicrolock = true;
3859   }
3860 #endif
3861   // This may be overridden later when argument processing is done.
3862   FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
3863     os::win32::is_windows_2003());
3864 
3865   // Initialize main_process and main_thread
3866   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
3867  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
3868                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
3869     fatal("DuplicateHandle failed\n");
3870   }
3871   main_thread_id = (int) GetCurrentThreadId();
3872 }
3873 
3874 // To install functions for atexit processing
3875 extern "C" {
3876   static void perfMemory_exit_helper() {
3877     perfMemory_exit();
3878   }
3879 }
3880 
3881 static jint initSock();
3882 
3883 // this is called _after_ the global arguments have been parsed
3884 jint os::init_2(void) {
3885   // Allocate a single page and mark it as readable for safepoint polling
3886   address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
3887   guarantee( polling_page != NULL, "Reserve Failed for polling page");
3888 
3889   address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
3890   guarantee( return_page != NULL, "Commit Failed for polling page");
3891 
3892   os::set_polling_page( polling_page );
3893 
3894 #ifndef PRODUCT
3895   if( Verbose && PrintMiscellaneous )
3896     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
3897 #endif
3898 
3899   if (!UseMembar) {
3900     address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
3901     guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
3902 
3903     return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
3904     guarantee( return_page != NULL, "Commit Failed for memory serialize page");
3905 
3906     os::set_memory_serialize_page( mem_serialize_page );
3907 
3908 #ifndef PRODUCT
3909     if(Verbose && PrintMiscellaneous)
3910       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3911 #endif
3912   }
3913 
3914   // Setup Windows Exceptions
3915 
3916   // for debugging float code generation bugs
3917   if (ForceFloatExceptions) {
3918 #ifndef  _WIN64
3919     static long fp_control_word = 0;
3920     __asm { fstcw fp_control_word }
3921     // see Intel PPro Manual, Vol. 2, p 7-16
3922     const long precision = 0x20;
3923     const long underflow = 0x10;
3924     const long overflow  = 0x08;
3925     const long zero_div  = 0x04;
3926     const long denorm    = 0x02;
3927     const long invalid   = 0x01;
3928     fp_control_word |= invalid;
3929     __asm { fldcw fp_control_word }
3930 #endif
3931   }
3932 
  // If stack_commit_size is 0, Windows will reserve the default size,
  // but only commit a small portion of it.
3935   size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
3936   size_t default_reserve_size = os::win32::default_stack_size();
3937   size_t actual_reserve_size = stack_commit_size;
3938   if (stack_commit_size < default_reserve_size) {
3939     // If stack_commit_size == 0, we want this too
3940     actual_reserve_size = default_reserve_size;
3941   }
3942 
3943   // Check minimum allowable stack size for thread creation and to initialize
3944   // the java system classes, including StackOverflowError - depends on page
3945   // size.  Add a page for compiler2 recursion in main thread.
3946   // Add in 2*BytesPerWord times page size to account for VM stack during
3947   // class initialization depending on 32 or 64 bit VM.
3948   size_t min_stack_allowed =
3949             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
3950             2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
3951   if (actual_reserve_size < min_stack_allowed) {
3952     tty->print_cr("\nThe stack size specified is too small, "
3953                   "Specify at least %dk",
3954                   min_stack_allowed / K);
3955     return JNI_ERR;
3956   }
3957 
3958   JavaThread::set_stack_size_at_create(stack_commit_size);
3959 
  // Calculate theoretical max. size of Threads to guard against artificial
3961   // out-of-memory situations, where all available address-space has been
3962   // reserved by thread stacks.
3963   assert(actual_reserve_size != 0, "Must have a stack");
3964 
3965   // Calculate the thread limit when we should start doing Virtual Memory
3966   // banging. Currently when the threads will have used all but 200Mb of space.
3967   //
3968   // TODO: consider performing a similar calculation for commit size instead
3969   // as reserve size, since on a 64-bit platform we'll run into that more
3970   // often than running out of virtual memory space.  We can use the
3971   // lower value of the two calculations as the os_thread_limit.
3972   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
3973   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
3974 
3975   // at exit methods are called in the reverse order of their registration.
3976   // there is no limit to the number of functions registered. atexit does
3977   // not set errno.
3978 
3979   if (PerfAllowAtExitRegistration) {
3980     // only register atexit functions if PerfAllowAtExitRegistration is set.
3981     // atexit functions can be delayed until process exit time, which
3982     // can be problematic for embedded VM situations. Embedded VMs should
3983     // call DestroyJavaVM() to assure that VM resources are released.
3984 
3985     // note: perfMemory_exit_helper atexit function may be removed in
3986     // the future if the appropriate cleanup code can be added to the
3987     // VM_Exit VMOperation's doit method.
3988     if (atexit(perfMemory_exit_helper) != 0) {
3989       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3990     }
3991   }
3992 
3993 #ifndef _WIN64
3994   // Print something if NX is enabled (win32 on AMD64)
3995   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
3996 #endif
3997 
3998   // initialize thread priority policy
3999   prio_init();
4000 
4001   if (UseNUMA && !ForceNUMA) {
4002     UseNUMA = false; // We don't fully support this yet
4003   }
4004 
4005   if (UseNUMAInterleaving) {
4006     // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4007     bool success = numa_interleaving_init();
4008     if (!success) UseNUMAInterleaving = false;
4009   }
4010 
4011   if (initSock() != JNI_OK) {
4012     return JNI_ERR;
4013   }
4014 
4015   return JNI_OK;
4016 }
4017 
4018 void os::init_3(void) {
4019   return;
4020 }
4021 
4022 // Mark the polling page as unreadable
4023 void os::make_polling_page_unreadable(void) {
4024   DWORD old_status;
4025   if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
4026     fatal("Could not disable polling page");
4027 };
4028 
4029 // Mark the polling page as readable
4030 void os::make_polling_page_readable(void) {
4031   DWORD old_status;
4032   if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
4033     fatal("Could not enable polling page");
4034 };
4035 
4036 
4037 int os::stat(const char *path, struct stat *sbuf) {
4038   char pathbuf[MAX_PATH];
4039   if (strlen(path) > MAX_PATH - 1) {
4040     errno = ENAMETOOLONG;
4041     return -1;
4042   }
4043   os::native_path(strcpy(pathbuf, path));
4044   int ret = ::stat(pathbuf, sbuf);
4045   if (sbuf != NULL && UseUTCFileTimestamp) {
4046     // Fix for 6539723.  st_mtime returned from stat() is dependent on
4047     // the system timezone and so can return different values for the
4048     // same file if/when daylight savings time changes.  This adjustment
4049     // makes sure the same timestamp is returned regardless of the TZ.
4050     //
4051     // See:
4052     // http://msdn.microsoft.com/library/
4053     //   default.asp?url=/library/en-us/sysinfo/base/
4054     //   time_zone_information_str.asp
4055     // and
4056     // http://msdn.microsoft.com/library/default.asp?url=
4057     //   /library/en-us/sysinfo/base/settimezoneinformation.asp
4058     //
    // NOTE: there is an insidious bug here:  If the timezone is changed
4060     // after the call to stat() but before 'GetTimeZoneInformation()', then
4061     // the adjustment we do here will be wrong and we'll return the wrong
4062     // value (which will likely end up creating an invalid class data
4063     // archive).  Absent a better API for this, or some time zone locking
4064     // mechanism, we'll have to live with this risk.
4065     TIME_ZONE_INFORMATION tz;
4066     DWORD tzid = GetTimeZoneInformation(&tz);
4067     int daylightBias =
4068       (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
4069     sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4070   }
4071   return ret;
4072 }
4073 
4074 
4075 #define FT2INT64(ft) \
4076   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
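// FILETIME values are counts of 100 ns intervals; FT2INT64 combines the two
// 32-bit halves into a single 64-bit count. Callers below multiply by 100 to
// convert to nanoseconds.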
4077 
4078 
4079 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4080 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4081 // of a thread.
4082 //
4083 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4084 // the fast estimate available on the platform.
4085 
4086 // current_thread_cpu_time() is not optimized for Windows yet
4087 jlong os::current_thread_cpu_time() {
4088   // return user + sys since the cost is the same
4089   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4090 }
4091 
4092 jlong os::thread_cpu_time(Thread* thread) {
4093   // consistent with what current_thread_cpu_time() returns.
4094   return os::thread_cpu_time(thread, true /* user+sys */);
4095 }
4096 
4097 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4098   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4099 }
4100 
4101 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4103   // If this function changes, os::is_thread_cpu_time_supported() should too
4104   if (os::win32::is_nt()) {
4105     FILETIME CreationTime;
4106     FILETIME ExitTime;
4107     FILETIME KernelTime;
4108     FILETIME UserTime;
4109 
4110     if ( GetThreadTimes(thread->osthread()->thread_handle(),
4111                     &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
4112       return -1;
4113     else
4114       if (user_sys_cpu_time) {
4115         return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4116       } else {
4117         return FT2INT64(UserTime) * 100;
4118       }
4119   } else {
4120     return (jlong) timeGetTime() * 1000000;
4121   }
4122 }
4123 
4124 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4125   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4126   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4127   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4128   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4129 }
4130 
4131 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4132   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4133   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4134   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4135   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4136 }
4137 
4138 bool os::is_thread_cpu_time_supported() {
4139   // see os::thread_cpu_time
4140   if (os::win32::is_nt()) {
4141     FILETIME CreationTime;
4142     FILETIME ExitTime;
4143     FILETIME KernelTime;
4144     FILETIME UserTime;
4145 
4146     if ( GetThreadTimes(GetCurrentThread(),
4147                     &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
4148       return false;
4149     else
4150       return true;
4151   } else {
4152     return false;
4153   }
4154 }
4155 
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4157 // It does have primitives (PDH API) to get CPU usage and run queue length.
4158 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4159 // If we wanted to implement loadavg on Windows, we have a few options:
4160 //
4161 // a) Query CPU usage and run queue length and "fake" an answer by
4162 //    returning the CPU usage if it's under 100%, and the run queue
4163 //    length otherwise.  It turns out that querying is pretty slow
4164 //    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on Windows the CPU usage value is the % usage
4166 //    since the last time the API was called (and the first call
4167 //    returns 100%), so we'd have to deal with that as well.
4168 //
4169 // b) Sample the "fake" answer using a sampling thread and store
4170 //    the answer in a global variable.  The call to loadavg would
4171 //    just return the value of the global, avoiding the slow query.
4172 //
4173 // c) Sample a better answer using exponential decay to smooth the
4174 //    value.  This is basically the algorithm used by UNIX kernels.
4175 //
4176 // Note that sampling thread starvation could affect both (b) and (c).
4177 int os::loadavg(double loadavg[], int nelem) {
4178   return -1;
4179 }
4180 
4181 
4182 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4183 bool os::dont_yield() {
4184   return DontYieldALot;
4185 }
4186 
4187 // This method is a slightly reworked copy of JDK's sysOpen
4188 // from src/windows/hpi/src/sys_api_md.c
4189 
4190 int os::open(const char *path, int oflag, int mode) {
4191   char pathbuf[MAX_PATH];
4192 
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
4197   os::native_path(strcpy(pathbuf, path));
4198   return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4199 }
4200 
4201 FILE* os::open(int fd, const char* mode) {
4202   return ::_fdopen(fd, mode);
4203 }
4204 
4205 // Is a (classpath) directory empty?
4206 bool os::dir_is_empty(const char* path) {
4207   WIN32_FIND_DATA fd;
4208   HANDLE f = FindFirstFile(path, &fd);
4209   if (f == INVALID_HANDLE_VALUE) {
4210     return true;
4211   }
4212   FindClose(f);
4213   return false;
4214 }
4215 
4216 // create binary file, rewriting existing file if required
4217 int os::create_binary_file(const char* path, bool rewrite_existing) {
4218   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4219   if (!rewrite_existing) {
4220     oflags |= _O_EXCL;
4221   }
4222   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4223 }
4224 
4225 // return current position of file pointer
4226 jlong os::current_file_offset(int fd) {
4227   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4228 }
4229 
4230 // move file pointer to the specified offset
4231 jlong os::seek_to_file_offset(int fd, jlong offset) {
4232   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4233 }
4234 
4235 
4236 jlong os::lseek(int fd, jlong offset, int whence) {
4237   return (jlong) ::_lseeki64(fd, offset, whence);
4238 }
4239 
4240 // This method is a slightly reworked copy of JDK's sysNativePath
4241 // from src/windows/hpi/src/path_md.c
4242 
4243 /* Convert a pathname to native format.  On win32, this involves forcing all
4244    separators to be '\\' rather than '/' (both are legal inputs, but Win95
4245    sometimes rejects '/') and removing redundant separators.  The input path is
4246    assumed to have been converted into the character encoding used by the local
4247    system.  Because this might be a double-byte encoding, care is taken to
4248    treat double-byte lead characters correctly.
4249 
4250    This procedure modifies the given path in place, as the result is never
4251    longer than the original.  There is no error return; this operation always
4252    succeeds. */
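
/* Illustrative examples (assuming no double-byte lead characters):
     "/c:/foo//bar/"   becomes   "c:\foo\bar"
     "//host/share/"   becomes   "\\host\share"   */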
4253 char * os::native_path(char *path) {
4254   char *src = path, *dst = path, *end = path;
4255   char *colon = NULL;           /* If a drive specifier is found, this will
4256                                         point to the colon following the drive
4257                                         letter */
4258 
4259   /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */
4260   assert(((!::IsDBCSLeadByte('/'))
4261     && (!::IsDBCSLeadByte('\\'))
4262     && (!::IsDBCSLeadByte(':'))),
4263     "Illegal lead byte");
4264 
4265   /* Check for leading separators */
4266 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4267   while (isfilesep(*src)) {
4268     src++;
4269   }
4270 
4271   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4272     /* Remove leading separators if followed by drive specifier.  This
4273       hack is necessary to support file URLs containing drive
4274       specifiers (e.g., "file://c:/path").  As a side effect,
4275       "/c:/path" can be used as an alternative to "c:/path". */
4276     *dst++ = *src++;
4277     colon = dst;
4278     *dst++ = ':';
4279     src++;
4280   } else {
4281     src = path;
4282     if (isfilesep(src[0]) && isfilesep(src[1])) {
4283       /* UNC pathname: Retain first separator; leave src pointed at
4284          second separator so that further separators will be collapsed
4285          into the second separator.  The result will be a pathname
4286          beginning with "\\\\" followed (most likely) by a host name. */
4287       src = dst = path + 1;
4288       path[0] = '\\';     /* Force first separator to '\\' */
4289     }
4290   }
4291 
4292   end = dst;
4293 
4294   /* Remove redundant separators from remainder of path, forcing all
4295       separators to be '\\' rather than '/'. Also, single byte space
4296       characters are removed from the end of the path because those
4297       are not legal ending characters on this operating system.
4298   */
4299   while (*src != '\0') {
4300     if (isfilesep(*src)) {
4301       *dst++ = '\\'; src++;
4302       while (isfilesep(*src)) src++;
4303       if (*src == '\0') {
4304         /* Check for trailing separator */
4305         end = dst;
4306         if (colon == dst - 2) break;                      /* "z:\\" */
4307         if (dst == path + 1) break;                       /* "\\" */
4308         if (dst == path + 2 && isfilesep(path[0])) {
4309           /* "\\\\" is not collapsed to "\\" because "\\\\" marks the
4310             beginning of a UNC pathname.  Even though it is not, by
4311             itself, a valid UNC pathname, we leave it as is in order
4312             to be consistent with the path canonicalizer as well
4313             as the win32 APIs, which treat this case as an invalid
4314             UNC pathname rather than as an alias for the root
4315             directory of the current drive. */
4316           break;
4317         }
4318         end = --dst;  /* Path does not denote a root directory, so
4319                                     remove trailing separator */
4320         break;
4321       }
4322       end = dst;
4323     } else {
4324       if (::IsDBCSLeadByte(*src)) { /* Copy a double-byte character */
4325         *dst++ = *src++;
4326         if (*src) *dst++ = *src++;
4327         end = dst;
4328       } else {         /* Copy a single-byte character */
4329         char c = *src++;
4330         *dst++ = c;
4331         /* Space is not a legal ending character */
4332         if (c != ' ') end = dst;
4333       }
4334     }
4335   }
4336 
4337   *end = '\0';
4338 
4339   /* For "z:", add "." to work around a bug in the C runtime library */
4340   if (colon == dst - 1) {
4341           path[2] = '.';
4342           path[3] = '\0';
4343   }
4344 
4345   return path;
4346 }
4347 
4348 // This code is a copy of JDK's sysSetLength
4349 // from src/windows/hpi/src/sys_api_md.c
4350 
4351 int os::ftruncate(int fd, jlong length) {
4352   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4353   long high = (long)(length >> 32);
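  // SetFilePointer takes the 64-bit offset as separate low/high 32-bit
  // halves. Its return value 0xFFFFFFFF can also be a valid low half, so the
  // GetLastError() check below is required to distinguish an actual error.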
4354   DWORD ret;
4355 
4356   if (h == (HANDLE)(-1)) {
4357     return -1;
4358   }
4359 
4360   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4361   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4362       return -1;
4363   }
4364 
4365   if (::SetEndOfFile(h) == FALSE) {
4366     return -1;
4367   }
4368 
4369   return 0;
4370 }
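
     // Usage sketch (illustrative only; 'fd' is assumed to be a writable file
     // descriptor obtained elsewhere):
     //
     //   if (os::ftruncate(fd, (jlong)0) != 0) {
     //     // truncation failed; the file length is unchanged
     //   }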
4371 
4372 
4373 // This code is a copy of JDK's sysSync
4374 // from src/windows/hpi/src/sys_api_md.c
4375 // except for the legacy workaround for a bug in Win 98
4376 
4377 int os::fsync(int fd) {
4378   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4379 
4380   if ( (!::FlushFileBuffers(handle)) &&
4381          (GetLastError() != ERROR_ACCESS_DENIED) ) {
4382     /* from winerror.h */
4383     return -1;
4384   }
4385   return 0;
4386 }
4387 
4388 static int nonSeekAvailable(int, long *);
4389 static int stdinAvailable(int, long *);
4390 
4391 #define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
4392 #define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4393 
4394 // This code is a copy of JDK's sysAvailable
4395 // from src/windows/hpi/src/sys_api_md.c
4396 
4397 int os::available(int fd, jlong *bytes) {
4398   jlong cur, end;
4399   struct _stati64 stbuf64;
4400 
4401   if (::_fstati64(fd, &stbuf64) >= 0) {
4402     int mode = stbuf64.st_mode;
4403     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4404       int ret;
4405       long lpbytes;
4406       if (fd == 0) {
4407         ret = stdinAvailable(fd, &lpbytes);
4408       } else {
4409         ret = nonSeekAvailable(fd, &lpbytes);
4410       }
4411       (*bytes) = (jlong)(lpbytes);
4412       return ret;
4413     }
4414     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4415       return FALSE;
4416     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4417       return FALSE;
4418     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4419       return FALSE;
4420     }
4421     *bytes = end - cur;
4422     return TRUE;
4423   } else {
4424     return FALSE;
4425   }
4426 }
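
     // Usage sketch (illustrative only): query how many bytes can be read from
     // descriptor 'fd' without blocking, as computed by the routine above.
     //
     //   jlong remaining = 0;
     //   if (os::available(fd, &remaining)) {
     //     // 'remaining' is end - cur for seekable files, or the pipe/console
     //     // estimate for non-seekable descriptors
     //   }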
4427 
4428 // This code is a copy of JDK's nonSeekAvailable
4429 // from src/windows/hpi/src/sys_api_md.c
4430 
4431 static int nonSeekAvailable(int fd, long *pbytes) {
4432   /* This is used for available on non-seekable devices
4433    * (like both named and anonymous pipes, such as pipes
4434    * connected to an exec'd process).
4435    * Standard Input is a special case.
4436    *
4437    */
4438   HANDLE han;
4439 
4440   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4441     return FALSE;
4442   }
4443 
4444   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4445         /* PeekNamedPipe fails when at EOF.  In that case we
4446          * simply make *pbytes = 0 which is consistent with the
4447          * behavior we get on Solaris when an fd is at EOF.
4448          * The only alternative is to raise an Exception,
4449          * which isn't really warranted.
4450          */
4451     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4452       return FALSE;
4453     }
4454     *pbytes = 0;
4455   }
4456   return TRUE;
4457 }
4458 
4459 #define MAX_INPUT_EVENTS 2000
4460 
4461 // This code is a copy of JDK's stdinAvailable
4462 // from src/windows/hpi/src/sys_api_md.c
4463 
4464 static int stdinAvailable(int fd, long *pbytes) {
4465   HANDLE han;
4466   DWORD numEventsRead = 0;      /* Number of events read from buffer */
4467   DWORD numEvents = 0;  /* Number of events in buffer */
4468   DWORD i = 0;          /* Loop index */
4469   DWORD curLength = 0;  /* Position marker */
4470   DWORD actualLength = 0;       /* Number of bytes readable */
4471   BOOL error = FALSE;         /* Error holder */
4472   INPUT_RECORD *lpBuffer;     /* Pointer to records of input events */
4473 
4474   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4475     return FALSE;
4476   }
4477 
4478   /* Construct an array of input records in the console buffer */
4479   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4480   if (error == 0) {
4481     return nonSeekAvailable(fd, pbytes);
4482   }
4483 
4484   /* lpBuffer must fit into 64K or else PeekConsoleInput fails */
4485   if (numEvents > MAX_INPUT_EVENTS) {
4486     numEvents = MAX_INPUT_EVENTS;
4487   }
4488 
4489   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4490   if (lpBuffer == NULL) {
4491     return FALSE;
4492   }
4493 
4494   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4495   if (error == 0) {
4496     os::free(lpBuffer, mtInternal);
4497     return FALSE;
4498   }
4499 
4500   /* Examine input records for the number of bytes available */
4501   for (i = 0; i < numEvents; i++) {
4502     if (lpBuffer[i].EventType == KEY_EVENT) {
4503 
4504       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4505                                       &(lpBuffer[i].Event);
4506       if (keyRecord->bKeyDown == TRUE) {
4507         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4508         curLength++;
4509         if (*keyPressed == '\r') {
4510           actualLength = curLength;
4511         }
4512       }
4513     }
4514   }
4515 
4516   if (lpBuffer != NULL) {
4517     os::free(lpBuffer, mtInternal);
4518   }
4519 
4520   *pbytes = (long) actualLength;
4521   return TRUE;
4522 }
4523 
4524 // Map a block of memory.
4525 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4526                      char *addr, size_t bytes, bool read_only,
4527                      bool allow_exec) {
4528   HANDLE hFile;
4529   char* base;
4530 
4531   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4532                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4533   if (hFile == INVALID_HANDLE_VALUE) {  // CreateFile reports failure with INVALID_HANDLE_VALUE, not NULL
4534     if (PrintMiscellaneous && Verbose) {
4535       DWORD err = GetLastError();
4536       tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
4537     }
4538     return NULL;
4539   }
4540 
4541   if (allow_exec) {
4542     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4543     // unless it comes from a PE image (which the shared archive is not.)
4544     // Even VirtualProtect refuses to give execute access to mapped memory
4545     // that was not previously executable.
4546     //
4547     // Instead, stick the executable region in anonymous memory.  Yuck.
4548     // Penalty is that ~4 pages will not be shareable - in the future
4549     // we might consider DLLizing the shared archive with a proper PE
4550     // header so that mapping executable + sharing is possible.
4551 
4552     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4553                                 PAGE_READWRITE);
4554     if (base == NULL) {
4555       if (PrintMiscellaneous && Verbose) {
4556         DWORD err = GetLastError();
4557         tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
4558       }
4559       CloseHandle(hFile);
4560       return NULL;
4561     }
4562 
4563     DWORD bytes_read;
4564     OVERLAPPED overlapped;
4565     overlapped.Offset = (DWORD)file_offset;
4566     overlapped.OffsetHigh = 0;
4567     overlapped.hEvent = NULL;
4568     // ReadFile guarantees that if the return value is true, the requested
4569     // number of bytes were read before returning.
4570     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4571     if (!res) {
4572       if (PrintMiscellaneous && Verbose) {
4573         DWORD err = GetLastError();
4574         tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
4575       }
4576       release_memory(base, bytes);
4577       CloseHandle(hFile);
4578       return NULL;
4579     }
4580   } else {
4581     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4582                                     NULL /*file_name*/);
4583     if (hMap == NULL) {
4584       if (PrintMiscellaneous && Verbose) {
4585         DWORD err = GetLastError();
4586         tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
4587       }
4588       CloseHandle(hFile);
4589       return NULL;
4590     }
4591 
4592     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4593     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4594                                   (DWORD)bytes, addr);
4595     if (base == NULL) {
4596       if (PrintMiscellaneous && Verbose) {
4597         DWORD err = GetLastError();
4598         tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
4599       }
4600       CloseHandle(hMap);
4601       CloseHandle(hFile);
4602       return NULL;
4603     }
4604 
4605     if (CloseHandle(hMap) == 0) {
4606       if (PrintMiscellaneous && Verbose) {
4607         DWORD err = GetLastError();
4608         tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
4609       }
4610       CloseHandle(hFile);
4611       return base;
4612     }
4613   }
4614 
4615   if (allow_exec) {
4616     DWORD old_protect;
4617     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4618     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4619 
4620     if (!res) {
4621       if (PrintMiscellaneous && Verbose) {
4622         DWORD err = GetLastError();
4623         tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
4624       }
4625       // Don't consider this a hard error, on IA32 even if the
4626       // VirtualProtect fails, we should still be able to execute
4627       CloseHandle(hFile);
4628       return base;
4629     }
4630   }
4631 
4632   if (CloseHandle(hFile) == 0) {
4633     if (PrintMiscellaneous && Verbose) {
4634       DWORD err = GetLastError();
4635       tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
4636     }
4637     return base;
4638   }
4639 
4640   return base;
4641 }
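
     // Usage sketch (illustrative only): the platform-independent wrapper
     // os::map_memory() forwards here.  A read-only, non-executable mapping of
     // the first 'len' bytes of a file might look like this ('fd', 'name' and
     // 'len' are assumed to come from the caller):
     //
     //   char* mapped = os::map_memory(fd, name, 0 /*offset*/, NULL /*addr hint*/,
     //                                 len, true /*read_only*/, false /*allow_exec*/);
     //   if (mapped != NULL) {
     //     // ... use the mapping ...
     //     os::unmap_memory(mapped, len);
     //   }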
4642 
4643 
4644 // Remap a block of memory.
4645 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4646                        char *addr, size_t bytes, bool read_only,
4647                        bool allow_exec) {
4648   // This OS does not allow existing memory maps to be remapped so we
4649   // have to unmap the memory before we remap it.
4650   if (!os::unmap_memory(addr, bytes)) {
4651     return NULL;
4652   }
4653 
4654   // There is a very small theoretical window between the unmap_memory()
4655   // call above and the map_memory() call below where a thread in native
4656   // code may be able to access an address that is no longer mapped.
4657 
4658   return os::map_memory(fd, file_name, file_offset, addr, bytes,
4659            read_only, allow_exec);
4660 }
4661 
4662 
4663 // Unmap a block of memory.
4664 // Returns true=success, otherwise false.
4665 
4666 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4667   BOOL result = UnmapViewOfFile(addr);
4668   if (result == 0) {
4669     if (PrintMiscellaneous && Verbose) {
4670       DWORD err = GetLastError();
4671       tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err);
4672     }
4673     return false;
4674   }
4675   return true;
4676 }
4677 
4678 void os::pause() {
4679   char filename[MAX_PATH];
4680   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4681     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4682   } else {
4683     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4684   }
4685 
4686   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4687   if (fd != -1) {
4688     struct stat buf;
4689     ::close(fd);
4690     while (::stat(filename, &buf) == 0) {
4691       Sleep(100);
4692     }
4693   } else {
4694     jio_fprintf(stderr,
4695       "Could not open pause file '%s', continuing immediately.\n", filename);
4696   }
4697 }
4698 
4699 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
4700   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
4701 }
4702 
4703 /*
4704  * See the caveats for this class in os_windows.hpp
4705  * Protects the callback call so that a raised OS EXCEPTION causes a jump back
4706  * into this method, which then returns false. If no OS EXCEPTION was raised,
4707  * returns true.
4708  * The callback provides the method that should be protected.
4709  */
4710 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
4711   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
4712   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
4713       "crash_protection already set?");
4714 
4715   bool success = true;
4716   __try {
4717     WatcherThread::watcher_thread()->set_crash_protection(this);
4718     cb.call();
4719   } __except(EXCEPTION_EXECUTE_HANDLER) {
4720     // only for protection, nothing to do
4721     success = false;
4722   }
4723   WatcherThread::watcher_thread()->set_crash_protection(NULL);
4724   return success;
4725 }
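
     // Usage sketch (illustrative only, assuming a concrete subclass of
     // os::CrashProtectionCallback that overrides call(); per the asserts above,
     // only the WatcherThread may use this):
     //
     //   class SampleCallback : public os::CrashProtectionCallback {
     //    public:
     //     virtual void call() { /* work that may raise an OS exception */ }
     //   };
     //
     //   SampleCallback cb;
     //   os::WatcherThreadCrashProtection crash_protection;
     //   bool ok = crash_protection.call(cb);  // false => an OS exception was swallowed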
4726 
4727 // An Event wraps a win32 "CreateEvent" kernel handle.
4728 //
4729 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
4730 //
4731 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
4732 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
4733 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
4734 //     In addition, an unpark() operation might fetch the handle field, but the
4735 //     event could recycle between the fetch and the SetEvent() operation.
4736 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
4737 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
4738 //     on a stale but recycled handle would be harmless, but in practice this might
4739 //     confuse other non-Sun code, so it's not a viable approach.
4740 //
4741 // 2:  Once a win32 event handle is associated with an Event, it remains associated
4742 //     with the Event.  The event handle is never closed.  This could be construed
4743 //     as handle leakage, but only up to the maximum # of threads that have been extant
4744 //     at any one time.  This shouldn't be an issue, as Windows platforms typically
4745 //     permit a process to have hundreds of thousands of open handles.
4746 //
4747 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
4748 //     and release unused handles.
4749 //
4750 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
4751 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
4752 //
4753 // 5.  Use an RCU-like mechanism (Read-Copy Update).
4754 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
4755 //
4756 // We use (2).
4757 //
4758 // TODO-FIXME:
4759 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
4760 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
4761 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
4762 // 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
4763 //     into a single win32 CreateEvent() handle.
4764 //
4765 // _Event transitions in park()
4766 //   -1 => -1 : illegal
4767 //    1 =>  0 : pass - return immediately
4768 //    0 => -1 : block
4769 //
4770 // _Event serves as a restricted-range semaphore :
4771 //    -1 : thread is blocked
4772 //     0 : neutral  - thread is running or ready
4773 //     1 : signaled - thread is running or ready
4774 //
4775 // Another possible encoding of _Event would be
4776 // with explicit "PARKED" and "SIGNALED" bits.
4777 
4778 int os::PlatformEvent::park (jlong Millis) {
4779     guarantee (_ParkHandle != NULL , "Invariant") ;
4780     guarantee (Millis > 0          , "Invariant") ;
4781     int v ;
4782 
4783     // CONSIDER: defer assigning a CreateEvent() handle to the Event until
4784     // the initial park() operation.
4785 
4786     for (;;) {
4787         v = _Event ;
4788         if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
4789     }
4790     guarantee ((v == 0) || (v == 1), "invariant") ;
4791     if (v != 0) return OS_OK ;
4792 
4793     // Do this the hard way by blocking ...
4794     // TODO: consider a brief spin here, gated on the success of recent
4795     // spin attempts by this thread.
4796     //
4797     // We decompose long timeouts into series of shorter timed waits.
4798     // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
4799     // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
4800     // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
4801     // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
4802     // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
4803     // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
4804     // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
4805     // for the already waited time.  This policy does not admit any new outcomes.
4806     // In the future, however, we might want to track the accumulated wait time and
4807     // adjust Millis accordingly if we encounter a spurious wakeup.
4808 
4809     const int MAXTIMEOUT = 0x10000000 ;
4810     DWORD rv = WAIT_TIMEOUT ;
4811     while (_Event < 0 && Millis > 0) {
4812        DWORD prd = Millis ;     // set prd = MIN(Millis, MAXTIMEOUT)
4813        if (Millis > MAXTIMEOUT) {
4814           prd = MAXTIMEOUT ;
4815        }
4816        rv = ::WaitForSingleObject (_ParkHandle, prd) ;
4817        assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ;
4818        if (rv == WAIT_TIMEOUT) {
4819            Millis -= prd ;
4820        }
4821     }
4822     v = _Event ;
4823     _Event = 0 ;
4824     // see comment at end of os::PlatformEvent::park() below:
4825     OrderAccess::fence() ;
4826     // If we encounter a nearly simultaneous timeout expiry and unpark()
4827     // we return OS_OK indicating we awoke via unpark().
4828     // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
4829     return (v >= 0) ? OS_OK : OS_TIMEOUT ;
4830 }
4831 
4832 void os::PlatformEvent::park () {
4833     guarantee (_ParkHandle != NULL, "Invariant") ;
4834     // Invariant: Only the thread associated with the Event/PlatformEvent
4835     // may call park().
4836     int v ;
4837     for (;;) {
4838         v = _Event ;
4839         if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
4840     }
4841     guarantee ((v == 0) || (v == 1), "invariant") ;
4842     if (v != 0) return ;
4843 
4844     // Do this the hard way by blocking ...
4845     // TODO: consider a brief spin here, gated on the success of recent
4846     // spin attempts by this thread.
4847     while (_Event < 0) {
4848        DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ;
4849        assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ;
4850     }
4851 
4852     // Usually we'll find _Event == 0 at this point, but as
4853     // an optional optimization we clear it, just in case multiple
4854     // unpark() operations drove _Event up to 1.
4855     _Event = 0 ;
4856     OrderAccess::fence() ;
4857     guarantee (_Event >= 0, "invariant") ;
4858 }
4859 
4860 void os::PlatformEvent::unpark() {
4861   guarantee (_ParkHandle != NULL, "Invariant") ;
4862 
4863   // Transitions for _Event:
4864   //    0 :=> 1
4865   //    1 :=> 1
4866   //   -1 :=> either 0 or 1; must signal target thread
4867   //          That is, we can safely transition _Event from -1 to either
4868   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
4869   //          unpark() calls.
4870   // See also: "Semaphores in Plan 9" by Mullender & Cox
4871   //
4872   // Note: Forcing a transition from "-1" to "1" on an unpark() means
4873   // that it will take two back-to-back park() calls for the owning
4874   // thread to block. This has the benefit of forcing a spurious return
4875   // from the first park() call after an unpark() call which will help
4876   // shake out uses of park() and unpark() without condition variables.
4877 
4878   if (Atomic::xchg(1, &_Event) >= 0) return;
4879 
4880   ::SetEvent(_ParkHandle);
4881 }
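
     // Usage sketch (illustrative only) of the park()/unpark() pairing described
     // by the _Event transition comments above; 'event' is assumed to be the
     // os::PlatformEvent associated with the parking thread:
     //
     //   event->park();      // owning thread: blocks while _Event < 0
     //   ...
     //   event->unpark();    // another thread: _Event := 1, SetEvent() wakes the parker
     //
     // The timed variant, event->park(millis), returns OS_OK when woken by
     // unpark() and OS_TIMEOUT when the wait expires.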
4882 
4883 
4884 // JSR166
4885 // -------------------------------------------------------
4886 
4887 /*
4888  * The Windows implementation of Park is very straightforward: Basic
4889  * operations on Win32 Events turn out to have the right semantics to
4890  * use them directly. We opportunistically reuse the event inherited
4891  * from Monitor.
4892  */
4893 
4894 
4895 void Parker::park(bool isAbsolute, jlong time) {
4896   guarantee (_ParkEvent != NULL, "invariant") ;
4897   // First, demultiplex/decode time arguments
4898   if (time < 0) { // don't wait
4899     return;
4900   }
4901   else if (time == 0 && !isAbsolute) {
4902     time = INFINITE;
4903   }
4904   else if  (isAbsolute) {
4905     time -= os::javaTimeMillis(); // convert to relative time
4906     if (time <= 0) // already elapsed
4907       return;
4908   }
4909   else { // relative
4910     time /= 1000000; // Must coarsen from nanos to millis
4911     if (time == 0)   // Wait for the minimal time unit if zero
4912       time = 1;
4913   }
4914 
4915   JavaThread* thread = (JavaThread*)(Thread::current());
4916   assert(thread->is_Java_thread(), "Must be JavaThread");
4917   JavaThread *jt = (JavaThread *)thread;
4918 
4919   // Don't wait if interrupted or already triggered
4920   if (Thread::is_interrupted(thread, false) ||
4921     WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
4922     ResetEvent(_ParkEvent);
4923     return;
4924   }
4925   else {
4926     ThreadBlockInVM tbivm(jt);
4927     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4928     jt->set_suspend_equivalent();
4929 
4930     WaitForSingleObject(_ParkEvent,  time);
4931     ResetEvent(_ParkEvent);
4932 
4933     // If externally suspended while waiting, re-suspend
4934     if (jt->handle_special_suspend_equivalent_condition()) {
4935       jt->java_suspend_self();
4936     }
4937   }
4938 }
4939 
4940 void Parker::unpark() {
4941   guarantee (_ParkEvent != NULL, "invariant") ;
4942   SetEvent(_ParkEvent);
4943 }
4944 
4945 // Run the specified command in a separate process. Return its exit value,
4946 // or -1 on failure (e.g. can't create a new process).
4947 int os::fork_and_exec(char* cmd) {
4948   STARTUPINFO si;
4949   PROCESS_INFORMATION pi;
4950 
4951   memset(&si, 0, sizeof(si));
4952   si.cb = sizeof(si);
4953   memset(&pi, 0, sizeof(pi));
4954   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
4955                             cmd,    // command line
4956                             NULL,   // process security attribute
4957                             NULL,   // thread security attribute
4958                             TRUE,   // inherits system handles
4959                             0,      // no creation flags
4960                             NULL,   // use parent's environment block
4961                             NULL,   // use parent's starting directory
4962                             &si,    // (in) startup information
4963                             &pi);   // (out) process information
4964 
4965   if (rslt) {
4966     // Wait until child process exits.
4967     WaitForSingleObject(pi.hProcess, INFINITE);
4968 
4969     DWORD exit_code;
4970     GetExitCodeProcess(pi.hProcess, &exit_code);
4971 
4972     // Close process and thread handles.
4973     CloseHandle(pi.hProcess);
4974     CloseHandle(pi.hThread);
4975 
4976     return (int)exit_code;
4977   } else {
4978     return -1;
4979   }
4980 }
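
     // Usage sketch (illustrative only; the command string is a hypothetical
     // example, and the signature takes a non-const char*, so pass a writable
     // buffer):
     //
     //   char cmd[] = "cmd.exe /c echo hello";
     //   int exit_code = os::fork_and_exec(cmd);
     //   if (exit_code == -1) {
     //     // process creation failed
     //   }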
4981 
4982 //--------------------------------------------------------------------------------------------------
4983 // Non-product code
4984 
4985 static int mallocDebugIntervalCounter = 0;
4986 static int mallocDebugCounter = 0;
4987 bool os::check_heap(bool force) {
4988   if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
4989   if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
4990     // Note: HeapValidate executes two hardware breakpoints when it finds something
4991     // wrong; at these points, eax contains the address of the offending block (I think).
4992     // To get to the explicit error message(s) below, just continue twice.
4993     HANDLE heap = GetProcessHeap();
4994     { HeapLock(heap);
4995       PROCESS_HEAP_ENTRY phe;
4996       phe.lpData = NULL;
4997       while (HeapWalk(heap, &phe) != 0) {
4998         if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
4999             !HeapValidate(heap, 0, phe.lpData)) {
5000           tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
5001           tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
5002           fatal("corrupted C heap");
5003         }
5004       }
5005       DWORD err = GetLastError();
5006       if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
5007         fatal(err_msg("heap walk aborted with error %d", err));
5008       }
5009       HeapUnlock(heap);
5010     }
5011     mallocDebugIntervalCounter = 0;
5012   }
5013   return true;
5014 }
5015 
5016 
5017 bool os::find(address addr, outputStream* st) {
5018   // Nothing yet
5019   return false;
5020 }
5021 
5022 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5023   DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5024 
5025   if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
5026     JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
5027     PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5028     address addr = (address) exceptionRecord->ExceptionInformation[1];
5029 
5030     if (os::is_memory_serialize_page(thread, addr))
5031       return EXCEPTION_CONTINUE_EXECUTION;
5032   }
5033 
5034   return EXCEPTION_CONTINUE_SEARCH;
5035 }
5036 
5037 // We don't build a headless jre for Windows
5038 bool os::is_headless_jre() { return false; }
5039 
5040 static jint initSock() {
5041   WSADATA wsadata;
5042 
5043   if (!os::WinSock2Dll::WinSock2Available()) {
5044     jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
5045       ::GetLastError());
5046     return JNI_ERR;
5047   }
5048 
5049   if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5050     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5051       ::GetLastError());
5052     return JNI_ERR;
5053   }
5054   return JNI_OK;
5055 }
5056 
5057 struct hostent* os::get_host_by_name(char* name) {
5058   return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
5059 }
5060 
5061 int os::socket_close(int fd) {
5062   return ::closesocket(fd);
5063 }
5064 
5065 int os::socket_available(int fd, jint *pbytes) {
5066   int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
5067   return (ret < 0) ? 0 : 1;
5068 }
5069 
5070 int os::socket(int domain, int type, int protocol) {
5071   return ::socket(domain, type, protocol);
5072 }
5073 
5074 int os::listen(int fd, int count) {
5075   return ::listen(fd, count);
5076 }
5077 
5078 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5079   return ::connect(fd, him, len);
5080 }
5081 
5082 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
5083   return ::accept(fd, him, len);
5084 }
5085 
5086 int os::sendto(int fd, char* buf, size_t len, uint flags,
5087                struct sockaddr* to, socklen_t tolen) {
5088 
5089   return ::sendto(fd, buf, (int)len, flags, to, tolen);
5090 }
5091 
5092 int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
5093                  sockaddr* from, socklen_t* fromlen) {
5094 
5095   return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
5096 }
5097 
5098 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5099   return ::recv(fd, buf, (int)nBytes, flags);
5100 }
5101 
5102 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5103   return ::send(fd, buf, (int)nBytes, flags);
5104 }
5105 
5106 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5107   return ::send(fd, buf, (int)nBytes, flags);
5108 }
5109 
5110 int os::timeout(int fd, long timeout) {
5111   fd_set tbl;
5112   struct timeval t;
5113 
5114   t.tv_sec  = timeout / 1000;
5115   t.tv_usec = (timeout % 1000) * 1000;
5116 
5117   tbl.fd_count    = 1;
5118   tbl.fd_array[0] = fd;
5119 
5120   return ::select(1, &tbl, 0, 0, &t);
5121 }
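
     // Usage sketch (illustrative only): wait up to two seconds for socket 'fd'
     // to become readable using the select()-based helper above.
     //
     //   int ready = os::timeout(fd, 2000 /* milliseconds */);
     //   // ready > 0: readable,  ready == 0: timed out,  ready < 0: error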
5122 
5123 int os::get_host_name(char* name, int namelen) {
5124   return ::gethostname(name, namelen);
5125 }
5126 
5127 int os::socket_shutdown(int fd, int howto) {
5128   return ::shutdown(fd, howto);
5129 }
5130 
5131 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
5132   return ::bind(fd, him, len);
5133 }
5134 
5135 int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
5136   return ::getsockname(fd, him, len);
5137 }
5138 
5139 int os::get_sock_opt(int fd, int level, int optname,
5140                      char* optval, socklen_t* optlen) {
5141   return ::getsockopt(fd, level, optname, optval, optlen);
5142 }
5143 
5144 int os::set_sock_opt(int fd, int level, int optname,
5145                      const char* optval, socklen_t optlen) {
5146   return ::setsockopt(fd, level, optname, optval, optlen);
5147 }
5148 
5149 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5150 #if defined(IA32)
5151 #  define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5152 #elif defined (AMD64)
5153 #  define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5154 #endif
5155 
5156 // returns true if thread could be suspended,
5157 // false otherwise
5158 static bool do_suspend(HANDLE* h) {
5159   if (h != NULL) {
5160     if (SuspendThread(*h) != ~0) {
5161       return true;
5162     }
5163   }
5164   return false;
5165 }
5166 
5167 // resume the thread
5168 // calling resume on an active thread is a no-op
5169 static void do_resume(HANDLE* h) {
5170   if (h != NULL) {
5171     ResumeThread(*h);
5172   }
5173 }
5174 
5175 // retrieve a suspend/resume context capable handle
5176 // from the tid. Caller validates handle return value.
5177 void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
5178   if (h != NULL) {
5179     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5180   }
5181 }
5182 
5183 //
5184 // Thread sampling implementation
5185 //
5186 void os::SuspendedThreadTask::internal_do_task() {
5187   CONTEXT    ctxt;
5188   HANDLE     h = NULL;
5189 
5190   // get context capable handle for thread
5191   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5192 
5193   // sanity
5194   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5195     return;
5196   }
5197 
5198   // suspend the thread
5199   if (do_suspend(&h)) {
5200     ctxt.ContextFlags = sampling_context_flags;
5201     // get thread context
5202     GetThreadContext(h, &ctxt);
5203     SuspendedThreadTaskContext context(_thread, &ctxt);
5204     // pass context to Thread Sampling impl
5205     do_task(context);
5206     // resume thread
5207     do_resume(&h);
5208   }
5209 
5210   // close handle
5211   CloseHandle(h);
5212 }
5213 
5214 
5215 // Kernel32 API
5216 typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
5217 typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
5218 typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
5219 typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
5220 typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);
5221 
5222 GetLargePageMinimum_Fn      os::Kernel32Dll::_GetLargePageMinimum = NULL;
5223 VirtualAllocExNuma_Fn       os::Kernel32Dll::_VirtualAllocExNuma = NULL;
5224 GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
5225 GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
5226 RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;
5227 
5228 
5229 BOOL                        os::Kernel32Dll::initialized = FALSE;
5230 SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
5231   assert(initialized && _GetLargePageMinimum != NULL,
5232     "GetLargePageMinimumAvailable() not yet called");
5233   return _GetLargePageMinimum();
5234 }
5235 
5236 BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
5237   if (!initialized) {
5238     initialize();
5239   }
5240   return _GetLargePageMinimum != NULL;
5241 }
5242 
5243 BOOL os::Kernel32Dll::NumaCallsAvailable() {
5244   if (!initialized) {
5245     initialize();
5246   }
5247   return _VirtualAllocExNuma != NULL;
5248 }
5249 
5250 LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
5251   assert(initialized && _VirtualAllocExNuma != NULL,
5252     "NUMACallsAvailable() not yet called");
5253 
5254   return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
5255 }
5256 
5257 BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
5258   assert(initialized && _GetNumaHighestNodeNumber != NULL,
5259     "NUMACallsAvailable() not yet called");
5260 
5261   return _GetNumaHighestNodeNumber(ptr_highest_node_number);
5262 }
5263 
5264 BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
5265   assert(initialized && _GetNumaNodeProcessorMask != NULL,
5266     "NUMACallsAvailable() not yet called");
5267 
5268   return _GetNumaNodeProcessorMask(node, proc_mask);
5269 }
5270 
5271 USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
5272   ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
5273     if (!initialized) {
5274       initialize();
5275     }
5276 
5277     if (_RtlCaptureStackBackTrace != NULL) {
5278       return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
5279         BackTrace, BackTraceHash);
5280     } else {
5281       return 0;
5282     }
5283 }
5284 
5285 void os::Kernel32Dll::initializeCommon() {
5286   if (!initialized) {
5287     HMODULE handle = ::GetModuleHandle("Kernel32.dll");
5288     assert(handle != NULL, "Just check");
5289     _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
5290     _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
5291     _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
5292     _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
5293     _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
5294     initialized = TRUE;
5295   }
5296 }
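
     // Usage sketch (illustrative only) of the lazy-binding pattern above:
     // callers are expected to probe availability first, which triggers
     // initialize(), and only then invoke the wrapper.
     //
     //   if (os::Kernel32Dll::GetLargePageMinimumAvailable()) {
     //     SIZE_T min_large_page_size = os::Kernel32Dll::GetLargePageMinimum();
     //     // ...
     //   }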
5297 
5298 
5299 
5300 #ifndef JDK6_OR_EARLIER
5301 
5302 void os::Kernel32Dll::initialize() {
5303   initializeCommon();
5304 }
5305 
5306 
5307 // Kernel32 API
5308 inline BOOL os::Kernel32Dll::SwitchToThread() {
5309   return ::SwitchToThread();
5310 }
5311 
5312 inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
5313   return true;
5314 }
5315 
5316 // Help tools
5317 inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
5318   return true;
5319 }
5320 
5321 inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
5322   return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
5323 }
5324 
5325 inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
5326   return ::Module32First(hSnapshot, lpme);
5327 }
5328 
5329 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
5330   return ::Module32Next(hSnapshot, lpme);
5331 }
5332 
5333 
5334 inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
5335   return true;
5336 }
5337 
5338 inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
5339   ::GetNativeSystemInfo(lpSystemInfo);
5340 }
5341 
5342 // PSAPI API
5343 inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
5344   return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
5345 }
5346 
5347 inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
5348   return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
5349 }
5350 
5351 inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
5352   return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
5353 }
5354 
5355 inline BOOL os::PSApiDll::PSApiAvailable() {
5356   return true;
5357 }
5358 
5359 
5360 // WinSock2 API
5361 inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
5362   return ::WSAStartup(wVersionRequested, lpWSAData);
5363 }
5364 
5365 inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
5366   return ::gethostbyname(name);
5367 }
5368 
5369 inline BOOL os::WinSock2Dll::WinSock2Available() {
5370   return true;
5371 }
5372 
5373 // Advapi API
5374 inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
5375    BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
5376    PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
5377      return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
5378        BufferLength, PreviousState, ReturnLength);
5379 }
5380 
5381 inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
5382   PHANDLE TokenHandle) {
5383     return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
5384 }
5385 
5386 inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
5387   return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
5388 }
5389 
5390 inline BOOL os::Advapi32Dll::AdvapiAvailable() {
5391   return true;
5392 }
5393 
5394 void* os::get_default_process_handle() {
5395   return (void*)GetModuleHandle(NULL);
5396 }
5397 
5398 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5399 // which is used to find statically linked in agents.
5400 // Additionally for windows, takes into account __stdcall names.
5401 // Parameters:
5402 //            sym_name: Symbol in library we are looking for
5403 //            lib_name: Name of library to look in, NULL for shared libs.
5404 //            is_absolute_path == true if lib_name is absolute path to agent
5405 //                                     such as "C:/a/b/L.dll"
5406 //            == false if only the base name of the library is passed in
5407 //               such as "L"
5408 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5409                                     bool is_absolute_path) {
5410   char *agent_entry_name;
5411   size_t len;
5412   size_t name_len;
5413   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5414   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5415   const char *start;
5416 
5417   if (lib_name != NULL) {
5418     len = name_len = strlen(lib_name);
5419     if (is_absolute_path) {
5420       // Need to strip path, prefix and suffix
5421       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5422         lib_name = ++start;
5423       } else {
5424         // Need to check for drive prefix
5425         if ((start = strchr(lib_name, ':')) != NULL) {
5426           lib_name = ++start;
5427         }
5428       }
5429       if (len <= (prefix_len + suffix_len)) {
5430         return NULL;
5431       }
5432       lib_name += prefix_len;
5433       name_len = strlen(lib_name) - suffix_len;
5434     }
5435   }
5436   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5437   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5438   if (agent_entry_name == NULL) {
5439     return NULL;
5440   }
5441   if (lib_name != NULL) {
5442     const char *p = strrchr(sym_name, '@');
5443     if (p != NULL && p != sym_name) {
5444       // sym_name == _Agent_OnLoad@XX
5445       strncpy(agent_entry_name, sym_name, (p - sym_name));
5446       agent_entry_name[(p-sym_name)] = '\0';
5447       // agent_entry_name == _Agent_OnLoad
5448       strcat(agent_entry_name, "_");
5449       strncat(agent_entry_name, lib_name, name_len);
5450       strcat(agent_entry_name, p);
5451       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5452     } else {
5453       strcpy(agent_entry_name, sym_name);
5454       strcat(agent_entry_name, "_");
5455       strncat(agent_entry_name, lib_name, name_len);
5456     }
5457   } else {
5458     strcpy(agent_entry_name, sym_name);
5459   }
5460   return agent_entry_name;
5461 }
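
     // Illustrative results (a sketch based on the rules above; the agent and
     // library names are hypothetical):
     //   sym_name "Agent_OnLoad",     lib_name "L"             ->  "Agent_OnLoad_L"
     //   sym_name "_Agent_OnLoad@16", lib_name "C:\a\b\L.dll"  ->  "_Agent_OnLoad_L@16"
     //   sym_name "Agent_OnLoad",     lib_name NULL            ->  "Agent_OnLoad"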
5462 
5463 #else
5464 // Kernel32 API
5465 typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
5466 typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
5467 typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
5468 typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
5469 typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);
5470 
5471 SwitchToThread_Fn           os::Kernel32Dll::_SwitchToThread = NULL;
5472 CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
5473 Module32First_Fn            os::Kernel32Dll::_Module32First = NULL;
5474 Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
5475 GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;
5476 
5477 void os::Kernel32Dll::initialize() {
5478   if (!initialized) {
5479     HMODULE handle = ::GetModuleHandle("Kernel32.dll");
5480     assert(handle != NULL, "Just check");
5481 
5482     _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
5483     _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
5484       ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
5485     _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
5486     _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
5487     _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
5488     initializeCommon();  // resolve the functions that always need resolving
5489 
5490     initialized = TRUE;
5491   }
5492 }
5493 
5494 BOOL os::Kernel32Dll::SwitchToThread() {
5495   assert(initialized && _SwitchToThread != NULL,
5496     "SwitchToThreadAvailable() not yet called");
5497   return _SwitchToThread();
5498 }
5499 
5500 
5501 BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
5502   if (!initialized) {
5503     initialize();
5504   }
5505   return _SwitchToThread != NULL;
5506 }
5507 
5508 // Help tools
5509 BOOL os::Kernel32Dll::HelpToolsAvailable() {
5510   if (!initialized) {
5511     initialize();
5512   }
5513   return _CreateToolhelp32Snapshot != NULL &&
5514          _Module32First != NULL &&
5515          _Module32Next != NULL;
5516 }
5517 
5518 HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
5519   assert(initialized && _CreateToolhelp32Snapshot != NULL,
5520     "HelpToolsAvailable() not yet called");
5521 
5522   return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
5523 }
5524 
5525 BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
5526   assert(initialized && _Module32First != NULL,
5527     "HelpToolsAvailable() not yet called");
5528 
5529   return _Module32First(hSnapshot, lpme);
5530 }
5531 
5532 BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
5533   assert(initialized && _Module32Next != NULL,
5534     "HelpToolsAvailable() not yet called");
5535 
5536   return _Module32Next(hSnapshot, lpme);
5537 }
5538 
5539 
5540 BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
5541   if (!initialized) {
5542     initialize();
5543   }
5544   return _GetNativeSystemInfo != NULL;
5545 }
5546 
5547 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
5548   assert(initialized && _GetNativeSystemInfo != NULL,
5549     "GetNativeSystemInfoAvailable() not yet called");
5550 
5551   _GetNativeSystemInfo(lpSystemInfo);
5552 }
5553 
5554 // PSAPI API
5555 
5556 
5557 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
5558 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);
5559 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
5560 
5561 EnumProcessModules_Fn   os::PSApiDll::_EnumProcessModules = NULL;
5562 GetModuleFileNameEx_Fn  os::PSApiDll::_GetModuleFileNameEx = NULL;
5563 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
5564 BOOL                    os::PSApiDll::initialized = FALSE;
5565 
5566 void os::PSApiDll::initialize() {
5567   if (!initialized) {
5568     HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
5569     if (handle != NULL) {
5570       _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
5571         "EnumProcessModules");
5572       _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
5573         "GetModuleFileNameExA");
5574       _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
5575         "GetModuleInformation");
5576     }
5577     initialized = TRUE;
5578   }
5579 }
5580 
5581 
5582 
5583 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
5584   assert(initialized && _EnumProcessModules != NULL,
5585     "PSApiAvailable() not yet called");
5586   return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
5587 }
5588 
5589 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
5590   assert(initialized && _GetModuleFileNameEx != NULL,
5591     "PSApiAvailable() not yet called");
5592   return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
5593 }
5594 
5595 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
5596   assert(initialized && _GetModuleInformation != NULL,
5597     "PSApiAvailable() not yet called");
5598   return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
5599 }
5600 
5601 BOOL os::PSApiDll::PSApiAvailable() {
5602   if (!initialized) {
5603     initialize();
5604   }
5605   return _EnumProcessModules != NULL &&
5606     _GetModuleFileNameEx != NULL &&
5607     _GetModuleInformation != NULL;
5608 }
5609 
5610 
5611 // WinSock2 API
5612 typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA);
5613 typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...);
5614 
5615 WSAStartup_Fn    os::WinSock2Dll::_WSAStartup = NULL;
5616 gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL;
5617 BOOL             os::WinSock2Dll::initialized = FALSE;
5618 
5619 void os::WinSock2Dll::initialize() {
5620   if (!initialized) {
5621     HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0);
5622     if (handle != NULL) {
5623       _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup");
5624       _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname");
5625     }
5626     initialized = TRUE;
5627   }
5628 }
5629 
5630 
5631 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
5632   assert(initialized && _WSAStartup != NULL,
5633     "WinSock2Available() not yet called");
5634   return _WSAStartup(wVersionRequested, lpWSAData);
5635 }
5636 
5637 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
5638   assert(initialized && _gethostbyname != NULL,
5639     "WinSock2Available() not yet called");
5640   return _gethostbyname(name);
5641 }
5642 
5643 BOOL os::WinSock2Dll::WinSock2Available() {
5644   if (!initialized) {
5645     initialize();
5646   }
5647   return _WSAStartup != NULL &&
5648     _gethostbyname != NULL;
5649 }
5650 
5651 typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
5652 typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE);
5653 typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID);
5654 
5655 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL;
5656 OpenProcessToken_Fn      os::Advapi32Dll::_OpenProcessToken = NULL;
5657 LookupPrivilegeValue_Fn  os::Advapi32Dll::_LookupPrivilegeValue = NULL;
5658 BOOL                     os::Advapi32Dll::initialized = FALSE;
5659 
5660 void os::Advapi32Dll::initialize() {
5661   if (!initialized) {
5662     HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0);
5663     if (handle != NULL) {
5664       _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle,
5665         "AdjustTokenPrivileges");
5666       _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
5667         "OpenProcessToken");
5668       _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
5669         "LookupPrivilegeValueA");
5670     }
5671     initialized = TRUE;
5672   }
5673 }
5674 
5675 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
5676    BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
5677    PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
5678    assert(initialized && _AdjustTokenPrivileges != NULL,
5679      "AdvapiAvailable() not yet called");
5680    return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
5681        BufferLength, PreviousState, ReturnLength);
5682 }
5683 
5684 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
5685   PHANDLE TokenHandle) {
5686    assert(initialized && _OpenProcessToken != NULL,
5687      "AdvapiAvailable() not yet called");
5688     return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
5689 }
5690 
5691 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
5692    assert(initialized && _LookupPrivilegeValue != NULL,
5693      "AdvapiAvailable() not yet called");
5694   return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
5695 }
5696 
5697 BOOL os::Advapi32Dll::AdvapiAvailable() {
5698   if (!initialized) {
5699     initialize();
5700   }
5701   return _AdjustTokenPrivileges != NULL &&
5702     _OpenProcessToken != NULL &&
5703     _LookupPrivilegeValue != NULL;
5704 }
5705 
5706 #endif
5707 
5708 #ifndef PRODUCT
5709 
5710 // Test the code path in reserve_memory_special() that tries to allocate memory in a single
5711 // contiguous memory block at a particular address.
5712 // The test first tries to find a good approximate address to allocate at by using the same
5713 // method to allocate some memory at any address. The test then tries to allocate memory in
5714 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5715 // This is only a heuristic; there is no guarantee that the vicinity of
5716 // the previously allocated memory is available for allocation. The only actual failure
5717 // that is reported is when the test tries to allocate at a particular location but gets a
5718 // different valid one. A NULL return value at this point is not considered an error and may
5719 // be legitimate.
5720 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5721 void TestReserveMemorySpecial_test() {
5722   if (!UseLargePages) {
5723     if (VerboseInternalVMTests) {
5724       gclog_or_tty->print("Skipping test because large pages are disabled");
5725     }
5726     return;
5727   }
5728   // save current value of globals
5729   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5730   bool old_use_numa_interleaving = UseNUMAInterleaving;
5731 
5732   // set globals to make sure we hit the correct code path
5733   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5734 
5735   // do an allocation at an address selected by the OS to get a good one.
5736   const size_t large_allocation_size = os::large_page_size() * 4;
5737   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5738   if (result == NULL) {
5739     if (VerboseInternalVMTests) {
5740       gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
5741         large_allocation_size);
5742     }
5743   } else {
5744     os::release_memory_special(result, large_allocation_size);
5745 
5746     // allocate another page within the recently allocated memory area, which seems to be a good
5747     // location; at least we managed to get it once.
5748     const size_t expected_allocation_size = os::large_page_size();
5749     char* expected_location = result + os::large_page_size();
5750     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5751     if (actual_location == NULL) {
5752       if (VerboseInternalVMTests) {
5753         gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
5754           expected_location, large_allocation_size);
5755       }
5756     } else {
5757       // release memory
5758       os::release_memory_special(actual_location, expected_allocation_size);
5759       // only now check, after releasing any memory to avoid any leaks.
5760       assert(actual_location == expected_location,
5761         err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
5762           expected_location, expected_allocation_size, actual_location));
5763     }
5764   }
5765 
5766   // restore globals
5767   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5768   UseNUMAInterleaving = old_use_numa_interleaving;
5769 }
5770 #endif // PRODUCT
5771