hotspot/src/os/windows/vm/os_windows.cpp

rev 611 : Merge
   1 #ifdef USE_PRAGMA_IDENT_SRC
   2 #pragma ident "@(#)os_windows.cpp       1.535 07/11/15 10:56:43 JVM"
   3 #endif
   4 /*
   5  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  


 313       stack_size += minfo.RegionSize;
 314     else
 315       break;
 316   }
 317 
 318 #ifdef _M_IA64
 319   // IA64 has memory and register stacks
 320   stack_size = stack_size / 2;
 321 #endif
 322   return stack_bottom + stack_size;
 323 }
 324 
 325 size_t os::current_stack_size() {
 326   size_t sz;
 327   MEMORY_BASIC_INFORMATION minfo;
 328   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 329   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 330   return sz;
 331 }
 332 
 333 
 334 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 335 
 336 // Thread start routine for all new Java threads
 337 static unsigned __stdcall java_start(Thread* thread) {
 338   // Try to randomize the cache line index of hot stack frames.
 339   // This helps when threads of the same stack traces evict each other's
 340   // cache lines. The threads can be either from the same JVM instance, or
 341   // from different JVM instances. The benefit is especially true for
 342   // processors with hyperthreading technology.
 343   static int counter = 0;
 344   int pid = os::current_process_id();
 345   _alloca(((pid ^ counter++) & 7) * 128);
 346 
 347   OSThread* osthr = thread->osthread();
 348   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 349 
 350   if (UseNUMA) {
 351     int lgrp_id = os::numa_get_group_id();
 352     if (lgrp_id != -1) {


 723 }
 724 #else
 725 jlong offset() {
 726   return _offset;
 727 }
 728 #endif
 729 
 730 jlong windows_to_java_time(FILETIME wt) {
 731   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 732   return (a - offset()) / 10000;
 733 }
 734 
 735 FILETIME java_to_windows_time(jlong l) {
 736   jlong a = (l * 10000) + offset();
 737   FILETIME result;
 738   result.dwHighDateTime = high(a); 
 739   result.dwLowDateTime  = low(a);
 740   return result;
 741 }
 742 
 743 jlong os::timeofday() {
 744   FILETIME wt;
 745   GetSystemTimeAsFileTime(&wt);
 746   return windows_to_java_time(wt);
 747 }
 748 
 749 
 750 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
 751 // _use_global_time is only set if CacheTimeMillis is true
 752 jlong os::javaTimeMillis() {
 753   if (UseFakeTimers) {
 754     return fake_time++;
 755   } else {
 756     return (_use_global_time ? read_global_time() : timeofday());
 757   }
 758 }
 759 
 760 #define NANOS_PER_SEC         CONST64(1000000000)
 761 #define NANOS_PER_MILLISEC    1000000
 762 jlong os::javaTimeNanos() {
 763   if (!has_performance_count) { 
 764     return javaTimeMillis() * NANOS_PER_MILLISEC; // the best we can do.
 765   } else {
 766     LARGE_INTEGER current_count;  
 767     QueryPerformanceCounter(&current_count);
 768     double current = as_long(current_count);
 769     double freq = performance_frequency;
 770     jlong time = (jlong)((current/freq) * NANOS_PER_SEC);
 771     return time;
 772   }
 773 }
 774 
 775 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 776   if (!has_performance_count) { 


 978         dirp->handle = INVALID_HANDLE_VALUE;
 979     }
 980     free(dirp->path);
 981     free(dirp);
 982     return 0;
 983 }
 984 
 985 const char* os::dll_file_extension() { return ".dll"; }
 986 
 987 const char * os::get_temp_directory()
 988 {
 989     static char path_buf[MAX_PATH];
 990     if (GetTempPath(MAX_PATH, path_buf)>0)
 991       return path_buf;
 992     else{
 993       path_buf[0]='\0';
 994       return path_buf;
 995     }
 996 }
 997 
 998 // Needs to be in os specific directory because windows requires another
 999 // header file <direct.h>
1000 const char* os::get_current_directory(char *buf, int buflen) {
1001   return _getcwd(buf, buflen);
1002 }
1003 
1004 //-----------------------------------------------------------
1005 // Helper functions for fatal error handler
1006 
1007 // The following library functions are resolved dynamically at runtime:
1008 
1009 // PSAPI functions, for Windows NT, 2000, XP
1010 
1011 // psapi.h doesn't come with Visual Studio 6; it can be downloaded as Platform 
1012 // SDK from Microsoft.  Here are the definitions copied from psapi.h
1013 typedef struct _MODULEINFO {
1014     LPVOID lpBaseOfDll;
1015     DWORD SizeOfImage;
1016     LPVOID EntryPoint;
1017 } MODULEINFO, *LPMODULEINFO;


1241       if (offset) *offset = addr - mi.base_addr;
1242       return true;
1243    } else {
1244       if (buf) buf[0] = '\0';
1245       if (offset) *offset = -1;
1246       return false;
1247    }
1248 }
1249 
1250 bool os::dll_address_to_function_name(address addr, char *buf,
1251                                       int buflen, int *offset) {
1252   // Unimplemented on Windows - in order to use SymGetSymFromAddr(),
1253   // we need to initialize imagehlp/dbghelp, then load symbol table 
1254   // for every module. That's too much work to do after a fatal error.
1255   // For an example on how to implement this function, see 1.4.2.
1256   if (offset)  *offset  = -1;
1257   if (buf) buf[0] = '\0';
1258   return false;
1259 }
1260 
1261 // save the start and end address of jvm.dll into param[0] and param[1]
1262 static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr, 
1263                     unsigned size, void * param) {
1264    if (!param) return -1;
1265 
1266    if (base_addr     <= (address)_locate_jvm_dll &&
1267        base_addr+size > (address)_locate_jvm_dll) {
1268          ((address*)param)[0] = base_addr;
1269          ((address*)param)[1] = base_addr + size;
1270          return 1;
1271    }
1272    return 0;
1273 }
1274 
1275 address vm_lib_location[2];    // start and end address of jvm.dll
1276 
1277 // check if addr is inside jvm.dll
1278 bool os::address_is_in_vm(address addr) {
1279   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1280     int pid = os::current_process_id();


1414       lib_arch_str,running_arch_str);
1415   }
1416   else 
1417   {
 1418     // don't know what architecture this dll was built for
1419     ::_snprintf(ebuf, ebuflen-1,
1420       "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1421       lib_arch,running_arch_str);
1422   }
1423 
1424   return NULL;
1425 }
1426 
1427 
1428 void os::print_dll_info(outputStream *st) {
1429    int pid = os::current_process_id();
1430    st->print_cr("Dynamic libraries:");
1431    enumerate_modules(pid, _print_module, (void *)st);
1432 }
1433 
1434 void os::print_os_info(outputStream* st) {
1435    st->print("OS:");
1436 
1437    OSVERSIONINFOEX osvi;
1438    ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1439    osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1440 
1441    if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1442       st->print_cr("N/A");
1443       return;
1444    }
1445 
1446    int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
1447 
1448    if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
1449      switch (os_vers) {
1450        case 3051: st->print(" Windows NT 3.51"); break;
1451        case 4000: st->print(" Windows NT 4.0"); break;
1452        case 5000: st->print(" Windows 2000"); break;
1453        case 5001: st->print(" Windows XP"); break;
1454        case 5002: st->print(" Windows Server 2003 family"); break;
1455        case 6000: st->print(" Windows Vista"); break;
1456        default: // future windows, print out its major and minor versions
1457                 st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1458      }
1459    } else {
1460      switch (os_vers) {
1461        case 4000: st->print(" Windows 95"); break;
1462        case 4010: st->print(" Windows 98"); break;
1463        case 4090: st->print(" Windows Me"); break;
1464        default: // future windows, print out its major and minor versions
1465                 st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1466      }
1467    }
1468 
1469    st->print(" Build %d", osvi.dwBuildNumber);
1470    st->print(" %s", osvi.szCSDVersion);           // service pack
1471    st->cr();
1472 }
1473 
1474 void os::print_memory_info(outputStream* st) {
1475   st->print("Memory:");
1476   st->print(" %dk page", os::vm_page_size()>>10);
1477 
1478   // FIXME: GlobalMemoryStatus() may return incorrect value if total memory
1479   // is larger than 4GB
1480   MEMORYSTATUS ms;
1481   GlobalMemoryStatus(&ms);
1482 
1483   st->print(", physical %uk", os::physical_memory() >> 10);
1484   st->print("(%uk free)", os::available_memory() >> 10);
1485 
1486   st->print(", swap %uk", ms.dwTotalPageFile >> 10);
1487   st->print("(%uk free)", ms.dwAvailPageFile >> 10);
1488   st->cr();


1942       // If an instruction spans a page boundary, and the page containing
1943       // the beginning of the instruction is executable but the following
1944       // page is not, the pc and the faulting address might be slightly
1945       // different - we still want to unguard the 2nd page in this case.
1946       //
1947       // 15 bytes seems to be a (very) safe value for max instruction size.
1948       bool pc_is_near_addr = 
1949         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
1950       bool instr_spans_page_boundary =
1951         (align_size_down((intptr_t) pc ^ (intptr_t) addr,
1952                          (intptr_t) page_size) > 0);
1953 
1954       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
1955         static volatile address last_addr =
1956           (address) os::non_memory_address_word();
1957 
1958         // In conservative mode, don't unguard unless the address is in the VM
1959         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
1960             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
1961           
1962           // Unguard and retry
1963           address page_start =
1964             (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
1965           bool res = os::unguard_memory((char*) page_start, page_size);
1966           
1967           if (PrintMiscellaneous && Verbose) {
1968             char buf[256];
1969             jio_snprintf(buf, sizeof(buf), "Execution protection violation "
1970                          "at " INTPTR_FORMAT
1971                          ", unguarding " INTPTR_FORMAT ": %s", addr,
1972                          page_start, (res ? "success" : strerror(errno)));
1973             tty->print_raw_cr(buf);
1974           }
1975 
1976           // Set last_addr so if we fault again at the same address, we don't
1977           // end up in an endless loop.
1978           // 
1979           // There are two potential complications here.  Two threads trapping
1980           // at the same address at the same time could cause one of the
1981           // threads to think it already unguarded, and abort the VM.  Likely
1982           // very rare.
1983           // 
1984           // The other race involves two threads alternately trapping at
1985           // different addresses and failing to unguard the page, resulting in


2139                                "GR7 doesn't contain register_stack_limit");
2140                 // Disable the yellow zone which sets the state that 
2141                 // we've got a stack overflow problem.
2142                 if (thread->stack_yellow_zone_enabled()) {
2143                   thread->disable_stack_yellow_zone();
2144                 }
2145                 // Give us some room to process the exception
2146                 thread->disable_register_stack_guard();
2147                 // Update GR7 with the new limit so we can continue running
2148                 // compiled code.
2149                 exceptionInfo->ContextRecord->IntS3 = 
2150                                (ULONGLONG)thread->register_stack_limit();
2151                 return Handle_Exception(exceptionInfo, 
2152                        SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2153               } else {
2154                 //
2155                 // Check for implicit null
2156                 // We only expect null pointers in the stubs (vtable)
2157                 // the rest are checked explicitly now.
2158                 //
2159                 CodeBlob* cb = CodeCache::find_blob(pc);
2160                 if (cb != NULL) {
2161                   if (VtableStubs::stub_containing(pc) != NULL) {
2162                     if (((uintptr_t)addr) < os::vm_page_size() ) {
2163                       // an access to the first page of VM--assume it is a null pointer
2164                       return Handle_Exception(exceptionInfo,
2165                         SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL));
2166                     }
2167                   }
2168                 }
2169               }
2170             } // in_java
2171 
2172             // IA64 doesn't use implicit null checking yet. So we shouldn't
2173             // get here.
2174             tty->print_raw_cr("Access violation, possible null pointer exception");
2175             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2176                          exceptionInfo->ContextRecord);
2177             return EXCEPTION_CONTINUE_SEARCH;
2178 #else /* !IA64 */
2179 
2180             // Windows 98 reports faulting addresses incorrectly
2181             if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
2182                 !os::win32::is_nt()) {
2183               return Handle_Exception(exceptionInfo,
2184                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL));
2185             }
2186             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2187                          exceptionInfo->ContextRecord);
2188             return EXCEPTION_CONTINUE_SEARCH;
2189 #endif
2190           }
2191         }
2192       }
2193 
2194 #ifdef _WIN64
2195       // Special care for fast JNI field accessors.
2196       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2197       // in and the heap gets shrunk before the field access.
2198       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2199         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2200         if (addr != (address)-1) {
2201           return Handle_Exception(exceptionInfo, addr);
2202         }
2203       }
2204 #endif


2509 
2510 // Reserve memory at an arbitrary address, only if that area is
2511 // available (and not reserved for something else).
2512 char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
 2513   // Windows os::reserve_memory() fails if the requested address range is
 2514   // not available.
2515   return reserve_memory(bytes, requested_addr);
2516 }
2517 
2518 size_t os::large_page_size() {
2519   return _large_page_size;
2520 }
2521 
2522 bool os::can_commit_large_page_memory() {
2523   // Windows only uses large page memory when the entire region is reserved
2524   // and committed in a single VirtualAlloc() call. This may change in the
2525   // future, but with Windows 2003 it's not possible to commit on demand.
2526   return false;
2527 }
2528 
2529 char* os::reserve_memory_special(size_t bytes) {
2530   DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
2531   char * res = (char *)VirtualAlloc(NULL, bytes, flag, PAGE_READWRITE);
2532   return res;
2533 }
2534  
2535 bool os::release_memory_special(char* base, size_t bytes) {
2536   return release_memory(base, bytes);
2537 }
2538 
2539 void os::print_statistics() {
2540 }
2541 
2542 bool os::commit_memory(char* addr, size_t bytes) {
2543   if (bytes == 0) {
2544     // Don't bother the OS with noops.
2545     return true;
2546   }
2547   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
2548   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
2549   // Don't attempt to print anything if the OS call fails. We're
2550   // probably low on resources, so the print itself may cause crashes.
2551   return VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_EXECUTE_READWRITE) != NULL;
2552 }
2553 
2554 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint) {
2555   return commit_memory(addr, size);
2556 }
2557 
2558 bool os::uncommit_memory(char* addr, size_t bytes) {
2559   if (bytes == 0) {
2560     // Don't bother the OS with noops.
2561     return true;
2562   }
2563   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
2564   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
2565   return VirtualFree(addr, bytes, MEM_DECOMMIT) != 0;
2566 }
2567 
2568 bool os::release_memory(char* addr, size_t bytes) {
2569   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
2570 }
2571 
2572 bool os::protect_memory(char* addr, size_t bytes) {
2573   DWORD old_status;
2574   return VirtualProtect(addr, bytes, PAGE_READONLY, &old_status) != 0;
2575 }
2576 
2577 bool os::guard_memory(char* addr, size_t bytes) {
2578   DWORD old_status;
2579   return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE | PAGE_GUARD, &old_status) != 0;
2580 }
2581 
2582 bool os::unguard_memory(char* addr, size_t bytes) {
2583   DWORD old_status;
2584   return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &old_status) != 0;
2585 }
2586 
2587 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
2588 void os::free_memory(char *addr, size_t bytes)         { }
2589 void os::numa_make_global(char *addr, size_t bytes)    { }
2590 void os::numa_make_local(char *addr, size_t bytes)     { }
2591 bool os::numa_topology_changed()                       { return false; }
2592 size_t os::numa_get_groups_num()                       { return 1; }
2593 int os::numa_get_group_id()                            { return 0; }
2594 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2595   if (size > 0) {
2596     ids[0] = 0;
2597     return 1;
2598   }
2599   return 0;
2600 }
2601 
2602 bool os::get_page_info(char *start, page_info* info) {
2603   return false;
2604 }
2605 
2606 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2607   return end;
2608 }
2609 
2610 char* os::non_memory_address_word() {


2876 
2877 static int _initial_pid = 0;
2878 
2879 int os::current_process_id()
2880 {
2881   return (_initial_pid ? _initial_pid : _getpid());
2882 }
2883 
2884 int    os::win32::_vm_page_size       = 0;
2885 int    os::win32::_vm_allocation_granularity = 0;
2886 int    os::win32::_processor_type     = 0;
2887 // Processor level is not available on non-NT systems, use vm_version instead
2888 int    os::win32::_processor_level    = 0;
2889 julong os::win32::_physical_memory    = 0;
2890 size_t os::win32::_default_stack_size = 0;
2891 
2892          intx os::win32::_os_thread_limit    = 0;
2893 volatile intx os::win32::_os_thread_count    = 0;
2894 
2895 bool   os::win32::_is_nt              = false;
2896 
2897 
2898 void os::win32::initialize_system_info() {
2899   SYSTEM_INFO si;
2900   GetSystemInfo(&si);
2901   _vm_page_size    = si.dwPageSize;
2902   _vm_allocation_granularity = si.dwAllocationGranularity;
2903   _processor_type  = si.dwProcessorType;
2904   _processor_level = si.wProcessorLevel;
2905   _processor_count = si.dwNumberOfProcessors;
2906 
2907   MEMORYSTATUS ms;
2908   // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
2909   // dwMemoryLoad (% of memory in use)
2910   GlobalMemoryStatus(&ms);
2911   _physical_memory = ms.dwTotalPhys;
2912 
2913   OSVERSIONINFO oi;
2914   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
2915   GetVersionEx(&oi);
2916   switch(oi.dwPlatformId) {
2917     case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
2918     case VER_PLATFORM_WIN32_NT:      _is_nt = true;  break;
2919     default: fatal("Unknown platform");
2920   }  
2921 
2922   _default_stack_size = os::current_stack_size(); 
2923   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
2924   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
2925     "stack size not a multiple of page size");
2926 
2927   initialize_performance_counter();
2928 
2929   // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
 2930   // known to deadlock the system if the VM issues thread operations,
 2931   // e.g., priority changes, at too high a frequency.
 2932   // The value 6000 seems to work well - no deadlocks have been noticed on the
 2933   // test programs that we have seen experience this problem.
2934   if (!os::win32::is_nt()) {    
2935     StarvationMonitorInterval = 6000;
2936   }
2937 }
2938 


2996 }
2997 #endif // _WIN64
2998 #endif // PRODUCT
2999 
3000 // this is called _before_ the global arguments have been parsed
3001 void os::init(void) {
3002   _initial_pid = _getpid();
3003 
3004   init_random(1234567);
3005 
3006   win32::initialize_system_info();
3007   win32::setmode_streams();
3008   init_page_sizes((size_t) win32::vm_page_size());
3009 
3010   // For better scalability on MP systems (must be called after initialize_system_info)
3011 #ifndef PRODUCT
3012   if (is_MP()) {    
3013     NoYieldsInMicrolock = true;
3014   }
3015 #endif
3016   // Initialize main_process and main_thread
3017   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
3018   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
3019                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
3020     fatal("DuplicateHandle failed\n");
3021   }
3022   main_thread_id = (int) GetCurrentThreadId();
3023 }
3024  
3025 // To install functions for atexit processing
3026 extern "C" {
3027   static void perfMemory_exit_helper() {
3028     perfMemory_exit();
3029   }
3030 }
3031 
3032 
3033 // this is called _after_ the global arguments have been parsed
3034 jint os::init_2(void) {
3035   // Allocate a single page and mark it as readable for safepoint polling


3105   size_t actual_reserve_size = stack_commit_size;
3106   if (stack_commit_size < default_reserve_size) {
3107     // If stack_commit_size == 0, we want this too
3108     actual_reserve_size = default_reserve_size;
3109   }
3110 
3111   JavaThread::set_stack_size_at_create(stack_commit_size);
3112 
 3113   // Calculate theoretical max. size of Threads to guard against artificial
3114   // out-of-memory situations, where all available address-space has been
3115   // reserved by thread stacks.
3116   assert(actual_reserve_size != 0, "Must have a stack");
3117 
3118   // Calculate the thread limit when we should start doing Virtual Memory 
3119   // banging. Currently when the threads will have used all but 200Mb of space.
3120   //
3121   // TODO: consider performing a similar calculation for commit size instead 
 3122   // of reserve size, since on a 64-bit platform we'll run into that more
3123   // often than running out of virtual memory space.  We can use the 
3124   // lower value of the two calculations as the os_thread_limit.
3125   size_t max_address_space = ((size_t)1 << (BitsPerOop - 1)) - (200 * K * K);
3126   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
3127 
3128   // at exit methods are called in the reverse order of their registration.
3129   // there is no limit to the number of functions registered. atexit does
3130   // not set errno.
3131 
3132   if (PerfAllowAtExitRegistration) {
3133     // only register atexit functions if PerfAllowAtExitRegistration is set.
3134     // atexit functions can be delayed until process exit time, which
3135     // can be problematic for embedded VM situations. Embedded VMs should
3136     // call DestroyJavaVM() to assure that VM resources are released.
3137 
3138     // note: perfMemory_exit_helper atexit function may be removed in
3139     // the future if the appropriate cleanup code can be added to the
3140     // VM_Exit VMOperation's doit method.
3141     if (atexit(perfMemory_exit_helper) != 0) {
3142       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3143     }
3144   }
3145 
3146   // initialize PSAPI or ToolHelp for fatal error handler
3147   if (win32::is_nt()) _init_psapi();
3148   else _init_toolhelp();
3149 
3150 #ifndef _WIN64
3151   // Print something if NX is enabled (win32 on AMD64)
3152   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
3153 #endif
3154 
3155   // initialize thread priority policy
3156   prio_init();
3157 
3158   return JNI_OK;
3159 }
3160 
3161 
3162 // Mark the polling page as unreadable
3163 void os::make_polling_page_unreadable(void) {
3164   DWORD old_status;
3165   if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
3166     fatal("Could not disable polling page");
3167 };
3168 
3169 // Mark the polling page as readable
3170 void os::make_polling_page_readable(void) {
3171   DWORD old_status;
3172   if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
3173     fatal("Could not enable polling page");
3174 };
3175 
3176 
3177 int os::stat(const char *path, struct stat *sbuf) {





   1 /*
   2  * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  20  * CA 95054 USA or visit www.sun.com if you need additional information or
  21  * have any questions.
  22  *


 310       stack_size += minfo.RegionSize;
 311     else
 312       break;
 313   }
 314 
 315 #ifdef _M_IA64
 316   // IA64 has memory and register stacks
 317   stack_size = stack_size / 2;
 318 #endif
 319   return stack_bottom + stack_size;
 320 }
 321 
 322 size_t os::current_stack_size() {
 323   size_t sz;
 324   MEMORY_BASIC_INFORMATION minfo;
 325   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 326   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 327   return sz;
 328 }
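
A standalone illustrative sketch of the VirtualQuery trick used in os::current_stack_size above (not part of this file): querying the address of a local variable describes the memory region of the current thread's stack, whose AllocationBase is the low end of the stack reservation.

    #include <windows.h>
    #include <cstdio>

    int main() {
      MEMORY_BASIC_INFORMATION minfo;
      // &minfo is itself a stack address, so the query describes the stack region.
      VirtualQuery(&minfo, &minfo, sizeof(minfo));
      printf("stack reservation base: %p, current region base: %p\n",
             minfo.AllocationBase, minfo.BaseAddress);
      return 0;
    }
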
 329 
 330 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 331   const struct tm* time_struct_ptr = localtime(clock);
 332   if (time_struct_ptr != NULL) {
 333     *res = *time_struct_ptr;
 334     return res;
 335   }
 336   return NULL;
 337 }
 338 
 339 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 340 
 341 // Thread start routine for all new Java threads
 342 static unsigned __stdcall java_start(Thread* thread) {
 343   // Try to randomize the cache line index of hot stack frames.
 344   // This helps when threads of the same stack traces evict each other's
 345   // cache lines. The threads can be either from the same JVM instance, or
 346   // from different JVM instances. The benefit is especially true for
 347   // processors with hyperthreading technology.
 348   static int counter = 0;
 349   int pid = os::current_process_id();
 350   _alloca(((pid ^ counter++) & 7) * 128);
 351 
 352   OSThread* osthr = thread->osthread();
 353   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 354 
 355   if (UseNUMA) {
 356     int lgrp_id = os::numa_get_group_id();
 357     if (lgrp_id != -1) {


 728 }
 729 #else
 730 jlong offset() {
 731   return _offset;
 732 }
 733 #endif
 734 
 735 jlong windows_to_java_time(FILETIME wt) {
 736   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 737   return (a - offset()) / 10000;
 738 }
 739 
 740 FILETIME java_to_windows_time(jlong l) {
 741   jlong a = (l * 10000) + offset();
 742   FILETIME result;
 743   result.dwHighDateTime = high(a);
 744   result.dwLowDateTime  = low(a);
 745   return result;
 746 }
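
A minimal sketch of the conversion arithmetic above (standalone, not part of this file): FILETIME counts 100-nanosecond intervals since Jan 1, 1601, so offset() is the FILETIME value of the Java epoch (Jan 1, 1970), and dividing by 10,000 converts 100-ns units to milliseconds.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Jan 1, 1970 expressed in 100-ns units since Jan 1, 1601 (11644473600 seconds).
      const int64_t kJavaEpochAsFiletime = 116444736000000000LL;
      int64_t ft = kJavaEpochAsFiletime + 10000LL;            // one millisecond past the epoch
      int64_t java_millis = (ft - kJavaEpochAsFiletime) / 10000;
      printf("java millis: %lld\n", (long long)java_millis);  // prints 1
      return 0;
    }
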
 747 
 748 // For now, we say that Windows does not support vtime.  I have no idea
 749 // whether it can actually be made to (DLD, 9/13/05).
 750 
 751 bool os::supports_vtime() { return false; }
 752 bool os::enable_vtime() { return false; }
 753 bool os::vtime_enabled() { return false; }
 754 double os::elapsedVTime() {
 755   // better than nothing, but not much
 756   return elapsedTime();
 757 }
 758 
 759 jlong os::javaTimeMillis() {
 760   if (UseFakeTimers) {
 761     return fake_time++;
 762   } else {
 763     FILETIME wt;
 764     GetSystemTimeAsFileTime(&wt);
 765     return windows_to_java_time(wt);
 766   }
 767 }
 768 
 769 #define NANOS_PER_SEC         CONST64(1000000000)
 770 #define NANOS_PER_MILLISEC    1000000
 771 jlong os::javaTimeNanos() {
 772   if (!has_performance_count) {
 773     return javaTimeMillis() * NANOS_PER_MILLISEC; // the best we can do.
 774   } else {
 775     LARGE_INTEGER current_count;
 776     QueryPerformanceCounter(&current_count);
 777     double current = as_long(current_count);
 778     double freq = performance_frequency;
 779     jlong time = (jlong)((current/freq) * NANOS_PER_SEC);
 780     return time;
 781   }
 782 }
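
A standalone sketch of the QueryPerformanceCounter arithmetic used in javaTimeNanos above (illustrative only): nanoseconds = ticks / frequency * 1e9, with a fallback when no high-resolution counter is available.

    #include <windows.h>
    #include <cstdio>

    int main() {
      LARGE_INTEGER freq, t0, t1;
      if (!QueryPerformanceFrequency(&freq) || freq.QuadPart == 0) {
        printf("no high-resolution counter; fall back to millisecond time\n");
        return 1;
      }
      QueryPerformanceCounter(&t0);
      Sleep(10);                                   // do some work
      QueryPerformanceCounter(&t1);
      double nanos = (double)(t1.QuadPart - t0.QuadPart) / (double)freq.QuadPart * 1e9;
      printf("elapsed: %.0f ns\n", nanos);
      return 0;
    }
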
 783 
 784 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 785   if (!has_performance_count) {


 987         dirp->handle = INVALID_HANDLE_VALUE;
 988     }
 989     free(dirp->path);
 990     free(dirp);
 991     return 0;
 992 }
 993 
 994 const char* os::dll_file_extension() { return ".dll"; }
 995 
 996 const char * os::get_temp_directory()
 997 {
 998     static char path_buf[MAX_PATH];
 999     if (GetTempPath(MAX_PATH, path_buf)>0)
1000       return path_buf;
1001     else{
1002       path_buf[0]='\0';
1003       return path_buf;
1004     }
1005 }
1006 
1007 static bool file_exists(const char* filename) {
1008   if (filename == NULL || strlen(filename) == 0) {
1009     return false;
1010   }
1011   return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1012 }
1013 
1014 void os::dll_build_name(char *buffer, size_t buflen,
1015                         const char* pname, const char* fname) {
1016   // Copied from libhpi
1017   const size_t pnamelen = pname ? strlen(pname) : 0;
1018   const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1019 
1020   // Quietly truncates on buffer overflow. Should be an error.
1021   if (pnamelen + strlen(fname) + 10 > buflen) {
1022     *buffer = '\0';
1023     return;
1024   }
1025 
1026   if (pnamelen == 0) {
1027     jio_snprintf(buffer, buflen, "%s.dll", fname);
1028   } else if (c == ':' || c == '\\') {
1029     jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1030   } else if (strchr(pname, *os::path_separator()) != NULL) {
1031     int n;
1032     char** pelements = split_path(pname, &n);
1033     for (int i = 0 ; i < n ; i++) {
1034       char* path = pelements[i];
1035       // Really shouldn't be NULL, but check can't hurt
1036       size_t plen = (path == NULL) ? 0 : strlen(path);
1037       if (plen == 0) {
1038         continue; // skip the empty path values
1039       }
1040       const char lastchar = path[plen - 1];
1041       if (lastchar == ':' || lastchar == '\\') {
1042         jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1043       } else {
1044         jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1045       }
1046       if (file_exists(buffer)) {
1047         break;
1048       }
1049     }
1050     // release the storage
1051     for (int i = 0 ; i < n ; i++) {
1052       if (pelements[i] != NULL) {
1053         FREE_C_HEAP_ARRAY(char, pelements[i]);
1054       }
1055     }
1056     if (pelements != NULL) {
1057       FREE_C_HEAP_ARRAY(char*, pelements);
1058     }
1059   } else {
1060     jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1061   }
1062 }
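
A brief illustration of the name construction above with hypothetical inputs (the directory and library name below are made up, not taken from this file); this mirrors the branch where the path ends with neither ':' nor '\'.

    #include <cstdio>

    int main() {
      char buffer[260];
      const char* pname = "C:\\jdk\\bin";   // hypothetical library directory
      const char* fname = "awt";            // hypothetical library base name
      snprintf(buffer, sizeof(buffer), "%s\\%s.dll", pname, fname);
      printf("%s\n", buffer);               // C:\jdk\bin\awt.dll
      return 0;
    }
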
1063 
1064 // Needs to be in os specific directory because windows requires another
1065 // header file <direct.h>
1066 const char* os::get_current_directory(char *buf, int buflen) {
1067   return _getcwd(buf, buflen);
1068 }
1069 
1070 //-----------------------------------------------------------
1071 // Helper functions for fatal error handler
1072 
1073 // The following library functions are resolved dynamically at runtime:
1074 
1075 // PSAPI functions, for Windows NT, 2000, XP
1076 
1077 // psapi.h doesn't come with Visual Studio 6; it can be downloaded as Platform
1078 // SDK from Microsoft.  Here are the definitions copied from psapi.h
1079 typedef struct _MODULEINFO {
1080     LPVOID lpBaseOfDll;
1081     DWORD SizeOfImage;
1082     LPVOID EntryPoint;
1083 } MODULEINFO, *LPMODULEINFO;


1307       if (offset) *offset = addr - mi.base_addr;
1308       return true;
1309    } else {
1310       if (buf) buf[0] = '\0';
1311       if (offset) *offset = -1;
1312       return false;
1313    }
1314 }
1315 
1316 bool os::dll_address_to_function_name(address addr, char *buf,
1317                                       int buflen, int *offset) {
1318   // Unimplemented on Windows - in order to use SymGetSymFromAddr(),
1319   // we need to initialize imagehlp/dbghelp, then load symbol table
1320   // for every module. That's too much work to do after a fatal error.
1321   // For an example on how to implement this function, see 1.4.2.
1322   if (offset)  *offset  = -1;
1323   if (buf) buf[0] = '\0';
1324   return false;
1325 }
1326 
1327 void* os::dll_lookup(void* handle, const char* name) {
1328   return GetProcAddress((HMODULE)handle, name);
1329 }
1330 
1331 // save the start and end address of jvm.dll into param[0] and param[1]
1332 static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
1333                     unsigned size, void * param) {
1334    if (!param) return -1;
1335 
1336    if (base_addr     <= (address)_locate_jvm_dll &&
1337        base_addr+size > (address)_locate_jvm_dll) {
1338          ((address*)param)[0] = base_addr;
1339          ((address*)param)[1] = base_addr + size;
1340          return 1;
1341    }
1342    return 0;
1343 }
1344 
1345 address vm_lib_location[2];    // start and end address of jvm.dll
1346 
1347 // check if addr is inside jvm.dll
1348 bool os::address_is_in_vm(address addr) {
1349   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1350     int pid = os::current_process_id();


1484       lib_arch_str,running_arch_str);
1485   }
1486   else
1487   {
 1488     // don't know what architecture this dll was built for
1489     ::_snprintf(ebuf, ebuflen-1,
1490       "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1491       lib_arch,running_arch_str);
1492   }
1493 
1494   return NULL;
1495 }
1496 
1497 
1498 void os::print_dll_info(outputStream *st) {
1499    int pid = os::current_process_id();
1500    st->print_cr("Dynamic libraries:");
1501    enumerate_modules(pid, _print_module, (void *)st);
1502 }
1503 
1504 // function pointer to Windows API "GetNativeSystemInfo".
1505 typedef void (WINAPI *GetNativeSystemInfo_func_type)(LPSYSTEM_INFO);
1506 static GetNativeSystemInfo_func_type _GetNativeSystemInfo;
1507 
1508 void os::print_os_info(outputStream* st) {
1509   st->print("OS:");
1510 
1511   OSVERSIONINFOEX osvi;
1512   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1513   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1514 
1515   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1516     st->print_cr("N/A");
1517     return;
1518   }
1519 
1520   int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
1521   if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
1522     switch (os_vers) {
1523     case 3051: st->print(" Windows NT 3.51"); break;
1524     case 4000: st->print(" Windows NT 4.0"); break;
1525     case 5000: st->print(" Windows 2000"); break;
1526     case 5001: st->print(" Windows XP"); break;
1527     case 5002:
1528     case 6000:
1529     case 6001: {
 1530       // Retrieve SYSTEM_INFO from a GetNativeSystemInfo call so that we can
 1531       // find out whether we are running on a 64-bit processor or not.
1532       SYSTEM_INFO si;
1533       ZeroMemory(&si, sizeof(SYSTEM_INFO));
1534       // Check to see if _GetNativeSystemInfo has been initialized.
1535       if (_GetNativeSystemInfo == NULL) {
1536         HMODULE hKernel32 = GetModuleHandle(TEXT("kernel32.dll"));
1537         _GetNativeSystemInfo =
1538             CAST_TO_FN_PTR(GetNativeSystemInfo_func_type,
1539                            GetProcAddress(hKernel32,
1540                                           "GetNativeSystemInfo"));
1541         if (_GetNativeSystemInfo == NULL)
1542           GetSystemInfo(&si);
1543       } else {
1544         _GetNativeSystemInfo(&si);
1545       }
1546       if (os_vers == 5002) {
1547         if (osvi.wProductType == VER_NT_WORKSTATION &&
1548             si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
1549           st->print(" Windows XP x64 Edition");
1550         else
1551             st->print(" Windows Server 2003 family");
1552       } else if (os_vers == 6000) {
1553         if (osvi.wProductType == VER_NT_WORKSTATION)
1554             st->print(" Windows Vista");
1555         else
1556             st->print(" Windows Server 2008");
1557         if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
1558             st->print(" , 64 bit");
1559       } else { // os_vers == 6001
1560         if (osvi.wProductType == VER_NT_WORKSTATION) {
1561             st->print(" Windows 7");
1562         } else {
1563             // Unrecognized windows, print out its major and minor versions
1564             st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1565         }
1566         if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
1567             st->print(" , 64 bit");
1568       }
1569       break;
1570     }
1571     default: // future windows, print out its major and minor versions
1572       st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1573     }
1574   } else {
1575     switch (os_vers) {
1576     case 4000: st->print(" Windows 95"); break;
1577     case 4010: st->print(" Windows 98"); break;
1578     case 4090: st->print(" Windows Me"); break;
1579     default: // future windows, print out its major and minor versions
1580       st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1581     }
1582   }
1583   st->print(" Build %d", osvi.dwBuildNumber);
1584   st->print(" %s", osvi.szCSDVersion);           // service pack
1585   st->cr();
1586 }
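
A standalone sketch of the dynamic-lookup pattern used above (illustrative, not HotSpot code): GetNativeSystemInfo is not exported by older kernel32.dll versions, so it is resolved at runtime and GetSystemInfo is used as a fallback; the native call reports the real processor architecture even under WOW64.

    #include <windows.h>
    #include <cstdio>

    typedef void (WINAPI *GetNativeSystemInfo_fn)(LPSYSTEM_INFO);

    int main() {
      SYSTEM_INFO si;
      GetNativeSystemInfo_fn fn = (GetNativeSystemInfo_fn)
          GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), "GetNativeSystemInfo");
      if (fn != NULL) {
        fn(&si);              // reports the real (non-WOW64) architecture
      } else {
        GetSystemInfo(&si);   // fallback on Windows versions without the export
      }
      printf("processor architecture: %u\n", (unsigned)si.wProcessorArchitecture);
      return 0;
    }
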
1587 
1588 void os::print_memory_info(outputStream* st) {
1589   st->print("Memory:");
1590   st->print(" %dk page", os::vm_page_size()>>10);
1591 
1592   // FIXME: GlobalMemoryStatus() may return incorrect value if total memory
1593   // is larger than 4GB
1594   MEMORYSTATUS ms;
1595   GlobalMemoryStatus(&ms);
1596 
1597   st->print(", physical %uk", os::physical_memory() >> 10);
1598   st->print("(%uk free)", os::available_memory() >> 10);
1599 
1600   st->print(", swap %uk", ms.dwTotalPageFile >> 10);
1601   st->print("(%uk free)", ms.dwAvailPageFile >> 10);
1602   st->cr();


2056       // If an instruction spans a page boundary, and the page containing
2057       // the beginning of the instruction is executable but the following
2058       // page is not, the pc and the faulting address might be slightly
2059       // different - we still want to unguard the 2nd page in this case.
2060       //
2061       // 15 bytes seems to be a (very) safe value for max instruction size.
2062       bool pc_is_near_addr =
2063         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2064       bool instr_spans_page_boundary =
2065         (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2066                          (intptr_t) page_size) > 0);
2067 
2068       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2069         static volatile address last_addr =
2070           (address) os::non_memory_address_word();
2071 
2072         // In conservative mode, don't unguard unless the address is in the VM
2073         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2074             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2075 
2076           // Set memory to RWX and retry
2077           address page_start =
2078             (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2079           bool res = os::protect_memory((char*) page_start, page_size,
2080                                         os::MEM_PROT_RWX);
2081 
2082           if (PrintMiscellaneous && Verbose) {
2083             char buf[256];
2084             jio_snprintf(buf, sizeof(buf), "Execution protection violation "
2085                          "at " INTPTR_FORMAT
2086                          ", unguarding " INTPTR_FORMAT ": %s", addr,
2087                          page_start, (res ? "success" : strerror(errno)));
2088             tty->print_raw_cr(buf);
2089           }
2090 
2091           // Set last_addr so if we fault again at the same address, we don't
2092           // end up in an endless loop.
2093           //
2094           // There are two potential complications here.  Two threads trapping
2095           // at the same address at the same time could cause one of the
2096           // threads to think it already unguarded, and abort the VM.  Likely
2097           // very rare.
2098           //
2099           // The other race involves two threads alternately trapping at
2100           // different addresses and failing to unguard the page, resulting in


2254                                "GR7 doesn't contain register_stack_limit");
2255                 // Disable the yellow zone which sets the state that
2256                 // we've got a stack overflow problem.
2257                 if (thread->stack_yellow_zone_enabled()) {
2258                   thread->disable_stack_yellow_zone();
2259                 }
2260                 // Give us some room to process the exception
2261                 thread->disable_register_stack_guard();
2262                 // Update GR7 with the new limit so we can continue running
2263                 // compiled code.
2264                 exceptionInfo->ContextRecord->IntS3 =
2265                                (ULONGLONG)thread->register_stack_limit();
2266                 return Handle_Exception(exceptionInfo,
2267                        SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2268               } else {
2269                 //
2270                 // Check for implicit null
2271                 // We only expect null pointers in the stubs (vtable)
2272                 // the rest are checked explicitly now.
2273                 //
2274                 if (((uintptr_t)addr) < os::vm_page_size() ) {
2275                   // an access to the first page of VM--assume it is a null pointer
2276                   address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2277                   if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2278                 }
2279               }
2280             } // in_java
2281 
2282             // IA64 doesn't use implicit null checking yet. So we shouldn't
2283             // get here.
2284             tty->print_raw_cr("Access violation, possible null pointer exception");
2285             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2286                          exceptionInfo->ContextRecord);
2287             return EXCEPTION_CONTINUE_SEARCH;
2288 #else /* !IA64 */
2289 
2290             // Windows 98 reports faulting addresses incorrectly
2291             if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
2292                 !os::win32::is_nt()) {
2293               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2294               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2295             }
2296             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2297                          exceptionInfo->ContextRecord);
2298             return EXCEPTION_CONTINUE_SEARCH;
2299 #endif
2300           }
2301         }
2302       }
2303 
2304 #ifdef _WIN64
2305       // Special care for fast JNI field accessors.
2306       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2307       // in and the heap gets shrunk before the field access.
2308       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2309         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2310         if (addr != (address)-1) {
2311           return Handle_Exception(exceptionInfo, addr);
2312         }
2313       }
2314 #endif


2619 
2620 // Reserve memory at an arbitrary address, only if that area is
2621 // available (and not reserved for something else).
2622 char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
 2623   // Windows os::reserve_memory() fails if the requested address range is
 2624   // not available.
2625   return reserve_memory(bytes, requested_addr);
2626 }
2627 
2628 size_t os::large_page_size() {
2629   return _large_page_size;
2630 }
2631 
2632 bool os::can_commit_large_page_memory() {
2633   // Windows only uses large page memory when the entire region is reserved
2634   // and committed in a single VirtualAlloc() call. This may change in the
2635   // future, but with Windows 2003 it's not possible to commit on demand.
2636   return false;
2637 }
2638 
2639 bool os::can_execute_large_page_memory() {
2640   return true;
2641 }
2642 
2643 char* os::reserve_memory_special(size_t bytes) {
2644 
2645   if (UseLargePagesIndividualAllocation) {
2646     if (TracePageSizes && Verbose) {
2647        tty->print_cr("Reserving large pages individually.");
2648     }
2649     char * p_buf;
 2650     // First reserve enough address space in advance, since we want to be
 2651     // able to break a single contiguous virtual address range into multiple
 2652     // large page commits, but WS2003 does not allow reserving large page space,
 2653     // so we just use 4K pages for the reserve; this gives us a legal contiguous
 2654     // address space. Then we will deallocate that reservation and re-allocate
 2655     // using large pages.
2656     const size_t size_of_reserve = bytes + _large_page_size;
2657     if (bytes > size_of_reserve) {
2658       // Overflowed.
2659       warning("Individually allocated large pages failed, "
2660         "use -XX:-UseLargePagesIndividualAllocation to turn off");
2661       return NULL;
2662     }
2663     p_buf = (char *) VirtualAlloc(NULL,
2664                                  size_of_reserve,  // size of Reserve
2665                                  MEM_RESERVE,
2666                                  PAGE_EXECUTE_READWRITE);
2667     // If reservation failed, return NULL
2668     if (p_buf == NULL) return NULL;
2669 
2670     release_memory(p_buf, bytes + _large_page_size);
2671     // round up to page boundary.  If the size_of_reserve did not
2672     // overflow and the reservation did not fail, this align up
2673     // should not overflow.
2674     p_buf = (char *) align_size_up((size_t)p_buf, _large_page_size);
2675 
2676     // now go through and allocate one page at a time until all bytes are
2677     // allocated
2678     size_t  bytes_remaining = align_size_up(bytes, _large_page_size);
2679     // An overflow of align_size_up() would have been caught above
2680     // in the calculation of size_of_reserve.
2681     char * next_alloc_addr = p_buf;
2682 
2683 #ifdef ASSERT
2684     // Variable for the failure injection
2685     long ran_num = os::random();
2686     size_t fail_after = ran_num % bytes;
2687 #endif
2688 
2689     while (bytes_remaining) {
2690       size_t bytes_to_rq = MIN2(bytes_remaining, _large_page_size);
2691       // Note allocate and commit
2692       char * p_new;
2693 
2694 #ifdef ASSERT
2695       bool inject_error = LargePagesIndividualAllocationInjectError &&
2696           (bytes_remaining <= fail_after);
2697 #else
2698       const bool inject_error = false;
2699 #endif
2700 
2701       if (inject_error) {
2702         p_new = NULL;
2703       } else {
2704         p_new = (char *) VirtualAlloc(next_alloc_addr,
2705                                     bytes_to_rq,
2706                                     MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
2707                                     PAGE_EXECUTE_READWRITE);
2708       }
2709 
2710       if (p_new == NULL) {
2711         // Free any allocated pages
2712         if (next_alloc_addr > p_buf) {
2713           // Some memory was committed so release it.
2714           size_t bytes_to_release = bytes - bytes_remaining;
2715           release_memory(p_buf, bytes_to_release);
2716         }
2717 #ifdef ASSERT
2718         if (UseLargePagesIndividualAllocation &&
2719             LargePagesIndividualAllocationInjectError) {
2720           if (TracePageSizes && Verbose) {
2721              tty->print_cr("Reserving large pages individually failed.");
2722           }
2723         }
2724 #endif
2725         return NULL;
2726       }
2727       bytes_remaining -= bytes_to_rq;
2728       next_alloc_addr += bytes_to_rq;
2729     }
2730 
2731     return p_buf;
2732 
2733   } else {
2734     // normal policy just allocate it all at once
2735     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
2736     char * res = (char *)VirtualAlloc(NULL,
2737                                       bytes,
2738                                       flag,
2739                                       PAGE_EXECUTE_READWRITE);
2740     return res;
2741   }
2742 }
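
For comparison, a hedged standalone sketch of the simple one-shot large-page path (the else branch above) outside HotSpot: the process needs the "Lock pages in memory" privilege, and the size must be a multiple of GetLargePageMinimum().

    #include <windows.h>
    #include <cstdio>

    int main() {
      SIZE_T large_page = GetLargePageMinimum();
      if (large_page == 0) {
        printf("large pages not supported on this system\n");
        return 1;
      }
      void* p = VirtualAlloc(NULL, large_page,
                             MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
                             PAGE_READWRITE);
      printf("large page size: %lu bytes, allocation %s\n",
             (unsigned long)large_page, p ? "succeeded" : "failed (missing privilege?)");
      if (p != NULL) VirtualFree(p, 0, MEM_RELEASE);
      return 0;
    }
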
2743 
2744 bool os::release_memory_special(char* base, size_t bytes) {
2745   return release_memory(base, bytes);
2746 }
2747 
2748 void os::print_statistics() {
2749 }
2750 
2751 bool os::commit_memory(char* addr, size_t bytes) {
2752   if (bytes == 0) {
2753     // Don't bother the OS with noops.
2754     return true;
2755   }
2756   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
2757   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
2758   // Don't attempt to print anything if the OS call fails. We're
2759   // probably low on resources, so the print itself may cause crashes.
2760   return VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_EXECUTE_READWRITE) != NULL;
2761 }
2762 
2763 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint) {
2764   return commit_memory(addr, size);
2765 }
2766 
2767 bool os::uncommit_memory(char* addr, size_t bytes) {
2768   if (bytes == 0) {
2769     // Don't bother the OS with noops.
2770     return true;
2771   }
2772   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
2773   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
2774   return VirtualFree(addr, bytes, MEM_DECOMMIT) != 0;
2775 }
2776 
2777 bool os::release_memory(char* addr, size_t bytes) {
2778   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
2779 }
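
A standalone sketch (not part of this file) of the reserve/commit/decommit/release lifecycle that commit_memory, uncommit_memory and release_memory wrap above:

    #include <windows.h>

    int main() {
      SYSTEM_INFO si; GetSystemInfo(&si);
      SIZE_T page = si.dwPageSize;
      // Reserve address space only; no backing store yet.
      char* base = (char*)VirtualAlloc(NULL, 4 * page, MEM_RESERVE, PAGE_NOACCESS);
      if (base == NULL) return 1;
      // Commit the first page so it can actually be touched.
      VirtualAlloc(base, page, MEM_COMMIT, PAGE_READWRITE);
      base[0] = 1;
      // Give the physical backing back but keep the reservation.
      VirtualFree(base, page, MEM_DECOMMIT);
      // Release the whole reservation (size must be 0 with MEM_RELEASE).
      VirtualFree(base, 0, MEM_RELEASE);
      return 0;
    }
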
2780 
2781 // Set protections specified
2782 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2783                         bool is_committed) {
2784   unsigned int p = 0;
2785   switch (prot) {
2786   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
2787   case MEM_PROT_READ: p = PAGE_READONLY; break;
2788   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
2789   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
2790   default:
2791     ShouldNotReachHere();
2792   }
2793 
2794   DWORD old_status;
2795 
 2796   // Strangely enough, on Win32 one can change protection only for committed
 2797   // memory; not a big deal anyway, as bytes is less than or equal to 64K here
2798   if (!is_committed && !commit_memory(addr, bytes)) {
2799     fatal("cannot commit protection page");
2800   }
 2801   // One cannot use os::guard_memory() here, as on Win32 guard pages
 2802   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
2803   //
2804   // Pages in the region become guard pages. Any attempt to access a guard page
2805   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
2806   // the guard page status. Guard pages thus act as a one-time access alarm.
2807   return VirtualProtect(addr, bytes, p, &old_status) != 0;
2808 }
2809 
2810 bool os::guard_memory(char* addr, size_t bytes) {
2811   DWORD old_status;
2812   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
2813 }
2814 
2815 bool os::unguard_memory(char* addr, size_t bytes) {
2816   DWORD old_status;
2817   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
2818 }
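
A minimal standalone sketch of the VirtualProtect pattern behind protect_memory/guard_memory/unguard_memory above (illustrative only; error handling omitted). Note that PAGE_GUARD, used by guard_memory, is one-shot as the MSDN quote above explains, so this sketch only cycles between plain protections.

    #include <windows.h>
    #include <cstdio>

    int main() {
      SYSTEM_INFO si; GetSystemInfo(&si);
      char* p = (char*)VirtualAlloc(NULL, si.dwPageSize,
                                    MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
      if (p == NULL) return 1;
      p[0] = 42;                                                       // writable
      DWORD old_status;
      VirtualProtect(p, si.dwPageSize, PAGE_READONLY, &old_status);    // roughly MEM_PROT_READ
      printf("previous protection: 0x%lx\n", (unsigned long)old_status);
      VirtualProtect(p, si.dwPageSize, PAGE_READWRITE, &old_status);   // restore, like unguard_memory
      VirtualFree(p, 0, MEM_RELEASE);
      return 0;
    }
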
2819 
2820 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
2821 void os::free_memory(char *addr, size_t bytes)         { }
2822 void os::numa_make_global(char *addr, size_t bytes)    { }
2823 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
2824 bool os::numa_topology_changed()                       { return false; }
2825 size_t os::numa_get_groups_num()                       { return 1; }
2826 int os::numa_get_group_id()                            { return 0; }
2827 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2828   if (size > 0) {
2829     ids[0] = 0;
2830     return 1;
2831   }
2832   return 0;
2833 }
2834 
2835 bool os::get_page_info(char *start, page_info* info) {
2836   return false;
2837 }
2838 
2839 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2840   return end;
2841 }
2842 
2843 char* os::non_memory_address_word() {


3109 
3110 static int _initial_pid = 0;
3111 
3112 int os::current_process_id()
3113 {
3114   return (_initial_pid ? _initial_pid : _getpid());
3115 }
3116 
3117 int    os::win32::_vm_page_size       = 0;
3118 int    os::win32::_vm_allocation_granularity = 0;
3119 int    os::win32::_processor_type     = 0;
3120 // Processor level is not available on non-NT systems, use vm_version instead
3121 int    os::win32::_processor_level    = 0;
3122 julong os::win32::_physical_memory    = 0;
3123 size_t os::win32::_default_stack_size = 0;
3124 
3125          intx os::win32::_os_thread_limit    = 0;
3126 volatile intx os::win32::_os_thread_count    = 0;
3127 
3128 bool   os::win32::_is_nt              = false;
3129 bool   os::win32::_is_windows_2003    = false;
3130 
3131 
3132 void os::win32::initialize_system_info() {
3133   SYSTEM_INFO si;
3134   GetSystemInfo(&si);
3135   _vm_page_size    = si.dwPageSize;
3136   _vm_allocation_granularity = si.dwAllocationGranularity;
3137   _processor_type  = si.dwProcessorType;
3138   _processor_level = si.wProcessorLevel;
3139   _processor_count = si.dwNumberOfProcessors;
3140 
3141   MEMORYSTATUS ms;
3142   // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3143   // dwMemoryLoad (% of memory in use)
3144   GlobalMemoryStatus(&ms);
3145   _physical_memory = ms.dwTotalPhys;
3146 
3147   OSVERSIONINFO oi;
3148   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
3149   GetVersionEx(&oi);
3150   switch(oi.dwPlatformId) {
3151     case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
3152     case VER_PLATFORM_WIN32_NT:
3153       _is_nt = true;
3154       {
3155         int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3156         if (os_vers == 5002) {
3157           _is_windows_2003 = true;
3158         }
3159       }
3160       break;
3161     default: fatal("Unknown platform");
3162   }
3163 
3164   _default_stack_size = os::current_stack_size();
3165   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3166   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3167     "stack size not a multiple of page size");
3168 
3169   initialize_performance_counter();
3170 
3171   // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
 3172   // known to deadlock the system if the VM issues thread operations,
 3173   // e.g., priority changes, at too high a frequency.
 3174   // The value 6000 seems to work well - no deadlocks have been noticed on the
 3175   // test programs that we have seen experience this problem.
3176   if (!os::win32::is_nt()) {
3177     StarvationMonitorInterval = 6000;
3178   }
3179 }
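
A standalone sketch of the system queries made above; it uses GlobalMemoryStatusEx, the extended variant that avoids the 4GB truncation of GlobalMemoryStatus noted in the FIXME earlier (illustrative only, not part of this file).

    #include <windows.h>
    #include <cstdio>

    int main() {
      SYSTEM_INFO si;
      GetSystemInfo(&si);
      MEMORYSTATUSEX ms;
      ms.dwLength = sizeof(ms);
      GlobalMemoryStatusEx(&ms);
      printf("page size: %lu, allocation granularity: %lu, processors: %lu\n",
             si.dwPageSize, si.dwAllocationGranularity, si.dwNumberOfProcessors);
      printf("physical memory: %llu MB\n",
             (unsigned long long)(ms.ullTotalPhys >> 20));
      return 0;
    }
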
3180 


3238 }
3239 #endif // _WIN64
3240 #endif // PRODUCT
3241 
3242 // this is called _before_ the global arguments have been parsed
3243 void os::init(void) {
3244   _initial_pid = _getpid();
3245 
3246   init_random(1234567);
3247 
3248   win32::initialize_system_info();
3249   win32::setmode_streams();
3250   init_page_sizes((size_t) win32::vm_page_size());
3251 
3252   // For better scalability on MP systems (must be called after initialize_system_info)
3253 #ifndef PRODUCT
3254   if (is_MP()) {
3255     NoYieldsInMicrolock = true;
3256   }
3257 #endif
3258   // This may be overridden later when argument processing is done.
3259   FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
3260     os::win32::is_windows_2003());
3261 
3262   // Initialize main_process and main_thread
3263   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
 3264   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
3265                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
3266     fatal("DuplicateHandle failed\n");
3267   }
3268   main_thread_id = (int) GetCurrentThreadId();
3269 }
3270 
3271 // To install functions for atexit processing
3272 extern "C" {
3273   static void perfMemory_exit_helper() {
3274     perfMemory_exit();
3275   }
3276 }
3277 
3278 
3279 // this is called _after_ the global arguments have been parsed
3280 jint os::init_2(void) {
3281   // Allocate a single page and mark it as readable for safepoint polling


3351   size_t actual_reserve_size = stack_commit_size;
3352   if (stack_commit_size < default_reserve_size) {
3353     // If stack_commit_size == 0, we want this too
3354     actual_reserve_size = default_reserve_size;
3355   }
3356 
3357   JavaThread::set_stack_size_at_create(stack_commit_size);
3358 
 3359   // Calculate theoretical max. size of Threads to guard against artificial
3360   // out-of-memory situations, where all available address-space has been
3361   // reserved by thread stacks.
3362   assert(actual_reserve_size != 0, "Must have a stack");
3363 
3364   // Calculate the thread limit when we should start doing Virtual Memory
3365   // banging. Currently when the threads will have used all but 200Mb of space.
3366   //
3367   // TODO: consider performing a similar calculation for commit size instead
 3368   // of reserve size, since on a 64-bit platform we'll run into that more
3369   // often than running out of virtual memory space.  We can use the
3370   // lower value of the two calculations as the os_thread_limit.
3371   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
3372   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
3373 
3374   // at exit methods are called in the reverse order of their registration.
3375   // there is no limit to the number of functions registered. atexit does
3376   // not set errno.
3377 
3378   if (PerfAllowAtExitRegistration) {
3379     // only register atexit functions if PerfAllowAtExitRegistration is set.
3380     // atexit functions can be delayed until process exit time, which
3381     // can be problematic for embedded VM situations. Embedded VMs should
3382     // call DestroyJavaVM() to assure that VM resources are released.
3383 
3384     // note: perfMemory_exit_helper atexit function may be removed in
3385     // the future if the appropriate cleanup code can be added to the
3386     // VM_Exit VMOperation's doit method.
3387     if (atexit(perfMemory_exit_helper) != 0) {
3388       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3389     }
3390   }
3391 
3392   // initialize PSAPI or ToolHelp for fatal error handler
3393   if (win32::is_nt()) _init_psapi();
3394   else _init_toolhelp();
3395 
3396 #ifndef _WIN64
3397   // Print something if NX is enabled (win32 on AMD64)
3398   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
3399 #endif
3400 
3401   // initialize thread priority policy
3402   prio_init();
3403 
3404   if (UseNUMA && !ForceNUMA) {
3405     UseNUMA = false; // Currently unsupported.
3406   }
3407 
3408   return JNI_OK;
3409 }
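
A worked instance of the thread-limit formula in init_2 above (illustrative numbers, not from this file): on a 32-bit VM with an assumed 1 MB per-thread reserve size, the limit comes out to roughly 1848 threads.

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t K = 1024;
      size_t max_address_space = ((size_t)1 << 31) - 200 * K * K;   // 2048 MB - 200 MB
      size_t actual_reserve_size = 1 * K * K;                       // assumed 1 MB stack reserve
      printf("os_thread_limit ~ %zu\n",
             max_address_space / actual_reserve_size);              // ~1848
      return 0;
    }
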
3410 
3411 
3412 // Mark the polling page as unreadable
3413 void os::make_polling_page_unreadable(void) {
3414   DWORD old_status;
3415   if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
3416     fatal("Could not disable polling page");
3417 };
3418 
3419 // Mark the polling page as readable
3420 void os::make_polling_page_readable(void) {
3421   DWORD old_status;
3422   if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
3423     fatal("Could not enable polling page");
3424 };
3425 
3426 
3427 int os::stat(const char *path, struct stat *sbuf) {