src/os/windows/vm/os_windows.cpp

rev 2677 : cleanup numa_get_leaf_groups in os_windows.cpp


1947       assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
1948 
1949       // were we externally suspended while we were waiting?
1950       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1951       if (threadIsSuspended) {
1952         //
1953         // The semaphore has been incremented, but while we were waiting
1954         // another thread suspended us. We don't want to continue running
1955         // while suspended because that would surprise the thread that
1956         // suspended us.
1957         //
1958         ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
1959         assert(ret != 0, "ReleaseSemaphore() failed");
1960 
1961         thread->java_suspend_self();
1962       }
1963     } while (threadIsSuspended);
1964   }
1965 }
1966 

1967 int os::signal_lookup() {
1968   return check_pending_signals(false);
1969 }
1970 
1971 int os::signal_wait() {
1972   return check_pending_signals(true);
1973 }
1974 
1975 // Implicit OS exception handling
1976 
1977 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
1978   JavaThread* thread = JavaThread::current();
1979   // Save pc in thread
1980 #ifdef _M_IA64
1981   thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->StIIP);
1982   // Set pc to handler
1983   exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
1984 #elif _M_AMD64
1985   thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Rip);
1986   // Set pc to handler


2597 //   + double click "Lock pages in memory", add users and/or groups
2598 //   + reboot
2599 // Note the above steps are needed for administrator as well, as administrators
2600 // by default do not have the privilege to lock pages in memory.
2601 //
2602 // Note about Windows 2003: although the API supports committing large page
2603 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2604 // scenario, I found through experiment that it only uses large pages if the entire
2605 // memory region is reserved and committed in a single VirtualAlloc() call.
2606 // This makes Windows large page support more or less like Solaris ISM, in
2607 // that the entire heap must be committed upfront. This probably will change
2608 // in the future; if so, the code below needs to be revisited.
2609 
2610 #ifndef MEM_LARGE_PAGES
2611 #define MEM_LARGE_PAGES 0x20000000
2612 #endif
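
To make the single-call requirement above concrete: a minimal standalone sketch (illustration only, not part of this change) that reserves and commits large pages in one VirtualAlloc() call. It assumes the "Lock pages in memory" steps above are done and, unlike this file, calls GetLargePageMinimum() directly instead of resolving it dynamically.

#include <windows.h>

// Sketch: per the Windows 2003 note above, reserve and commit in a single
// call; committing page-by-page later may silently fall back to small pages.
static char* alloc_large_pages(size_t bytes) {
  const size_t lp = GetLargePageMinimum();   // 0 when the OS lacks large page support
  if (lp == 0) return NULL;
  bytes = (bytes + lp - 1) & ~(lp - 1);      // size must be a multiple of the large page size
  return (char*) VirtualAlloc(NULL, bytes,
                              MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
                              PAGE_READWRITE);
}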
2613 
2614 static HANDLE    _hProcess;
2615 static HANDLE    _hToken;
2616 
2617 static size_t _large_page_size = 0;
2618 
2619 static bool resolve_functions_for_large_page_init() {
2620   return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
2621     os::Advapi32Dll::AdvapiAvailable();
2622 }
2623 
2624 static bool request_lock_memory_privilege() {
2625   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2626                                 os::current_process_id());
2627 
2628   LUID luid;
2629   if (_hProcess != NULL &&
2630       os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2631       os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2632 
2633     TOKEN_PRIVILEGES tp;
2634     tp.PrivilegeCount = 1;
2635     tp.Privileges[0].Luid = luid;
2636     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2637 
2638     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2639     // privilege. Check GetLastError() too. See MSDN document.
2640     if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2641         (GetLastError() == ERROR_SUCCESS)) {
2642       return true;
2643     }
2644   }
2645 
2646   return false;
2647 }
2648 
2649 static void cleanup_after_large_page_init() {
2650   if (_hProcess) CloseHandle(_hProcess);
2651   _hProcess = NULL;
2652   if (_hToken) CloseHandle(_hToken);
2653   _hToken = NULL;
2654 }
2655 
2656 void os::large_page_init() {
2657   if (!UseLargePages) return;
2658 
2659   // print a warning if any large page related flag is specified on command line
2660   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
2661                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2662   bool success = false;
2663 
2664 # define WARN(msg) if (warn_on_failure) { warning(msg); }
2665   if (resolve_functions_for_large_page_init()) {
2666     if (request_lock_memory_privilege()) {
2667       size_t s = os::Kernel32Dll::GetLargePageMinimum();
2668       if (s) {
2669 #if defined(IA32) || defined(AMD64)
2670         if (s > 4*M || LargePageSizeInBytes > 4*M) {
2671           WARN("JVM cannot use large pages bigger than 4MB.");
2672         } else {
2673 #endif
2674           if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
2675             _large_page_size = LargePageSizeInBytes;


2705 // On win32, one cannot release just a part of reserved memory, it's an
2706 // all or nothing deal.  When we split a reservation, we must break the
2707 // reservation into two reservations.
2708 void os::split_reserved_memory(char *base, size_t size, size_t split,
2709                               bool realloc) {
2710   if (size > 0) {
2711     release_memory(base, size);
2712     if (realloc) {
2713       reserve_memory(split, base);
2714     }
2715     if (size != split) {
2716       reserve_memory(size - split, base + split);
2717     }
2718   }
2719 }
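
The all-or-nothing behavior comes from VirtualFree(): with MEM_RELEASE the size must be 0 and the address must be the exact base of the original reservation, so a sub-range can never be released directly. A hypothetical sketch of the failing case ('granularity' is an assumption standing in for os::vm_allocation_granularity()):

const size_t granularity = 64 * 1024;   // assumption: Windows allocation granularity
char* base = (char*) VirtualAlloc(NULL, 2 * granularity, MEM_RESERVE, PAGE_READWRITE);
// Partial release is rejected: MEM_RELEASE requires size 0 and the exact
// reservation base, hence the release-and-re-reserve dance above.
BOOL ok = VirtualFree(base + granularity, 0, MEM_RELEASE);   // FALSE: not a reservation base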
2720 
2721 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
2722   assert((size_t)addr % os::vm_allocation_granularity() == 0,
2723          "reserve alignment");
2724   assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
2725   char* res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
2726   assert(res == NULL || addr == NULL || addr == res,
2727          "Unexpected address from reserve.");
2728   return res;
2729 }
2730 
2731 // Reserve memory at an arbitrary address, only if that area is
2732 // available (and not reserved for something else).
2733 char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2734   // Windows os::reserve_memory() fails if the requested address range is
2735   // not available.
2736   return reserve_memory(bytes, requested_addr);
2737 }
2738 
2739 size_t os::large_page_size() {
2740   return _large_page_size;
2741 }
2742 
2743 bool os::can_commit_large_page_memory() {
2744   // Windows only uses large page memory when the entire region is reserved
2745   // and committed in a single VirtualAlloc() call. This may change in the
2746   // future, but with Windows 2003 it's not possible to commit on demand.
2747   return false;
2748 }
2749 
2750 bool os::can_execute_large_page_memory() {
2751   return true;
2752 }
2753 
2754 char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
2755 
2756   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
2757 
2758   if (UseLargePagesIndividualAllocation) {
2759     if (TracePageSizes && Verbose) {
2760        tty->print_cr("Reserving large pages individually.");
2761     }
2762     char * p_buf;
2763     // first reserve enough address space in advance since we want to be
2764     // able to break a single contiguous virtual address range into multiple
2765     // large page commits. WS2003 does not allow reserving large page space,
2766     // so we just use 4K pages for the reserve; this gives us a legal contiguous
2767     // address space. Then we deallocate that reservation and re-allocate
2768     // using large pages.
2769     const size_t size_of_reserve = bytes + _large_page_size;
2770     if (bytes > size_of_reserve) {
2771       // Overflowed.
2772       warning("Individually allocated large pages failed, "
2773         "use -XX:-UseLargePagesIndividualAllocation to turn off");
2774       return NULL;
2775     }
2776     p_buf = (char *) VirtualAlloc(addr,
2777                                  size_of_reserve,  // size of Reserve
2778                                  MEM_RESERVE,
2779                                  PAGE_READWRITE);
2780     // If reservation failed, return NULL
2781     if (p_buf == NULL) return NULL;
2782 
2783     release_memory(p_buf, bytes + _large_page_size);
2784     // round up to page boundary.  If the size_of_reserve did not
2785     // overflow and the reservation did not fail, this align up
2786     // should not overflow.
2787     p_buf = (char *) align_size_up((size_t)p_buf, _large_page_size);
2788 
2789     // now go through and allocate one page at a time until all bytes are
2790     // allocated
2791     size_t  bytes_remaining = align_size_up(bytes, _large_page_size);
2792     // An overflow of align_size_up() would have been caught above
2793     // in the calculation of size_of_reserve.
2794     char * next_alloc_addr = p_buf;
2795 
2796 #ifdef ASSERT
2797     // Variable for the failure injection
2798     long ran_num = os::random();
2799     size_t fail_after = ran_num % bytes;
2800 #endif
2801 
2802     while (bytes_remaining) {
2803       size_t bytes_to_rq = MIN2(bytes_remaining, _large_page_size);
2804       // Note allocate and commit
2805       char * p_new;
2806 
2807 #ifdef ASSERT
2808       bool inject_error = LargePagesIndividualAllocationInjectError &&
2809           (bytes_remaining <= fail_after);
2810 #else
2811       const bool inject_error = false;
2812 #endif
2813 
2814       if (inject_error) {
2815         p_new = NULL;
2816       } else {
2817         p_new = (char *) VirtualAlloc(next_alloc_addr,
2818                                     bytes_to_rq,
2819                                     MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
2820                                     prot);
2821       }
2822 
2823       if (p_new == NULL) {
2824         // Free any allocated pages
2825         if (next_alloc_addr > p_buf) {
2826           // Some memory was committed so release it.
2827           size_t bytes_to_release = bytes - bytes_remaining;
2828           release_memory(p_buf, bytes_to_release);
2829         }
2830 #ifdef ASSERT
2831         if (UseLargePagesIndividualAllocation &&
2832             LargePagesIndividualAllocationInjectError) {
2833           if (TracePageSizes && Verbose) {
2834              tty->print_cr("Reserving large pages individually failed.");
2835           }
2836         }
2837 #endif
2838         return NULL;
2839       }
2840       bytes_remaining -= bytes_to_rq;
2841       next_alloc_addr += bytes_to_rq;
2842     }
2843 
2844     return p_buf;
2845 
2846   } else {
2847     // normal policy just allocate it all at once
2848     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
2849     char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
2850     return res;
2851   }
2852 }
2853 
2854 bool os::release_memory_special(char* base, size_t bytes) {
2855   return release_memory(base, bytes);
2856 }
2857 
2858 void os::print_statistics() {
2859 }
2860 
2861 bool os::commit_memory(char* addr, size_t bytes, bool exec) {
2862   if (bytes == 0) {
2863     // Don't bother the OS with noops.
2864     return true;
2865   }
2866   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
2867   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
2868   // Don't attempt to print anything if the OS call fails. We're
2869   // probably low on resources, so the print itself may cause crashes.
2870   bool result = VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) != 0;
2871   if (result != NULL && exec) {
2872     DWORD oldprot;
2873     // Windows doc says to use VirtualProtect to get execute permissions
2874     return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot) != 0;
2875   } else {
2876     return result;
2877   }
2878 }
2879 
2880 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
2881                        bool exec) {
2882   return commit_memory(addr, size, exec);
2883 }
2884 
2885 bool os::uncommit_memory(char* addr, size_t bytes) {
2886   if (bytes == 0) {
2887     // Don't bother the OS with noops.
2888     return true;
2889   }
2890   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
2891   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
2892   return VirtualFree(addr, bytes, MEM_DECOMMIT) != 0;
2893 }
2894 
2895 bool os::release_memory(char* addr, size_t bytes) {
2896   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
2897 }


2931   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
2932   // the guard page status. Guard pages thus act as a one-time access alarm.
2933   return VirtualProtect(addr, bytes, p, &old_status) != 0;
2934 }
2935 
2936 bool os::guard_memory(char* addr, size_t bytes) {
2937   DWORD old_status;
2938   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
2939 }
2940 
2941 bool os::unguard_memory(char* addr, size_t bytes) {
2942   DWORD old_status;
2943   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
2944 }
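
To illustrate the one-time alarm described above, a standalone MSVC SEH sketch (illustration only, not part of this file): the first touch of a guarded page raises STATUS_GUARD_PAGE_VIOLATION and clears the guard status, so the second touch succeeds.

#include <windows.h>
#include <stdio.h>

int main() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  char* p = (char*) VirtualAlloc(NULL, si.dwPageSize,
                                 MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  DWORD old_status;
  VirtualProtect(p, si.dwPageSize, PAGE_READWRITE | PAGE_GUARD, &old_status);
  __try {
    p[0] = 1;                                // raises the one-time guard alarm
  } __except (GetExceptionCode() == EXCEPTION_GUARD_PAGE
                  ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
    printf("guard page alarm fired\n");      // guard status is now cleared
  }
  p[0] = 2;                                  // succeeds: no second alarm
  VirtualFree(p, 0, MEM_RELEASE);
  return 0;
}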
2945 
2946 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
2947 void os::free_memory(char *addr, size_t bytes)         { }
2948 void os::numa_make_global(char *addr, size_t bytes)    { }
2949 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
2950 bool os::numa_topology_changed()                       { return false; }
2951 size_t os::numa_get_groups_num()                       { return 1; }
2952 int os::numa_get_group_id()                            { return 0; }
2953 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2954   if (size > 0) {
2955     ids[0] = 0;
2956     return 1;
2957   }
2958   return 0;
2959 }
2960 
2961 bool os::get_page_info(char *start, page_info* info) {
2962   return false;
2963 }
2964 
2965 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2966   return end;
2967 }
2968 
2969 char* os::non_memory_address_word() {
2970   // Must never look like an address returned by reserve_memory,
2971   // even in its subfields (as defined by the CPU immediate fields,
2972   // if the CPU splits constants across multiple instructions).
2973   return (char*)-1;
2974 }
2975 
2976 #define MAX_ERROR_COUNT 100
2977 #define SYS_THREAD_ERROR 0xffffffffUL
2978 


3463   os::set_polling_page( polling_page );
3464 
3465 #ifndef PRODUCT
3466   if( Verbose && PrintMiscellaneous )
3467     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
3468 #endif
3469 
3470   if (!UseMembar) {
3471     address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
3472     guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
3473 
3474     return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
3475     guarantee( return_page != NULL, "Commit Failed for memory serialize page");
3476 
3477     os::set_memory_serialize_page( mem_serialize_page );
3478 
3479 #ifndef PRODUCT
3480     if(Verbose && PrintMiscellaneous)
3481       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3482 #endif
3483 }
3484 
3485   os::large_page_init();
3486 
3487   // Setup Windows Exceptions
3488 
3489   // On Itanium systems, Structured Exception Handling does not
3490   // work since stack frames must be walkable by the OS.  Since
3491   // much of our code is dynamically generated, and we do not have
3492   // proper unwind .xdata sections, the system simply exits
3493   // rather than delivering the exception.  To work around
3494   // this we use VectorExceptions instead.
3495 #ifdef _WIN64
3496   if (UseVectoredExceptions) {
3497     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelExceptionFilter);
3498   }
3499 #endif
3500 
3501   // for debugging float code generation bugs
3502   if (ForceFloatExceptions) {
3503 #ifndef  _WIN64


3566     // atexit functions can be delayed until process exit time, which
3567     // can be problematic for embedded VM situations. Embedded VMs should
3568     // call DestroyJavaVM() to assure that VM resources are released.
3569 
3570     // note: perfMemory_exit_helper atexit function may be removed in
3571     // the future if the appropriate cleanup code can be added to the
3572     // VM_Exit VMOperation's doit method.
3573     if (atexit(perfMemory_exit_helper) != 0) {
3574       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3575     }
3576   }
3577 
3578 #ifndef _WIN64
3579   // Print something if NX is enabled (win32 on AMD64)
3580   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
3581 #endif
3582 
3583   // initialize thread priority policy
3584   prio_init();
3585 
3586   if (UseNUMA && !ForceNUMA) {
3587     UseNUMA = false; // Currently unsupported.
3588   }
3589 
3590   return JNI_OK;
3591 }
3592 
3593 void os::init_3(void) {
3594   return;
3595 }
3596 
3597 // Mark the polling page as unreadable
3598 void os::make_polling_page_unreadable(void) {
3599   DWORD old_status;
3600   if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
3601     fatal("Could not disable polling page");
3602 };
3603 
3604 // Mark the polling page as readable
3605 void os::make_polling_page_readable(void) {
3606   DWORD old_status;
3607   if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )


4741 int os::get_sock_name(int fd, struct sockaddr *him, int *len) {
4742   ShouldNotReachHere();
4743   return 0;
4744 }
4745 
4746 int os::get_sock_opt(int fd, int level, int optname,
4747                              char *optval, int* optlen) {
4748   ShouldNotReachHere();
4749   return 0;
4750 }
4751 
4752 int os::set_sock_opt(int fd, int level, int optname,
4753                              const char *optval, int optlen) {
4754   ShouldNotReachHere();
4755   return 0;
4756 }
4757 
4758 
4759 // Kernel32 API
4760 typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
4761 GetLargePageMinimum_Fn      os::Kernel32Dll::_GetLargePageMinimum = NULL;
4762 BOOL                        os::Kernel32Dll::initialized = FALSE;
4763 SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
4764   assert(initialized && _GetLargePageMinimum != NULL,
4765     "GetLargePageMinimumAvailable() not yet called");
4766   return _GetLargePageMinimum();
4767 }
4768 
4769 BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
4770   if (!initialized) {
4771     initialize();
4772   }
4773   return _GetLargePageMinimum != NULL;
4774 }
4775 
4776 
4777 #ifndef JDK6_OR_EARLIER
4778 
4779 void os::Kernel32Dll::initialize() {
4780   if (!initialized) {
4781     HMODULE handle = ::GetModuleHandle("Kernel32.dll");
4782     assert(handle != NULL, "Just check");
4783     _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
4784     initialized = TRUE;
4785   }
4786 }
4787 
4788 
4789 // Kernel32 API
4790 inline BOOL os::Kernel32Dll::SwitchToThread() {
4791   return ::SwitchToThread();
4792 }
4793 
4794 inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
4795   return true;
4796 }
4797 
4798   // Help tools
4799 inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
4800   return true;
4801 }
4802 
4803 inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
4804   return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
4805 }
4806 
4807 inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
4808   return ::Module32First(hSnapshot, lpme);


4870 }
4871 
4872 inline BOOL os::Advapi32Dll::AdvapiAvailable() {
4873   return true;
4874 }
4875 
4876 #else
4877 // Kernel32 API
4878 typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
4879 typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
4880 typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
4881 typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
4882 typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);
4883 
4884 SwitchToThread_Fn           os::Kernel32Dll::_SwitchToThread = NULL;
4885 CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
4886 Module32First_Fn            os::Kernel32Dll::_Module32First = NULL;
4887 Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
4888 GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;
4889 
4890 void os::Kernel32Dll::initialize() {
4891   if (!initialized) {
4892     HMODULE handle = ::GetModuleHandle("Kernel32.dll");
4893     assert(handle != NULL, "Just check");
4894 
4895     _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
4896     _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
4897     _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
4898       ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
4899     _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
4900     _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
4901     _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
4902 
4903     initialized = TRUE;
4904   }
4905 }
4906 
4907 BOOL os::Kernel32Dll::SwitchToThread() {
4908   assert(initialized && _SwitchToThread != NULL,
4909     "SwitchToThreadAvailable() not yet called");
4910   return _SwitchToThread();
4911 }
4912 
4913 
4914 BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
4915   if (!initialized) {
4916     initialize();
4917   }
4918   return _SwitchToThread != NULL;
4919 }
4920 
4921 // Help tools


4947     "HelpToolsAvailable() not yet called");
4948 
4949   return _Module32Next(hSnapshot, lpme);
4950 }
4951 
4952 
4953 BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
4954   if (!initialized) {
4955     initialize();
4956   }
4957   return _GetNativeSystemInfo != NULL;
4958 }
4959 
4960 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
4961   assert(initialized && _GetNativeSystemInfo != NULL,
4962     "GetNativeSystemInfoAvailable() not yet called");
4963 
4964   _GetNativeSystemInfo(lpSystemInfo);
4965 }
4966 
4967 // PSAPI API
4968 
4969 
4970 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
4971 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);
4972 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
4973 
4974 EnumProcessModules_Fn   os::PSApiDll::_EnumProcessModules = NULL;
4975 GetModuleFileNameEx_Fn  os::PSApiDll::_GetModuleFileNameEx = NULL;
4976 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
4977 BOOL                    os::PSApiDll::initialized = FALSE;
4978 
4979 void os::PSApiDll::initialize() {
4980   if (!initialized) {
4981     HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
4982     if (handle != NULL) {
4983       _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
4984         "EnumProcessModules");
4985       _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
4986         "GetModuleFileNameExA");




1947       assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
1948 
1949       // were we externally suspended while we were waiting?
1950       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1951       if (threadIsSuspended) {
1952         //
1953         // The semaphore has been incremented, but while we were waiting
1954         // another thread suspended us. We don't want to continue running
1955         // while suspended because that would surprise the thread that
1956         // suspended us.
1957         //
1958         ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
1959         assert(ret != 0, "ReleaseSemaphore() failed");
1960 
1961         thread->java_suspend_self();
1962       }
1963     } while (threadIsSuspended);
1964   }
1965 }
1966 
1967 
1968 int os::signal_lookup() {
1969   return check_pending_signals(false);
1970 }
1971 
1972 int os::signal_wait() {
1973   return check_pending_signals(true);
1974 }
1975 
1976 // Implicit OS exception handling
1977 
1978 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
1979   JavaThread* thread = JavaThread::current();
1980   // Save pc in thread
1981 #ifdef _M_IA64
1982   thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->StIIP);
1983   // Set pc to handler
1984   exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
1985 #elif _M_AMD64
1986   thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Rip);
1987   // Set pc to handler


2598 //   + double click "Lock pages in memory", add users and/or groups
2599 //   + reboot
2600 // Note the above steps are needed for administrator as well, as administrators
2601 // by default do not have the privilege to lock pages in memory.
2602 //
2603 // Note about Windows 2003: although the API supports committing large page
2604 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2605 // scenario, I found through experiment that it only uses large pages if the entire
2606 // memory region is reserved and committed in a single VirtualAlloc() call.
2607 // This makes Windows large page support more or less like Solaris ISM, in
2608 // that the entire heap must be committed upfront. This probably will change
2609 // in the future; if so, the code below needs to be revisited.
2610 
2611 #ifndef MEM_LARGE_PAGES
2612 #define MEM_LARGE_PAGES 0x20000000
2613 #endif
2614 
2615 static HANDLE    _hProcess;
2616 static HANDLE    _hToken;
2617 
2618 // Container for NUMA node list info
2619 class NUMANodeListHolder {
2620 private:
2621   int *_numa_used_node_list;  // allocated below
2622   int _numa_used_node_count;
2623 
2624   void free_node_list() {
2625     if (_numa_used_node_list != NULL) {
2626       FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2627     }
2628   }
2629 
2630 public:
2631   NUMANodeListHolder() {
2632     _numa_used_node_count = 0;
2633     _numa_used_node_list = NULL;
2634     // do rest of initialization in build routine (after function pointers are set up)
2635   }
2636 
2637   ~NUMANodeListHolder() {
2638     free_node_list();
2639   }
2640 
2641   bool build() {
2642     DWORD_PTR proc_aff_mask;
2643     DWORD_PTR sys_aff_mask;
2644     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2645     ULONG highest_node_number;
2646     if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
2647     free_node_list();
2648     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1);  // node ids run 0..highest_node_number
2649     for (unsigned int i = 0; i <= highest_node_number; i++) {
2650       ULONGLONG proc_mask_numa_node;
2651       if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2652       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2653         _numa_used_node_list[_numa_used_node_count++] = i;
2654       }
2655     }
2656     return (_numa_used_node_count > 1);
2657   }
2658 
2659   int get_count() {return _numa_used_node_count;}
2660   int get_node_list_entry(int n) {
2661     // for indexes out of range, returns -1
2662     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2663   }
2664 
2665 } numa_node_list_holder;
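
A hypothetical usage sketch of the holder above; the real call sites are numa_interleaving_init() and allocate_pages_individually() below.

if (numa_node_list_holder.build()) {
  const int n_chunks = 8;   // hypothetical chunk count, for illustration only
  for (int chunk = 0; chunk < n_chunks; chunk++) {
    // round-robin over the nodes covered by this process's affinity mask
    int node = numa_node_list_holder.get_node_list_entry(chunk % numa_node_list_holder.get_count());
    // node is an OS NUMA node number suitable for VirtualAllocExNuma()
  }
}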
2666 
2667 
2668 
2669 static size_t _large_page_size = 0;
2670 
2671 static bool resolve_functions_for_large_page_init() {
2672   return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
2673     os::Advapi32Dll::AdvapiAvailable();
2674 }
2675 
2676 static bool request_lock_memory_privilege() {
2677   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2678                                 os::current_process_id());
2679 
2680   LUID luid;
2681   if (_hProcess != NULL &&
2682       os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2683       os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2684 
2685     TOKEN_PRIVILEGES tp;
2686     tp.PrivilegeCount = 1;
2687     tp.Privileges[0].Luid = luid;
2688     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2689 
2690     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2691     // privilege. Check GetLastError() too. See MSDN document.
2692     if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2693         (GetLastError() == ERROR_SUCCESS)) {
2694       return true;
2695     }
2696   }
2697 
2698   return false;
2699 }
2700 
2701 static void cleanup_after_large_page_init() {
2702   if (_hProcess) CloseHandle(_hProcess);
2703   _hProcess = NULL;
2704   if (_hToken) CloseHandle(_hToken);
2705   _hToken = NULL;
2706 }
2707 
2708 static bool numa_interleaving_init() {
2709   bool success = false;
2710   bool use_numa_specified = !FLAG_IS_DEFAULT(UseNUMA);
2711   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2712 
2713   // print a warning if UseNUMA or UseNUMAInterleaving flag is specified on command line
2714   bool warn_on_failure =  use_numa_specified || use_numa_interleaving_specified;
2715 # define WARN(msg) if (warn_on_failure) { warning(msg); }
2716 
2717   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2718   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2719   NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2720 
2721   if (os::Kernel32Dll::NumaCallsAvailable()) {
2722     if (numa_node_list_holder.build()) {
2723       if (PrintMiscellaneous && Verbose) {
2724         tty->print("NUMA UsedNodeCount=%d, namely ", (int)os::numa_get_groups_num());
2725         for (int i = 0; i < (int)os::numa_get_groups_num(); i++) {
2726           tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
2727         }
2728         tty->print("\n");
2729       }
2730       success = true;
2731     } else {
2732       WARN("Process does not cover multiple NUMA nodes.");
2733     }
2734   } else {
2735     WARN("NUMA Interleaving is not supported by the operating system.");
2736   }
2737   if (!success) {
2738     if (use_numa_specified) WARN("...Ignoring UseNUMA flag.");
2739     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2740   }
2741   return success;
2742 #undef WARN
2743 }
2744 
2745 // this routine is used whenever we need to reserve a contiguous VA range
2746 // but we need to make separate VirtualAlloc calls for each piece of the range
2747 // Reasons for doing this:
2748 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2749 //  * UseNUMAInterleaving requires a separate node for each piece
2750 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
2751                                          bool should_inject_error=false) {
2752   char * p_buf;
2753   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2754   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2755   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2756 
2757   // first reserve enough address space in advance since we want to be
2758   // able to break a single contiguous virtual address range into multiple
2759   // large page commits. WS2003 does not allow reserving large page space,
2760   // so we just use 4K pages for the reserve; this gives us a legal contiguous
2761   // address space. Then we deallocate that reservation and re-allocate
2762   // using large pages.
2763   const size_t size_of_reserve = bytes + chunk_size;
2764   if (bytes > size_of_reserve) {
2765     // Overflowed.
2766     return NULL;
2767   }
2768   p_buf = (char *) VirtualAlloc(addr,
2769                                 size_of_reserve,  // size of Reserve
2770                                 MEM_RESERVE,
2771                                 PAGE_READWRITE);
2772   // If reservation failed, return NULL
2773   if (p_buf == NULL) return NULL;
2774 
2775   os::release_memory(p_buf, bytes + chunk_size);
2776 
2777   // we still need to round up to a page boundary (in case we are using large pages)
2778   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
2779   // instead we handle this in the bytes_to_rq computation below
2780   p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2781 
2782   // now go through and allocate one chunk at a time until all bytes are
2783   // allocated
2784   size_t  bytes_remaining = bytes;
2785   // An overflow of align_size_up() would have been caught above
2786   // in the calculation of size_of_reserve.
2787   char * next_alloc_addr = p_buf;
2788   HANDLE hProc = GetCurrentProcess();
2789 
2790 #ifdef ASSERT
2791   // Variable for the failure injection
2792   long ran_num = os::random();
2793   size_t fail_after = ran_num % bytes;
2794 #endif
2795 
2796   int count=0;
2797   while (bytes_remaining) {
2798     // select bytes_to_rq to get to the next chunk_size boundary
2799 
2800     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2801     // Note allocate and commit
2802     char * p_new;
2803 
2804 #ifdef ASSERT
2805     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2806 #else
2807     const bool inject_error_now = false;
2808 #endif
2809 
2810     if (inject_error_now) {
2811       p_new = NULL;
2812     } else {
2813       if (!UseNUMAInterleaving) {
2814         p_new = (char *) VirtualAlloc(next_alloc_addr,
2815                                       bytes_to_rq,
2816                                       flags,
2817                                       prot);
2818       } else {
2819         // get the next node to use from the used_node_list
2820         DWORD node = numa_node_list_holder.get_node_list_entry(count % os::numa_get_groups_num());
2821         p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
2822                                                             next_alloc_addr,
2823                                                             bytes_to_rq,
2824                                                             flags,
2825                                                             prot,
2826                                                             node);
2827       }
2828     }
2829 
2830     if (p_new == NULL) {
2831       // Free any allocated pages
2832       if (next_alloc_addr > p_buf) {
2833         // Some memory was committed so release it.
2834         size_t bytes_to_release = bytes - bytes_remaining;
2835         os::release_memory(p_buf, bytes_to_release);
2836       }
2837 #ifdef ASSERT
2838       if (should_inject_error) {
2839         if (TracePageSizes && Verbose) {
2840           tty->print_cr("Reserving pages individually failed.");
2841         }
2842       }
2843 #endif
2844       return NULL;
2845     }
2846     bytes_remaining -= bytes_to_rq;
2847     next_alloc_addr += bytes_to_rq;
2848     count++;
2849   }
2850   // made it this far, success
2851   return p_buf;
2852 }
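
Schematically, the two call sites below exercise this routine as follows (names as in this file; 'prot' as computed in reserve_memory_special()):

// NUMA interleaving: reserve only, the commit happens later chunk by chunk
char* r = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
// Large pages: reserve and commit together, with optional debug-build failure injection
char* l = allocate_pages_individually(bytes, addr,
                                      MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
                                      prot, LargePagesIndividualAllocationInjectError);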
2853 
2854 
2855 
2856 void os::large_page_init() {
2857   if (!UseLargePages) return;
2858 
2859   // print a warning if any large page related flag is specified on command line
2860   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
2861                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2862   bool success = false;
2863 
2864 # define WARN(msg) if (warn_on_failure) { warning(msg); }
2865   if (resolve_functions_for_large_page_init()) {
2866     if (request_lock_memory_privilege()) {
2867       size_t s = os::Kernel32Dll::GetLargePageMinimum();
2868       if (s) {
2869 #if defined(IA32) || defined(AMD64)
2870         if (s > 4*M || LargePageSizeInBytes > 4*M) {
2871           WARN("JVM cannot use large pages bigger than 4MB.");
2872         } else {
2873 #endif
2874           if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
2875             _large_page_size = LargePageSizeInBytes;


2905 // On win32, one cannot release just a part of reserved memory, it's an
2906 // all or nothing deal.  When we split a reservation, we must break the
2907 // reservation into two reservations.
2908 void os::split_reserved_memory(char *base, size_t size, size_t split,
2909                               bool realloc) {
2910   if (size > 0) {
2911     release_memory(base, size);
2912     if (realloc) {
2913       reserve_memory(split, base);
2914     }
2915     if (size != split) {
2916       reserve_memory(size - split, base + split);
2917     }
2918   }
2919 }
2920 
2921 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
2922   assert((size_t)addr % os::vm_allocation_granularity() == 0,
2923          "reserve alignment");
2924   assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
2925   char* res;
2926   // note that if UseLargePages is on, all the areas that require interleaving
2927   // will go through reserve_memory_special rather than through here.
2928   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
2929   if (!use_individual) {
2930     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
2931   } else {
2932     elapsedTimer reserveTimer;
2933     if( Verbose && PrintMiscellaneous ) reserveTimer.start();
2934     // in numa interleaving, we have to allocate pages individually
2935     // (well really chunks of allocation granularity size)
2936     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
2937     if( Verbose && PrintMiscellaneous ) {
2938       reserveTimer.stop();
2939       tty->print_cr("reserve_memory of %Ix bytes took %ld ms (%ld ticks)", bytes,
2940                     reserveTimer.milliseconds(), reserveTimer.ticks());
2941     }
2942   }
2943   assert(res == NULL || addr == NULL || addr == res,
2944          "Unexpected address from reserve.");
2945
2946   return res;
2947 }
2948 
2949 // Reserve memory at an arbitrary address, only if that area is
2950 // available (and not reserved for something else).
2951 char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2952   // Windows os::reserve_memory() fails if the requested address range is
2953   // not available.
2954   return reserve_memory(bytes, requested_addr);
2955 }
2956 
2957 size_t os::large_page_size() {
2958   return _large_page_size;
2959 }
2960 
2961 bool os::can_commit_large_page_memory() {
2962   // Windows only uses large page memory when the entire region is reserved
2963   // and committed in a single VirtualAlloc() call. This may change in the
2964   // future, but with Windows 2003 it's not possible to commit on demand.
2965   return false;
2966 }
2967 
2968 bool os::can_execute_large_page_memory() {
2969   return true;
2970 }
2971 
2972 char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
2973 
2974   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
2975   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
2976 
2977   // with large pages, there are two cases where we need to use Individual Allocation
2978   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
2979   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
2980   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
2981     if (TracePageSizes && Verbose) {
2982        tty->print_cr("Reserving large pages individually.");
2983     }
2984     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
2985     if (p_buf == NULL) {
2986       warning("Individually allocated pages failed, "
2987               "use -XX:-UseLargePagesIndividualAllocation to turn off");
2988       return NULL;
2989     }
2990 
2991     return p_buf;
2992 
2993   } else {
2994     // normal policy just allocate it all at once
2995     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
2996     char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
2997     return res;
2998   }
2999 }
3000 
3001 bool os::release_memory_special(char* base, size_t bytes) {
3002   return release_memory(base, bytes);
3003 }
3004 
3005 void os::print_statistics() {
3006 }
3007 
3008 bool os::commit_memory(char* addr, size_t bytes, bool exec) {
3009   if (bytes == 0) {
3010     // Don't bother the OS with noops.
3011     return true;
3012   }
3013   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3014   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3015   // Don't attempt to print anything if the OS call fails. We're
3016   // probably low on resources, so the print itself may cause crashes.
3017 
3018   // unless we have NUMAInterleaving enabled, the range of a commit
3019   // is always within a reserve covered by a single VirtualAlloc;
3020   // in that case we can just do a single commit for the requested size
3021   if (!UseNUMAInterleaving) {
3022     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) return false;
3023     if (exec) {
3024       DWORD oldprot;
3025       // Windows doc says to use VirtualProtect to get execute permissions
3026       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) return false;
3027     }
3028     return true;
3029   } else {
3030     // when NUMAInterleaving is enabled, the commit might cover a range that
3031     // came from multiple VirtualAlloc reserves made with allocate_pages_individually.
3032     // VirtualQuery can help us determine that: the RegionSize it returns
3033     // represents the number of bytes that can be committed in one step.
3034     size_t bytes_remaining = bytes;
3035     char * next_alloc_addr = addr;
3036     while (bytes_remaining > 0) {
3037       MEMORY_BASIC_INFORMATION alloc_info;
3038       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3039       size_t bytes_to_rq = MIN2(bytes_remaining, alloc_info.RegionSize);
3040       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, PAGE_READWRITE) == NULL) return false;
3041       if (exec) {
3042         DWORD oldprot;
3043         if (!VirtualProtect(next_alloc_addr, bytes_to_rq, PAGE_EXECUTE_READWRITE, &oldprot)) return false;
3044       }
3045       bytes_remaining -= bytes_to_rq;
3046       next_alloc_addr += bytes_to_rq;
3047     }
3048   }
3049   // if we made it this far, return true
3050   return true;
3051 }
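
For reference, a minimal sketch of the VirtualQuery() probe the loop above relies on:

MEMORY_BASIC_INFORMATION mbi;
if (VirtualQuery(next_alloc_addr, &mbi, sizeof(mbi)) == sizeof(mbi)) {
  // mbi.AllocationBase is the base of the reservation containing the address;
  // mbi.RegionSize is the run of pages from the queried address with identical
  // state, i.e. the most that can be committed here in one step.
}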
3052 
3053 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
3054                        bool exec) {
3055   return commit_memory(addr, size, exec);
3056 }
3057 
3058 bool os::uncommit_memory(char* addr, size_t bytes) {
3059   if (bytes == 0) {
3060     // Don't bother the OS with noops.
3061     return true;
3062   }
3063   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3064   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3065   return VirtualFree(addr, bytes, MEM_DECOMMIT) != 0;
3066 }
3067 
3068 bool os::release_memory(char* addr, size_t bytes) {
3069   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3070 }


3104   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3105   // the guard page status. Guard pages thus act as a one-time access alarm.
3106   return VirtualProtect(addr, bytes, p, &old_status) != 0;
3107 }
3108 
3109 bool os::guard_memory(char* addr, size_t bytes) {
3110   DWORD old_status;
3111   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3112 }
3113 
3114 bool os::unguard_memory(char* addr, size_t bytes) {
3115   DWORD old_status;
3116   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3117 }
3118 
3119 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3120 void os::free_memory(char *addr, size_t bytes)         { }
3121 void os::numa_make_global(char *addr, size_t bytes)    { }
3122 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3123 bool os::numa_topology_changed()                       { return false; }
3124 size_t os::numa_get_groups_num()                       { return numa_node_list_holder.get_count(); }
3125 int os::numa_get_group_id()                            { return 0; }
3126 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3127   // check for size bigger than actual groups_num
3128   size = MIN2(size, numa_get_groups_num());
3129   for (size_t i = 0; i < size; i++) {
3130     ids[i] = numa_node_list_holder.get_node_list_entry(i);
3131   }
3132   return size;
3133 }
3134 
3135 bool os::get_page_info(char *start, page_info* info) {
3136   return false;
3137 }
3138 
3139 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
3140   return end;
3141 }
3142 
3143 char* os::non_memory_address_word() {
3144   // Must never look like an address returned by reserve_memory,
3145   // even in its subfields (as defined by the CPU immediate fields,
3146   // if the CPU splits constants across multiple instructions).
3147   return (char*)-1;
3148 }
3149 
3150 #define MAX_ERROR_COUNT 100
3151 #define SYS_THREAD_ERROR 0xffffffffUL
3152 


3637   os::set_polling_page( polling_page );
3638 
3639 #ifndef PRODUCT
3640   if( Verbose && PrintMiscellaneous )
3641     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
3642 #endif
3643 
3644   if (!UseMembar) {
3645     address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
3646     guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
3647 
3648     return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
3649     guarantee( return_page != NULL, "Commit Failed for memory serialize page");
3650 
3651     os::set_memory_serialize_page( mem_serialize_page );
3652 
3653 #ifndef PRODUCT
3654     if(Verbose && PrintMiscellaneous)
3655       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3656 #endif
3657   }
3658 
3659   os::large_page_init();
3660 
3661   // Setup Windows Exceptions
3662 
3663   // On Itanium systems, Structured Exception Handling does not
3664   // work since stack frames must be walkable by the OS.  Since
3665   // much of our code is dynamically generated, and we do not have
3666   // proper unwind .xdata sections, the system simply exits
3667   // rather than delivering the exception.  To work around
3668   // this we use VectorExceptions instead.
3669 #ifdef _WIN64
3670   if (UseVectoredExceptions) {
3671     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelExceptionFilter);
3672   }
3673 #endif
3674 
3675   // for debugging float code generation bugs
3676   if (ForceFloatExceptions) {
3677 #ifndef  _WIN64


3740     // atexit functions can be delayed until process exit time, which
3741     // can be problematic for embedded VM situations. Embedded VMs should
3742     // call DestroyJavaVM() to assure that VM resources are released.
3743 
3744     // note: perfMemory_exit_helper atexit function may be removed in
3745     // the future if the appropriate cleanup code can be added to the
3746     // VM_Exit VMOperation's doit method.
3747     if (atexit(perfMemory_exit_helper) != 0) {
3748       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3749     }
3750   }
3751 
3752 #ifndef _WIN64
3753   // Print something if NX is enabled (win32 on AMD64)
3754   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
3755 #endif
3756 
3757   // initialize thread priority policy
3758   prio_init();
3759 
3760   if (UseNUMAInterleaving) {
3761     // first check whether this Windows OS supports VirtualAllocExNuma; if not, ignore this flag
3762     bool success = numa_interleaving_init();
3763     if (!success) UseNUMAInterleaving = false;
3764   }
3765 
3766   return JNI_OK;
3767 }
3768 
3769 void os::init_3(void) {
3770   return;
3771 }
3772 
3773 // Mark the polling page as unreadable
3774 void os::make_polling_page_unreadable(void) {
3775   DWORD old_status;
3776   if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
3777     fatal("Could not disable polling page");
3778 };
3779 
3780 // Mark the polling page as readable
3781 void os::make_polling_page_readable(void) {
3782   DWORD old_status;
3783   if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )


4917 int os::get_sock_name(int fd, struct sockaddr *him, int *len) {
4918   ShouldNotReachHere();
4919   return 0;
4920 }
4921 
4922 int os::get_sock_opt(int fd, int level, int optname,
4923                              char *optval, int* optlen) {
4924   ShouldNotReachHere();
4925   return 0;
4926 }
4927 
4928 int os::set_sock_opt(int fd, int level, int optname,
4929                              const char *optval, int optlen) {
4930   ShouldNotReachHere();
4931   return 0;
4932 }
4933 
4934 
4935 // Kernel32 API
4936 typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
4937 typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
4938 typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
4939 typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
4940 
4941 GetLargePageMinimum_Fn      os::Kernel32Dll::_GetLargePageMinimum = NULL;
4942 VirtualAllocExNuma_Fn       os::Kernel32Dll::_VirtualAllocExNuma = NULL;
4943 GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
4944 GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
4945 BOOL                        os::Kernel32Dll::initialized = FALSE;
4946 SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
4947   assert(initialized && _GetLargePageMinimum != NULL,
4948     "GetLargePageMinimumAvailable() not yet called");
4949   return _GetLargePageMinimum();
4950 }
4951 
4952 BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
4953   if (!initialized) {
4954     initialize();
4955   }
4956   return _GetLargePageMinimum != NULL;
4957 }
4958 
4959 BOOL os::Kernel32Dll::NumaCallsAvailable() {
4960   if (!initialized) {
4961     initialize();
4962   }
4963   return _VirtualAllocExNuma != NULL;
4964 }
4965 
4966 LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
4967   assert(initialized && _VirtualAllocExNuma != NULL,
4968     "NUMACallsAvailable() not yet called");
4969 
4970   return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
4971 }
4972 
4973 BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
4974   assert(initialized && _GetNumaHighestNodeNumber != NULL,
4975     "NUMACallsAvailable() not yet called");
4976 
4977   return _GetNumaHighestNodeNumber(ptr_highest_node_number);
4978 }
4979 
4980 BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
4981   assert(initialized && _GetNumaNodeProcessorMask != NULL,
4982     "NUMACallsAvailable() not yet called");
4983 
4984   return _GetNumaNodeProcessorMask(node, proc_mask);
4985 }
4986 
4987 
4988 void os::Kernel32Dll::initializeCommon() {
4989   if (!initialized) {
4990     HMODULE handle = ::GetModuleHandle("Kernel32.dll");
4991     assert(handle != NULL, "Just check");
4992     _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
4993     _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
4994     _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
4995     _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
4996     initialized = TRUE;
4997   }
4998 }
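
The same resolve-then-test pattern extends to any optional API; a minimal sketch using the typedefs above (which Windows releases export the symbol is an assumption, hence the runtime check):

HMODULE k32 = ::GetModuleHandle("Kernel32.dll");
GetNumaHighestNodeNumber_Fn fn =
    (GetNumaHighestNodeNumber_Fn) ::GetProcAddress(k32, "GetNumaHighestNodeNumber");
ULONG highest = 0;
if (fn != NULL && fn(&highest)) {
  // the OS exports the NUMA calls; proceed as NumaCallsAvailable() does above
} else {
  // older Windows: degrade gracefully instead of failing to load
}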
4999 
5000 
5001 
5002 #ifndef JDK6_OR_EARLIER
5003 
5004 void os::Kernel32Dll::initialize() {
5005   initializeCommon();
5006 }
5007 
5008 
5009 // Kernel32 API
5010 inline BOOL os::Kernel32Dll::SwitchToThread() {
5011   return ::SwitchToThread();
5012 }
5013 
5014 inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
5015   return true;
5016 }
5017 
5018   // Help tools
5019 inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
5020   return true;
5021 }
5022 
5023 inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
5024   return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
5025 }
5026 
5027 inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
5028   return ::Module32First(hSnapshot, lpme);


5090 }
5091 
5092 inline BOOL os::Advapi32Dll::AdvapiAvailable() {
5093   return true;
5094 }
5095 
5096 #else
5097 // Kernel32 API
5098 typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
5099 typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
5100 typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
5101 typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
5102 typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);
5103 
5104 SwitchToThread_Fn           os::Kernel32Dll::_SwitchToThread = NULL;
5105 CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
5106 Module32First_Fn            os::Kernel32Dll::_Module32First = NULL;
5107 Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
5108 GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;
5109 
5110 
5111 void os::Kernel32Dll::initialize() {
5112   if (!initialized) {
5113     HMODULE handle = ::GetModuleHandle("Kernel32.dll");
5114     assert(handle != NULL, "Just check");
5115 
5116     _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
5117     _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
5118       ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
5119     _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
5120     _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
5121     _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
5122     initializeCommon();  // resolve the functions that always need resolving
5123 
5124     initialized = TRUE;
5125   }
5126 }
5127 
5128 BOOL os::Kernel32Dll::SwitchToThread() {
5129   assert(initialized && _SwitchToThread != NULL,
5130     "SwitchToThreadAvailable() not yet called");
5131   return _SwitchToThread();
5132 }
5133 
5134 
5135 BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
5136   if (!initialized) {
5137     initialize();
5138   }
5139   return _SwitchToThread != NULL;
5140 }
5141 
5142 // Help tools


5168     "HelpToolsAvailable() not yet called");
5169 
5170   return _Module32Next(hSnapshot, lpme);
5171 }
5172 
5173 
5174 BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
5175   if (!initialized) {
5176     initialize();
5177   }
5178   return _GetNativeSystemInfo != NULL;
5179 }
5180 
5181 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
5182   assert(initialized && _GetNativeSystemInfo != NULL,
5183     "GetNativeSystemInfoAvailable() not yet called");
5184 
5185   _GetNativeSystemInfo(lpSystemInfo);
5186 }
5187 
5188 
5189 
5190 // PSAPI API
5191 
5192 
5193 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
5194 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);
5195 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
5196 
5197 EnumProcessModules_Fn   os::PSApiDll::_EnumProcessModules = NULL;
5198 GetModuleFileNameEx_Fn  os::PSApiDll::_GetModuleFileNameEx = NULL;
5199 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
5200 BOOL                    os::PSApiDll::initialized = FALSE;
5201 
5202 void os::PSApiDll::initialize() {
5203   if (!initialized) {
5204     HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
5205     if (handle != NULL) {
5206       _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
5207         "EnumProcessModules");
5208       _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
5209         "GetModuleFileNameExA");