136 FILE_BACKED_PVT_BIT = 1 << 2,
137 FILE_BACKED_SHARED_BIT = 1 << 3,
138 LARGEPAGES_BIT = 1 << 6,
139 DAX_SHARED_BIT = 1 << 8
140 };
141
142 ////////////////////////////////////////////////////////////////////////////////
143 // global variables
// Pre-initialization defaults for os::Linux statics. The values written here
// are placeholders; the real values are established during VM startup
// elsewhere in this file (not visible in this chunk).
julong os::Linux::_physical_memory = 0;

// Primordial (initial) thread stack bounds; resolved later when needed.
address os::Linux::_initial_thread_stack_bottom = NULL;
uintptr_t os::Linux::_initial_thread_stack_size = 0;

// Optional libpthread entry points; NULL until resolved (resolution happens
// outside this chunk and may legitimately leave them NULL).
int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1;  // -1 marks "not yet determined"
bool os::Linux::_supports_fast_thread_cpu_time = false;
const char * os::Linux::_glibc_version = NULL;
const char * os::Linux::_libpthread_version = NULL;

static jlong initial_time_count=0;

// Clock ticks per second; 100 is the common Linux value (presumably refreshed
// from sysconf(_SC_CLK_TCK) during init — confirm against the initializer).
static int clock_tics_per_sec = 100;

// If the VM might have been created on the primordial thread, we need to resolve the
// primordial thread stack bounds and check if the current thread might be the
// primordial thread in places. If we know that the primordial thread is never used,
// such as when the VM was created by one of the standard java launchers, we can
// avoid this.
static bool suppress_primordial_thread_resolution = false;

// For diagnostics to print a message once. See run_periodic_checks.
static sigset_t check_signal_done;
static bool check_signals = true;

// Signal number used to suspend/resume a thread

// do not use any signal number less than SIGSEGV, see 4355769
static int SR_signum = SIGUSR2;
2931 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2932 return os::Linux::commit_memory_impl(addr, size, exec) == 0;
2933 }
2934
2935 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2936 const char* mesg) {
2937 assert(mesg != NULL, "mesg must be specified");
2938 int err = os::Linux::commit_memory_impl(addr, size, exec);
2939 if (err != 0) {
2940 // the caller wants all commit errors to exit with the specified mesg:
2941 warn_fail_commit_memory(addr, size, exec, err);
2942 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2943 }
2944 }
2945
2946 // Define MAP_HUGETLB here so we can build HotSpot on old systems.
2947 #ifndef MAP_HUGETLB
2948 #define MAP_HUGETLB 0x40000
2949 #endif
2950
2951 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
2952 #ifndef MADV_HUGEPAGE
2953 #define MADV_HUGEPAGE 14
2954 #endif
2955
2956 int os::Linux::commit_memory_impl(char* addr, size_t size,
2957 size_t alignment_hint, bool exec) {
2958 int err = os::Linux::commit_memory_impl(addr, size, exec);
2959 if (err == 0) {
2960 realign_memory(addr, size, alignment_hint);
2961 }
2962 return err;
2963 }
2964
2965 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2966 bool exec) {
2967 return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2968 }
2969
2970 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3713 if (fscanf(f, "%lx", &cdm) != 1) {
3714 fclose(f);
3715 return;
3716 }
3717
3718 long saved_cdm = cdm;
3719 rewind(f);
3720 cdm |= bit;
3721
3722 if (cdm != saved_cdm) {
3723 fprintf(f, "%#lx", cdm);
3724 }
3725
3726 fclose(f);
3727 }
3728
3729 // Large page support
3730
// Cached large page size in bytes; 0 until setup_large_page_size() runs.
static size_t _large_page_size = 0;
3732
// Determine the system's large (huge) page size in bytes.
// Primary source is /proc/meminfo's "Hugepagesize:" line; when that cannot
// be read, falls back to a per-platform compile-time default.
// NOTE(review): part of this function (the remainder of the platform-default
// list and its #endif) is not visible in this chunk.
size_t os::Linux::find_large_page_size() {
  size_t large_page_size = 0;

  // large_page_size on Linux is used to round up heap size. x86 uses either
  // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
  // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
  // page as large as 256M.
  //
  // Here we try to figure out page size by parsing /proc/meminfo and looking
  // for a line with the following format:
  //    Hugepagesize:     2048 kB
  //
  // If we can't determine the value (e.g. /proc is not mounted, or the text
  // format has been changed), we'll use the largest page size supported by
  // the processor.

#ifndef ZERO
  large_page_size =
    AARCH64_ONLY(2 * M)
    AMD64_ONLY(2 * M)
    ARM32_ONLY(2 * M)

  FILE *fp = fopen("/proc/meminfo", "r");
  if (fp) {
    while (!feof(fp)) {
      int x = 0;
      char buf[16];
      // Match "Hugepagesize: <n>"; then require the trailing unit token to be
      // exactly " kB\n" before trusting the number.
      if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
        if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
          large_page_size = x * K;  // kernel reports kB; convert to bytes
          break;
        }
      } else {
        // skip to next line
        for (;;) {
          int ch = fgetc(fp);
          if (ch == EOF || ch == (int)'\n') break;
        }
      }
    }
    fclose(fp);
  }

  // On this OS the page size is dictated by the kernel, so a user-supplied
  // LargePageSizeInBytes that disagrees is ignored with a warning.
  if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
    warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
            SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
            proper_unit_for_byte_size(large_page_size));
  }

  return large_page_size;
}
3790
3791 size_t os::Linux::setup_large_page_size() {
3792 _large_page_size = Linux::find_large_page_size();
3793 const size_t default_page_size = (size_t)Linux::page_size();
3794 if (_large_page_size > default_page_size) {
3795 _page_sizes[0] = _large_page_size;
3796 _page_sizes[1] = default_page_size;
3797 _page_sizes[2] = 0;
3798 }
3799
3800 return _large_page_size;
3801 }
3802
3803 bool os::Linux::setup_large_page_type(size_t page_size) {
3804 if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
3805 FLAG_IS_DEFAULT(UseSHM) &&
3806 FLAG_IS_DEFAULT(UseTransparentHugePages)) {
3807
3808 // The type of large pages has not been specified by the user.
3809
3810 // Try UseHugeTLBFS and then UseSHM.
3811 UseHugeTLBFS = UseSHM = true;
3812
3813 // Don't try UseTransparentHugePages since there are known
3814 // performance issues with it turned on. This might change in the future.
3815 UseTransparentHugePages = false;
3816 }
3817
3818 if (UseTransparentHugePages) {
3819 bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
3820 if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
3821 UseHugeTLBFS = false;
3822 UseSHM = false;
4011 (!FLAG_IS_DEFAULT(UseLargePages) ||
4012 !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
4013 !FLAG_IS_DEFAULT(LargePageSizeInBytes));
4014
4015 if (warn_on_failure) {
4016 char msg[128];
4017 jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
4018 PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
4019 warning("%s", msg);
4020 }
4021 }
4022
4023 char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
4024 char* req_addr,
4025 bool exec) {
4026 assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
4027 assert(is_aligned(bytes, os::large_page_size()), "Unaligned size");
4028 assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
4029
4030 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
4031 char* addr = (char*)::mmap(req_addr, bytes, prot,
4032 MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
4033 -1, 0);
4034
4035 if (addr == MAP_FAILED) {
4036 warn_on_large_pages_failure(req_addr, bytes, errno);
4037 return NULL;
4038 }
4039
4040 assert(is_aligned(addr, os::large_page_size()), "Must be");
4041
4042 return addr;
4043 }
4044
4045 // Reserve memory using mmap(MAP_HUGETLB).
4046 // - bytes shall be a multiple of alignment.
4047 // - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
4048 // - alignment sets the alignment at which memory shall be allocated.
4049 // It must be a multiple of allocation granularity.
4050 // Returns address of memory or NULL. If req_addr was not NULL, will only return
4051 // req_addr or NULL.
4052 char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes,
4053 size_t alignment,
4069 assert(is_aligned(start, alignment), "Must be");
4070
4071 char* end = start + bytes;
4072
4073 // Find the regions of the allocated chunk that can be promoted to large pages.
4074 char* lp_start = align_up(start, large_page_size);
4075 char* lp_end = align_down(end, large_page_size);
4076
4077 size_t lp_bytes = lp_end - lp_start;
4078
4079 assert(is_aligned(lp_bytes, large_page_size), "Must be");
4080
4081 if (lp_bytes == 0) {
4082 // The mapped region doesn't even span the start and the end of a large page.
4083 // Fall back to allocate a non-special area.
4084 ::munmap(start, end - start);
4085 return NULL;
4086 }
4087
4088 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
4089
4090 void* result;
4091
4092 // Commit small-paged leading area.
4093 if (start != lp_start) {
4094 result = ::mmap(start, lp_start - start, prot,
4095 MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
4096 -1, 0);
4097 if (result == MAP_FAILED) {
4098 ::munmap(lp_start, end - lp_start);
4099 return NULL;
4100 }
4101 }
4102
4103 // Commit large-paged area.
4104 result = ::mmap(lp_start, lp_bytes, prot,
4105 MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
4106 -1, 0);
4107 if (result == MAP_FAILED) {
4108 warn_on_large_pages_failure(lp_start, lp_bytes, errno);
4109 // If the mmap above fails, the large pages region will be unmapped and we
4110 // have regions before and after with small pages. Release these regions.
4111 //
4112 // | mapped | unmapped | mapped |
4113 // ^ ^ ^ ^
4114 // start lp_start lp_end end
4115 //
4116 ::munmap(start, lp_start - start);
4117 ::munmap(lp_end, end - lp_end);
4118 return NULL;
4119 }
4120
4121 // Commit small-paged trailing area.
4122 if (lp_end != end) {
4123 result = ::mmap(lp_end, end - lp_end, prot,
4124 MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
4125 -1, 0);
4126 if (result == MAP_FAILED) {
|
136 FILE_BACKED_PVT_BIT = 1 << 2,
137 FILE_BACKED_SHARED_BIT = 1 << 3,
138 LARGEPAGES_BIT = 1 << 6,
139 DAX_SHARED_BIT = 1 << 8
140 };
141
142 ////////////////////////////////////////////////////////////////////////////////
143 // global variables
// Pre-initialization defaults for os::Linux statics. The values written here
// are placeholders; the real values are established during VM startup
// elsewhere in this file (not visible in this chunk).
julong os::Linux::_physical_memory = 0;

// Primordial (initial) thread stack bounds; resolved later when needed.
address os::Linux::_initial_thread_stack_bottom = NULL;
uintptr_t os::Linux::_initial_thread_stack_size = 0;

// Optional libpthread entry points; NULL until resolved (resolution happens
// outside this chunk and may legitimately leave them NULL).
int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1;  // -1 marks "not yet determined"
bool os::Linux::_supports_fast_thread_cpu_time = false;
const char * os::Linux::_glibc_version = NULL;
const char * os::Linux::_libpthread_version = NULL;
// Kernel's default huge page size in bytes; 0 until setup_large_page_size()
// stores the result of find_default_large_page_size().
size_t os::Linux::_default_large_page_size = 0;

static jlong initial_time_count=0;

// Clock ticks per second; 100 is the common Linux value (presumably refreshed
// from sysconf(_SC_CLK_TCK) during init — confirm against the initializer).
static int clock_tics_per_sec = 100;

// If the VM might have been created on the primordial thread, we need to resolve the
// primordial thread stack bounds and check if the current thread might be the
// primordial thread in places. If we know that the primordial thread is never used,
// such as when the VM was created by one of the standard java launchers, we can
// avoid this.
static bool suppress_primordial_thread_resolution = false;

// For diagnostics to print a message once. See run_periodic_checks.
static sigset_t check_signal_done;
static bool check_signals = true;

// Signal number used to suspend/resume a thread

// do not use any signal number less than SIGSEGV, see 4355769
static int SR_signum = SIGUSR2;
2932 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2933 return os::Linux::commit_memory_impl(addr, size, exec) == 0;
2934 }
2935
2936 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2937 const char* mesg) {
2938 assert(mesg != NULL, "mesg must be specified");
2939 int err = os::Linux::commit_memory_impl(addr, size, exec);
2940 if (err != 0) {
2941 // the caller wants all commit errors to exit with the specified mesg:
2942 warn_fail_commit_memory(addr, size, exec, err);
2943 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2944 }
2945 }
2946
2947 // Define MAP_HUGETLB here so we can build HotSpot on old systems.
2948 #ifndef MAP_HUGETLB
2949 #define MAP_HUGETLB 0x40000
2950 #endif
2951
2952 // If MAP_HUGETLB is set, and the system supports multiple huge page sizes,
2953 // flag bits [26:31] can be used to encode the log2 of the desired huge page size.
2954 // Otherwise the system's default huge page size will be used.
2955 // See mmap(2) man page for more info (since Linux 3.8).
2956 // https://lwn.net/Articles/533499/
2957 #ifndef MAP_HUGE_SHIFT
2958 #define MAP_HUGE_SHIFT 26
2959 #endif
2960
2961 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
2962 #ifndef MADV_HUGEPAGE
2963 #define MADV_HUGEPAGE 14
2964 #endif
2965
2966 int os::Linux::commit_memory_impl(char* addr, size_t size,
2967 size_t alignment_hint, bool exec) {
2968 int err = os::Linux::commit_memory_impl(addr, size, exec);
2969 if (err == 0) {
2970 realign_memory(addr, size, alignment_hint);
2971 }
2972 return err;
2973 }
2974
2975 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2976 bool exec) {
2977 return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2978 }
2979
2980 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3723 if (fscanf(f, "%lx", &cdm) != 1) {
3724 fclose(f);
3725 return;
3726 }
3727
3728 long saved_cdm = cdm;
3729 rewind(f);
3730 cdm |= bit;
3731
3732 if (cdm != saved_cdm) {
3733 fprintf(f, "%#lx", cdm);
3734 }
3735
3736 fclose(f);
3737 }
3738
3739 // Large page support
3740
// Selected large page size in bytes; 0 until setup_large_page_size() runs.
static size_t _large_page_size = 0;
3742
// Determine the kernel's default huge page size in bytes by parsing the
// "Hugepagesize:" line of /proc/meminfo, falling back to a per-platform
// compile-time default when the file cannot be parsed.
// NOTE(review): part of this function (the remainder of the platform-default
// list and its #endif) is not visible in this chunk.
size_t os::Linux::find_default_large_page_size() {
  size_t large_page_size = 0;

  // large_page_size on Linux is used to round up heap size. x86 uses either
  // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
  // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
  // page as large as 256M.
  //
  // Here we try to figure out page size by parsing /proc/meminfo and looking
  // for a line with the following format:
  //    Hugepagesize:     2048 kB
  //
  // If we can't determine the value (e.g. /proc is not mounted, or the text
  // format has been changed), we'll use the largest page size supported by
  // the processor.

#ifndef ZERO
  large_page_size =
    AARCH64_ONLY(2 * M)
    AMD64_ONLY(2 * M)
    ARM32_ONLY(2 * M)

  FILE *fp = fopen("/proc/meminfo", "r");
  if (fp) {
    while (!feof(fp)) {
      int x = 0;
      char buf[16];
      // Match "Hugepagesize: <n>"; then require the trailing unit token to be
      // exactly " kB\n" before trusting the number.
      if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
        if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
          large_page_size = x * K;  // kernel reports kB; convert to bytes
          break;
        }
      } else {
        // skip to next line
        for (;;) {
          int ch = fgetc(fp);
          if (ch == EOF || ch == (int)'\n') break;
        }
      }
    }
    fclose(fp);
  }
  return large_page_size;
}
3793
3794 bool os::Linux::is_valid_large_page_size(size_t large_page_size) {
3795 // We need to scan /sys/kernel/mm/hugepages
3796 // to discover the available page sizes
3797 const char* sys_hugepages = "/sys/kernel/mm/hugepages";
3798 if (dir_is_empty(sys_hugepages)) {
3799 return false;
3800 }
3801
3802 DIR *dir = opendir(sys_hugepages);
3803 if (dir == NULL) {
3804 return false;
3805 }
3806
3807 struct dirent *entry;
3808 size_t page_size;
3809 bool is_valid = false;
3810 while ( (entry = readdir(dir)) != NULL) {
3811 if (entry->d_type == DT_DIR &&
3812 sscanf(entry->d_name, "hugepages-%zukB", &page_size) == 1) {
3813 // The kernel is using kB, hotspot uses bytes
3814 if (large_page_size == page_size * K) {
3815 is_valid = true;
3816 break;
3817 }
3818 }
3819 }
3820 closedir(dir);
3821 return is_valid;
3822 }
3823
3824 size_t os::Linux::setup_large_page_size() {
3825 _default_large_page_size = Linux::find_default_large_page_size();
3826 _large_page_size = _default_large_page_size;
3827
3828 if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != _large_page_size ) {
3829 if (is_valid_large_page_size(LargePageSizeInBytes)) {
3830 _large_page_size = LargePageSizeInBytes;
3831 } else {
3832 warning("Setting LargePageSizeInBytes=" SIZE_FORMAT " has no effect on this OS. Default large page size is "
3833 SIZE_FORMAT "%s.",
3834 LargePageSizeInBytes,
3835 byte_size_in_proper_unit(_large_page_size), proper_unit_for_byte_size(_large_page_size));
3836 }
3837 }
3838
3839 const size_t default_page_size = (size_t)Linux::page_size();
3840 if (_large_page_size > default_page_size) {
3841 _page_sizes[0] = _large_page_size;
3842 _page_sizes[1] = default_page_size;
3843 _page_sizes[2] = 0;
3844 }
3845
3846 return _large_page_size;
3847 }
3848
// Accessor for the kernel's default huge page size in bytes, cached by
// setup_large_page_size(); 0 until that setup has run.
size_t os::Linux::default_large_page_size() {
  return _default_large_page_size;
}
3852
3853 bool os::Linux::setup_large_page_type(size_t page_size) {
3854 if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
3855 FLAG_IS_DEFAULT(UseSHM) &&
3856 FLAG_IS_DEFAULT(UseTransparentHugePages)) {
3857
3858 // The type of large pages has not been specified by the user.
3859
3860 // Try UseHugeTLBFS and then UseSHM.
3861 UseHugeTLBFS = UseSHM = true;
3862
3863 // Don't try UseTransparentHugePages since there are known
3864 // performance issues with it turned on. This might change in the future.
3865 UseTransparentHugePages = false;
3866 }
3867
3868 if (UseTransparentHugePages) {
3869 bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
3870 if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
3871 UseHugeTLBFS = false;
3872 UseSHM = false;
4061 (!FLAG_IS_DEFAULT(UseLargePages) ||
4062 !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
4063 !FLAG_IS_DEFAULT(LargePageSizeInBytes));
4064
4065 if (warn_on_failure) {
4066 char msg[128];
4067 jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
4068 PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
4069 warning("%s", msg);
4070 }
4071 }
4072
4073 char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
4074 char* req_addr,
4075 bool exec) {
4076 assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
4077 assert(is_aligned(bytes, os::large_page_size()), "Unaligned size");
4078 assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
4079
4080 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
4081 int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB;
4082
4083 if (os::large_page_size() != default_large_page_size()) {
4084 flags |= (exact_log2(os::large_page_size()) << MAP_HUGE_SHIFT);
4085 }
4086 char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0);
4087
4088 if (addr == MAP_FAILED) {
4089 warn_on_large_pages_failure(req_addr, bytes, errno);
4090 return NULL;
4091 }
4092
4093 assert(is_aligned(addr, os::large_page_size()), "Must be");
4094
4095 return addr;
4096 }
4097
4098 // Reserve memory using mmap(MAP_HUGETLB).
4099 // - bytes shall be a multiple of alignment.
4100 // - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
4101 // - alignment sets the alignment at which memory shall be allocated.
4102 // It must be a multiple of allocation granularity.
4103 // Returns address of memory or NULL. If req_addr was not NULL, will only return
4104 // req_addr or NULL.
4105 char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes,
4106 size_t alignment,
4122 assert(is_aligned(start, alignment), "Must be");
4123
4124 char* end = start + bytes;
4125
4126 // Find the regions of the allocated chunk that can be promoted to large pages.
4127 char* lp_start = align_up(start, large_page_size);
4128 char* lp_end = align_down(end, large_page_size);
4129
4130 size_t lp_bytes = lp_end - lp_start;
4131
4132 assert(is_aligned(lp_bytes, large_page_size), "Must be");
4133
4134 if (lp_bytes == 0) {
4135 // The mapped region doesn't even span the start and the end of a large page.
4136 // Fall back to allocate a non-special area.
4137 ::munmap(start, end - start);
4138 return NULL;
4139 }
4140
4141 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
4142 int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED;
4143 void* result;
4144
4145 // Commit small-paged leading area.
4146 if (start != lp_start) {
4147 result = ::mmap(start, lp_start - start, prot, flags, -1, 0);
4148 if (result == MAP_FAILED) {
4149 ::munmap(lp_start, end - lp_start);
4150 return NULL;
4151 }
4152 }
4153
4154 // Commit large-paged area.
4155 flags |= MAP_HUGETLB;
4156
4157 if (os::large_page_size() != default_large_page_size()) {
4158 flags |= (exact_log2(os::large_page_size()) << MAP_HUGE_SHIFT);
4159 }
4160
4161 result = ::mmap(lp_start, lp_bytes, prot, flags, -1, 0);
4162 if (result == MAP_FAILED) {
4163 warn_on_large_pages_failure(lp_start, lp_bytes, errno);
4164 // If the mmap above fails, the large pages region will be unmapped and we
4165 // have regions before and after with small pages. Release these regions.
4166 //
4167 // | mapped | unmapped | mapped |
4168 // ^ ^ ^ ^
4169 // start lp_start lp_end end
4170 //
4171 ::munmap(start, lp_start - start);
4172 ::munmap(lp_end, end - lp_end);
4173 return NULL;
4174 }
4175
4176 // Commit small-paged trailing area.
4177 if (lp_end != end) {
4178 result = ::mmap(lp_end, end - lp_end, prot,
4179 MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
4180 -1, 0);
4181 if (result == MAP_FAILED) {
|