--- old/src/hotspot/os/aix/os_aix.cpp 2020-08-28 04:55:17.000000000 -0700
+++ new/src/hotspot/os/aix/os_aix.cpp 2020-08-28 04:55:16.000000000 -0700
@@ -2269,7 +2269,7 @@
   pd_commit_memory_or_exit(addr, size, exec, mesg);
 }
 
-bool os::pd_uncommit_memory(char* addr, size_t size) {
+bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
   assert(is_aligned_to(addr, os::vm_page_size()),
          "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
          p2i(addr), os::vm_page_size());
@@ -2347,7 +2347,7 @@
 
 // Reserves and attaches a shared memory segment.
 // Will assert if a wish address is given and could not be obtained.
-char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
+char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint, bool executable) {
 
   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
   // thereby clobbering old mappings at that place. That is probably
--- old/src/hotspot/os/bsd/os_bsd.cpp 2020-08-28 04:55:18.000000000 -0700
+++ new/src/hotspot/os/bsd/os_bsd.cpp 2020-08-28 04:55:17.000000000 -0700
@@ -1907,12 +1907,16 @@
 // problem.
 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
-#ifdef __OpenBSD__
+#if defined(__OpenBSD__)
   // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
   Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
   if (::mprotect(addr, size, prot) == 0) {
     return true;
   }
+#elif defined(__APPLE__)
+  if (::mprotect(addr, size, prot) == 0) {
+    return true;
+  }
 #else
   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                      MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
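The new __APPLE__ branch above commits by flipping page protection in place instead of mmap()ing over the range: on macOS, re-mapping with MAP_FIXED would replace an existing MAP_JIT mapping with a plain anonymous one and silently drop the JIT attribute established at reservation time. A minimal standalone sketch of the same idea (not HotSpot code; commit_in_place is a name invented here):

    // Commit previously reserved pages in place. Assumes the range was
    // reserved earlier with mmap(..., PROT_NONE, ...).
    #include <sys/mman.h>
    #include <cstddef>

    bool commit_in_place(char* addr, size_t size, bool exec) {
      int prot = exec ? (PROT_READ | PROT_WRITE | PROT_EXEC)
                      : (PROT_READ | PROT_WRITE);
      // mprotect() keeps the existing mapping (and any MAP_JIT attribute);
      // mmap(MAP_FIXED) would tear it down and create a new one.
      return ::mprotect(addr, size, prot) == 0;
    }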
@@ -1995,11 +1999,22 @@
 }
 
-bool os::pd_uncommit_memory(char* addr, size_t size) {
-#ifdef __OpenBSD__
+bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
+#if defined(__OpenBSD__)
   // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
   Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with PROT_NONE", p2i(addr), p2i(addr+size));
   return ::mprotect(addr, size, PROT_NONE) == 0;
+#elif defined(__APPLE__)
+  if (exec) {
+    if (::madvise(addr, size, MADV_FREE) != 0) {
+      return false;
+    }
+    return ::mprotect(addr, size, PROT_NONE) == 0;
+  } else {
+    uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
+                                       MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
+    return res != (uintptr_t) MAP_FAILED;
+  }
 #else
   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
                                      MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
@@ -2014,7 +2029,7 @@
 // If this is a growable mapping, remove the guard pages entirely by
 // munmap()ping them. If not, just call uncommit_memory().
 bool os::remove_stack_guard_pages(char* addr, size_t size) {
-  return os::uncommit_memory(addr, size);
+  return os::uncommit_memory(addr, size, !ExecMem);
 }
 
 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
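Uncommitting has the same constraint: an executable (MAP_JIT) range must not be re-mmap()ed, so the __APPLE__ branch returns the pages with madvise(MADV_FREE) and then revokes access with mprotect(PROT_NONE), leaving the mapping itself intact, while non-executable ranges keep the old behavior of mapping fresh PROT_NONE pages over the range. A standalone sketch of the two paths (not HotSpot code):

    #include <sys/mman.h>
    #include <cstddef>

    bool uncommit(char* addr, size_t size, bool exec) {
      if (exec) {
        // MAP_JIT pages: mark the contents disposable, then drop access.
        if (::madvise(addr, size, MADV_FREE) != 0) {
          return false;
        }
        return ::mprotect(addr, size, PROT_NONE) == 0;
      }
      // Ordinary pages: replace the range with a fresh PROT_NONE mapping,
      // which releases the backing store.
      void* res = ::mmap(addr, size, PROT_NONE,
                         MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANONYMOUS,
                         -1, 0);
      return res != MAP_FAILED;
    }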
@@ -2023,11 +2038,17 @@
 // 'requested_addr' is only treated as a hint, the return value may or
 // may not start from the requested address. Unlike Bsd mmap(), this
 // function returns NULL to indicate failure.
-static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
+static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed, bool executable) {
   char * addr;
   int flags;
 
   flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
+#ifdef __APPLE__
+  if (executable) {
+    guarantee(!fixed, "MAP_JIT (for execute) is incompatible with MAP_FIXED");
+    flags |= MAP_JIT;
+  }
+#endif
   if (fixed) {
     assert((uintptr_t)requested_addr % os::Bsd::page_size() == 0, "unaligned address");
     flags |= MAP_FIXED;
@@ -2047,8 +2068,9 @@
 }
 
 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
-                            size_t alignment_hint) {
-  return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
+                            size_t alignment_hint,
+                            bool executable) {
+  return anon_mmap(requested_addr, bytes, (requested_addr != NULL), executable);
 }
 
 bool os::pd_release_memory(char* addr, size_t size) {
@@ -2158,7 +2180,7 @@
 
   // Bsd mmap allows caller to pass an address as hint; give it a try first,
   // if kernel honors the hint then we can return immediately.
-  char * addr = anon_mmap(requested_addr, bytes, false);
+  char * addr = anon_mmap(requested_addr, bytes, false/*fixed*/, false/*executable*/);
   if (addr == requested_addr) {
     return requested_addr;
   }
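MAP_JIT must be requested when the region is first reserved, which is why anon_mmap() gains an executable parameter here, and it cannot be combined with MAP_FIXED, hence the guarantee(). Under the hardened runtime this also requires the com.apple.security.cs.allow-jit entitlement. A standalone sketch (reserve_jit is a name invented here):

    #include <sys/mman.h>
    #include <cstddef>

    char* reserve_jit(size_t bytes) {
    #ifdef __APPLE__
      // Ask for MAP_JIT at reservation time; PROT_NONE until committed.
      // No MAP_FIXED and no mandatory placement: MAP_JIT forbids that.
      void* addr = ::mmap(NULL, bytes, PROT_NONE,
                          MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS | MAP_JIT,
                          -1, 0);
      return addr == MAP_FAILED ? NULL : (char*)addr;
    #else
      return NULL;  // sketch is macOS-only
    #endif
    }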
--- old/src/hotspot/os/linux/os_linux.cpp 2020-08-28 04:55:18.000000000 -0700
+++ new/src/hotspot/os/linux/os_linux.cpp 2020-08-28 04:55:18.000000000 -0700
@@ -3458,7 +3458,7 @@
 struct bitmask* os::Linux::_numa_interleave_bitmask;
 struct bitmask* os::Linux::_numa_membind_bitmask;
 
-bool os::pd_uncommit_memory(char* addr, size_t size) {
+bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
                                      MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
   return res != (uintptr_t) MAP_FAILED;
@@ -3643,7 +3643,7 @@
     return ::munmap(addr, size) == 0;
   }
 
-  return os::uncommit_memory(addr, size);
+  return os::uncommit_memory(addr, size, !ExecMem);
 }
 
 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
@@ -3718,7 +3718,8 @@
 }
 
 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
-                            size_t alignment_hint) {
+                            size_t alignment_hint,
+                            bool executable) {
   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
 }
 
--- old/src/hotspot/os/windows/os_windows.cpp 2020-08-28 04:55:19.000000000 -0700
+++ new/src/hotspot/os/windows/os_windows.cpp 2020-08-28 04:55:19.000000000 -0700
@@ -3257,7 +3257,7 @@
   return aligned_base;
 }
 
-char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
+char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint, bool executable) {
   assert((size_t)addr % os::vm_allocation_granularity() == 0,
          "reserve alignment");
   assert(bytes % os::vm_page_size() == 0, "reserve page size");
@@ -3460,7 +3460,7 @@
   pd_commit_memory_or_exit(addr, size, exec, mesg);
 }
 
-bool os::pd_uncommit_memory(char* addr, size_t bytes) {
+bool os::pd_uncommit_memory(char* addr, size_t bytes, bool exec) {
   if (bytes == 0) {
     // Don't bother the OS with noops.
     return true;
@@ -3479,7 +3479,7 @@
 }
 
 bool os::remove_stack_guard_pages(char* addr, size_t size) {
-  return os::uncommit_memory(addr, size);
+  return os::uncommit_memory(addr, size, !ExecMem);
 }
 
 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
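On Linux and Windows the new parameters are accepted but intentionally unused: only the macOS implementation needs them, and a single cross-platform signature keeps #ifdefs out of the call sites. Those call sites, updated below, pass !ExecMem rather than a bare false; ExecMem is an existing HotSpot boolean constant equal to true, so the argument documents itself at a glance. A sketch of the idiom (uncommit_memory stubbed out here):

    #include <cstddef>

    const bool ExecMem = true;  // HotSpot's existing constant

    // Stub standing in for os::uncommit_memory() in this sketch.
    bool uncommit_memory(char* addr, size_t bytes, bool exec) {
      (void)addr; (void)bytes; (void)exec;
      return true;
    }

    void shrink_data_region(char* addr, size_t bytes) {
      // Reads as "not executable memory" instead of an opaque 'false'.
      uncommit_memory(addr, bytes, !ExecMem);
    }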
--- old/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp 2020-08-28 04:55:20.000000000 -0700
+++ new/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp 2020-08-28 04:55:20.000000000 -0700
@@ -216,7 +216,7 @@
          "Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page);
 
   char* start_addr = page_start(start_page);
-  os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
+  os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)), !ExecMem);
 }
 
 void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
--- old/src/hotspot/share/gc/parallel/psCardTable.cpp 2020-08-28 04:55:21.000000000 -0700
+++ new/src/hotspot/share/gc/parallel/psCardTable.cpp 2020-08-28 04:55:20.000000000 -0700
@@ -587,7 +587,8 @@
                                   MemRegion(cur_committed.start(), new_start_aligned));
     if (!uncommit_region.is_empty()) {
       if (!os::uncommit_memory((char*)uncommit_region.start(),
-                               uncommit_region.byte_size())) {
+                               uncommit_region.byte_size(),
+                               !ExecMem)) {
         // If the uncommit fails, ignore it.  Let the
         // committed table resizing go even though the committed
         // table will over state the committed space.
--- old/src/hotspot/share/gc/parallel/psVirtualspace.cpp 2020-08-28 04:55:21.000000000 -0700
+++ new/src/hotspot/share/gc/parallel/psVirtualspace.cpp 2020-08-28 04:55:21.000000000 -0700
@@ -113,7 +113,7 @@
   }
 
   char* const base_addr = committed_high_addr() - bytes;
-  bool result = special() || os::uncommit_memory(base_addr, bytes);
+  bool result = special() || os::uncommit_memory(base_addr, bytes, !ExecMem);
   if (result) {
     _committed_high_addr -= bytes;
   }
--- old/src/hotspot/share/gc/shared/cardTable.cpp 2020-08-28 04:55:22.000000000 -0700
+++ new/src/hotspot/share/gc/shared/cardTable.cpp 2020-08-28 04:55:22.000000000 -0700
@@ -254,7 +254,8 @@
                                                 cur_committed.end()));
     if (!uncommit_region.is_empty()) {
       if (!os::uncommit_memory((char*)uncommit_region.start(),
-                               uncommit_region.byte_size())) {
+                               uncommit_region.byte_size(),
+                               !ExecMem)) {
         assert(false, "Card table contraction failed");
         // The call failed so don't change the end of the
         // committed region.  This is better than taking the
--- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp 2020-08-28 04:55:23.000000000 -0700
+++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp 2020-08-28 04:55:23.000000000 -0700
@@ -1318,7 +1318,7 @@
 
   assert(oop_stack.is_empty(), "should be empty");
 
-  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
+  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), !ExecMem)) {
     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
   }
 }
@@ -2669,7 +2669,7 @@
   size_t slice = r->index() / _bitmap_regions_per_slice;
   size_t off = _bitmap_bytes_per_slice * slice;
   size_t len = _bitmap_bytes_per_slice;
-  if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
+  if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len, !ExecMem)) {
     return false;
   }
   return true;
--- old/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp 2020-08-28 04:55:24.000000000 -0700
+++ new/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp 2020-08-28 04:55:24.000000000 -0700
@@ -627,7 +627,7 @@
 
 void ShenandoahHeapRegion::do_uncommit() {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
-  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
+  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes, !ExecMem)) {
     report_java_out_of_memory("Unable to uncommit region");
   }
   if (!heap->uncommit_bitmap_slice(this)) {
--- old/src/hotspot/share/memory/virtualspace.cpp 2020-08-28 04:55:25.000000000 -0700
+++ new/src/hotspot/share/memory/virtualspace.cpp 2020-08-28 04:55:24.000000000 -0700
@@ -192,7 +192,7 @@
       base = NULL;
     }
   } else {
-    base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
+    base = os::reserve_memory(size, NULL, alignment, _fd_for_heap, _executable);
   }
 
   if (base == NULL) return;
@@ -984,7 +984,7 @@
     assert(middle_high_boundary() <= aligned_upper_new_high &&
            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
            "must not shrink beyond region");
-    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
+    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
       return;
     } else {
@@ -995,7 +995,7 @@
     assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
-    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
+    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) {
       debug_only(warning("os::uncommit_memory failed"));
       return;
     } else {
@@ -1006,7 +1006,7 @@
     assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
-    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
+    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) {
       debug_only(warning("os::uncommit_memory failed"));
       return;
     } else {
--- old/src/hotspot/share/prims/whitebox.cpp 2020-08-28 04:55:26.000000000 -0700
+++ new/src/hotspot/share/prims/whitebox.cpp 2020-08-28 04:55:25.000000000 -0700
@@ -771,7 +771,7 @@
 WB_END
 
 WB_ENTRY(void, WB_NMTUncommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
-  os::uncommit_memory((char *)(uintptr_t)addr, size);
+  os::uncommit_memory((char *)(uintptr_t)addr, size, !ExecMem);
 WB_END
 
 WB_ENTRY(void, WB_NMTReleaseMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
--- old/src/hotspot/share/runtime/os.cpp 2020-08-28 04:55:26.000000000 -0700
+++ new/src/hotspot/share/runtime/os.cpp 2020-08-28 04:55:26.000000000 -0700
@@ -1652,7 +1652,7 @@
   return os::pd_create_stack_guard_pages(addr, bytes);
 }
 
-char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint, int file_desc) {
+char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint, int file_desc, bool executable) {
   char* result = NULL;
 
   if (file_desc != -1) {
@@ -1663,7 +1663,7 @@
       MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
     }
   } else {
-    result = pd_reserve_memory(bytes, addr, alignment_hint);
+    result = pd_reserve_memory(bytes, addr, alignment_hint, executable);
     if (result != NULL) {
       MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
     }
@@ -1728,16 +1728,16 @@
   MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
 }
 
-bool os::uncommit_memory(char* addr, size_t bytes) {
+bool os::uncommit_memory(char* addr, size_t bytes, bool exec) {
   bool res;
   if (MemTracker::tracking_level() > NMT_minimal) {
     Tracker tkr(Tracker::uncommit);
-    res = pd_uncommit_memory(addr, bytes);
+    res = pd_uncommit_memory(addr, bytes, exec);
     if (res) {
       tkr.record((address)addr, bytes);
     }
   } else {
-    res = pd_uncommit_memory(addr, bytes);
+    res = pd_uncommit_memory(addr, bytes, exec);
   }
   return res;
 }
--- old/src/hotspot/share/runtime/os.hpp 2020-08-28 04:55:27.000000000 -0700
+++ new/src/hotspot/share/runtime/os.hpp 2020-08-28 04:55:27.000000000 -0700
@@ -114,7 +114,8 @@
   }
 
   static char*  pd_reserve_memory(size_t bytes, char* addr = 0,
-                                  size_t alignment_hint = 0);
+                                  size_t alignment_hint = 0,
+                                  bool executable = false);
   static char*  pd_attempt_reserve_memory_at(size_t bytes, char* addr);
   static char*  pd_attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc);
   static bool   pd_commit_memory(char* addr, size_t bytes, bool executable);
@@ -127,7 +128,7 @@
   static void   pd_commit_memory_or_exit(char* addr, size_t size,
                                          size_t alignment_hint,
                                          bool executable, const char* mesg);
-  static bool   pd_uncommit_memory(char* addr, size_t bytes);
+  static bool   pd_uncommit_memory(char* addr, size_t bytes, bool exec);
   static bool   pd_release_memory(char* addr, size_t bytes);
 
   static char*  pd_map_memory(int fd, const char* file_name, size_t file_offset,
@@ -311,7 +312,8 @@
   static int    vm_allocation_granularity();
 
   static char*  reserve_memory(size_t bytes, char* addr = 0,
-                               size_t alignment_hint = 0, int file_desc = -1);
+                               size_t alignment_hint = 0, int file_desc = -1,
+                               bool executable = false);
   static char*  reserve_memory(size_t bytes, char* addr,
                                size_t alignment_hint, MEMFLAGS flags);
   static char*  reserve_memory_aligned(size_t size, size_t alignment, int file_desc = -1);
@@ -338,7 +340,7 @@
   static void   commit_memory_or_exit(char* addr, size_t size,
                                       size_t alignment_hint,
                                       bool executable, const char* mesg);
-  static bool   uncommit_memory(char* addr, size_t bytes);
+  static bool   uncommit_memory(char* addr, size_t bytes, bool exec);
   static bool   release_memory(char* addr, size_t bytes);
 
   // Touch memory pages that cover the memory range from start to end (exclusive)
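Both reserve_memory() declarations above default the new parameter to false, so the many existing call sites that never touch code memory compile unchanged; only the code-cache path has to pass true explicitly. A standalone illustration of that compatibility property (names invented here; the body is a stand-in):

    #include <cstddef>

    // New parameter appended with a default, mirroring the os.hpp change.
    char* reserve_memory(size_t bytes, char* addr = 0,
                         size_t alignment_hint = 0, int file_desc = -1,
                         bool executable = false) {
      (void)addr; (void)alignment_hint; (void)file_desc; (void)executable;
      return new char[bytes];  // stand-in for the real reservation
    }

    int main() {
      char* data = reserve_memory(4096);                  // old call site: unchanged
      char* code = reserve_memory(4096, 0, 0, -1, true);  // code cache: executable
      delete[] data;
      delete[] code;
      return 0;
    }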
failed (" PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len)); - if (os::uncommit_memory((char *) low_addr, len)) { + if (os::uncommit_memory((char *) low_addr, len, !ExecMem)) { log_warning(os, thread)("Attempt to deallocate stack guard pages failed."); } return;