--- old/make/autoconf/hotspot.m4	2019-03-14 17:07:28.046621670 +0000
+++ new/make/autoconf/hotspot.m4	2019-03-14 17:07:27.702618055 +0000
@@ -350,7 +350,8 @@
   # Only enable ZGC on supported platforms
   AC_MSG_CHECKING([if zgc can be built])
-  if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
+  if (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64") || \
+     (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64"); then
     AC_MSG_RESULT([yes])
   else
     DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES zgc"
--- old/src/hotspot/cpu/aarch64/assembler_aarch64.cpp	2019-03-14 17:07:29.074632471 +0000
+++ new/src/hotspot/cpu/aarch64/assembler_aarch64.cpp	2019-03-14 17:07:28.710628647 +0000
@@ -1265,6 +1265,13 @@
       __ movptr(r, (uint64_t)target());
       break;
     }
+    case post: {
+      // Post-indexed, just copy the contents of the register. Offset added afterwards.
+      if (_base == r) // it's a nop
+        break;
+      __ mov(r, _base);
+      break;
+    }
     default:
       ShouldNotReachHere();
   }
--- old/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	2019-03-14 17:07:30.174644030 +0000
+++ new/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	2019-03-14 17:07:29.834640457 +0000
@@ -1011,7 +1011,11 @@
     if (UseCompressedOops && !wide) {
       __ decode_heap_oop(dest->as_register());
     }
-    __ verify_oop(dest->as_register());
+
+    if (!UseZGC) {
+      // Load barrier has not yet been applied, so ZGC can't verify the oop here
+      __ verify_oop(dest->as_register());
+    }
   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
     if (UseCompressedClassPointers) {
       __ decode_klass_not_null(dest->as_register());
@@ -2866,7 +2870,11 @@
 
 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
-  assert(patch_code == lir_patch_none, "Patch code not supported");
+  if (patch_code != lir_patch_none) {
+    deoptimize_trap(info);
+    return;
+  }
+
   __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
 }
--- old/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp	2019-03-14 17:07:31.234655168 +0000
+++ new/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp	2019-03-14 17:07:30.890651553 +0000
@@ -37,7 +37,7 @@
 public:
   virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
-                                  Register addr, Register count, RegSet saved_regs) {}
+                                  Register src, Register dst, Register count, RegSet saved_regs) {}
   virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                   Register start, Register end, Register tmp, RegSet saved_regs) {}
   virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
--- old/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.cpp	2019-03-14 17:07:32.314666517 +0000
+++ new/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.cpp	2019-03-14 17:07:31.962662818 +0000
@@ -29,10 +29,10 @@
 #define __ masm->
 
 void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
-                                                   Register addr, Register count, RegSet saved_regs) {
+                                                   Register src, Register dst, Register count, RegSet saved_regs) {
   if (is_oop) {
-    gen_write_ref_array_pre_barrier(masm, decorators, addr, count, saved_regs);
+    gen_write_ref_array_pre_barrier(masm, decorators, dst, count, saved_regs);
   }
 }
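A note on the arraycopy_prologue signature change above: the prologue now receives both the source and destination registers because different collectors care about different ends of the copy. The ModRef pre-barrier keeps operating on the destination slots that are about to be overwritten, while ZGC's prologue (added later in this patch) heals the source slots before they are read. A compilable sketch of the two shapes, with hypothetical helper names that merely stand in for the real barrier calls:

    // Hypothetical illustration only, not JDK code.
    #include <cstddef>
    typedef void* oop;
    static void write_ref_array_pre_barrier(oop* dst, size_t count) { /* collector-specific */ }
    static void load_barrier_on_oop_array(oop* src, size_t count)   { /* collector-specific */ }

    // ModRef collectors (pre-barrier) care about the destination slots:
    static void modref_prologue(oop* /*src*/, oop* dst, size_t count) {
      write_ref_array_pre_barrier(dst, count);
    }

    // ZGC heals the source slots before they are read:
    static void zgc_prologue(oop* src, oop* /*dst*/, size_t count) {
      load_barrier_on_oop_array(src, count);
    }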
--- old/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.hpp	2019-03-14 17:07:33.538679379 +0000
+++ new/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.hpp	2019-03-14 17:07:33.078674545 +0000
@@ -44,7 +44,7 @@
 public:
   virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
-                                  Register addr, Register count, RegSet saved_regs);
+                                  Register src, Register dst, Register count, RegSet saved_regs);
   virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                   Register start, Register end, Register tmp, RegSet saved_regs);
   virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
--- old/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	2019-03-14 17:07:34.858693250 +0000
+++ new/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	2019-03-14 17:07:34.486689342 +0000
@@ -43,23 +43,23 @@
 address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
 
 void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
-                                                       Register addr, Register count, RegSet saved_regs) {
+                                                       Register src, Register dst, Register count, RegSet saved_regs) {
   if (is_oop) {
     bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
     if (!dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
       __ push(saved_regs, sp);
       if (count == c_rarg0) {
-        if (addr == c_rarg1) {
+        if (dst == c_rarg1) {
           // exactly backwards!!
           __ mov(rscratch1, c_rarg0);
           __ mov(c_rarg0, c_rarg1);
           __ mov(c_rarg1, rscratch1);
         } else {
           __ mov(c_rarg1, count);
-          __ mov(c_rarg0, addr);
+          __ mov(c_rarg0, dst);
         }
       } else {
-        __ mov(c_rarg0, addr);
+        __ mov(c_rarg0, dst);
         __ mov(c_rarg1, count);
       }
       if (UseCompressedOops) {
--- old/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp	2019-03-14 17:07:36.034705608 +0000
+++ new/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp	2019-03-14 17:07:35.598701027 +0000
@@ -76,9 +76,9 @@
 #endif
 
   virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
-                                  Register addr, Register count, RegSet saved_regs);
+                                  Register src, Register dst, Register count, RegSet saved_regs);
   virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
-                                  Register start, Register end, Register tmp, RegSet saved_regs);
+                                  Register src, Register end, Register tmp, RegSet saved_regs);
   virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        Register dst, Address src, Register tmp1, Register tmp_thread);
   virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
--- old/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp	2019-03-14 17:07:37.754723684 +0000
+++ new/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp	2019-03-14 17:07:37.218718051 +0000
@@ -45,6 +45,9 @@
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/zThreadLocalData.hpp"
+#endif
 
 #ifdef BUILTIN_SIM
 #include "../../../../../../simulator/simulator.hpp"
@@ -579,6 +582,16 @@
     // make sure object is 'reasonable'
     __ cbz(r0, exit); // if obj is NULL it is OK
+
+#if INCLUDE_ZGC
+    if (UseZGC) {
+      // Check if mask is good.
+      // verifies that ZAddressBadMask & r0 == 0
+      __ ldr(c_rarg3, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+      __ andr(c_rarg2, r0, c_rarg3);
+      __ cbnz(c_rarg2, error);
+    }
+#endif
 
     // Check if the oop is in the right area of memory
     __ mov(c_rarg3, (intptr_t) Universe::verify_oop_mask());
     __ andr(c_rarg2, r0, c_rarg3);
@@ -1363,7 +1376,7 @@
     }
 
     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_reg);
+    bs->arraycopy_prologue(_masm, decorators, is_oop, s, d, count, saved_reg);
 
     if (is_oop) {
       // save regs before copy_memory
@@ -1437,7 +1450,7 @@
     }
 
     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_regs);
+    bs->arraycopy_prologue(_masm, decorators, is_oop, s, d, count, saved_regs);
 
     if (is_oop) {
       // save regs before copy_memory
@@ -1799,7 +1812,7 @@
     }
 
     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->arraycopy_prologue(_masm, decorators, is_oop, to, count, wb_pre_saved_regs);
+    bs->arraycopy_prologue(_masm, decorators, is_oop, from, to, count, wb_pre_saved_regs);
 
     // save the original count
     __ mov(count_save, count);
--- old/src/hotspot/share/gc/z/zMarkStackEntry.hpp	2019-03-14 17:07:39.270739616 +0000
+++ new/src/hotspot/share/gc/z/zMarkStackEntry.hpp	2019-03-14 17:07:38.834735033 +0000
@@ -69,7 +69,7 @@
 private:
   typedef ZBitField field_finalizable;
   typedef ZBitField field_partial_array;
-  typedef ZBitField field_object_address;
+  typedef ZBitField field_object_address;
   typedef ZBitField field_partial_array_length;
   typedef ZBitField field_partial_array_offset;
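The new file below is the heart of the port: ZGC's load barrier for aarch64. Every oop loaded from the heap is tested against a per-thread "bad mask"; a non-zero intersection means the reference carries stale metadata bits and must be healed on a slow path. A compilable C++ model of the fast/slow-path split that load_at emits (load_barrier_slow_path is a stand-in for the ZBarrierSetRuntime call, not a real JDK function):

    #include <cstdint>
    typedef void* oop;

    // Stand-in for the runtime call; the real one relocates/remaps the object
    // as needed and stores the healed reference back through addr.
    static oop load_barrier_slow_path(oop* addr, oop ref) { return ref; }

    static inline oop zgc_load(oop* addr, uintptr_t bad_mask) {
      oop ref = *addr;                             // plain load
      if ((uintptr_t)ref & bad_mask) {             // tst dst, scratch
        ref = load_barrier_slow_path(addr, ref);   // taken when mask bits are bad
      }
      return ref;                                  // br EQ skips the slow path
    }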
--- /dev/null	2019-03-01 09:48:43.932982607 +0000
+++ new/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp	2019-03-14 17:07:40.222749620 +0000
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "code/codeBlob.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zBarrierSetRuntime.hpp"
+#include "memory/resourceArea.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/z/c1/zBarrierSetC1.hpp"
+#endif // COMPILER1
+
+#include "gc/z/zThreadLocalData.hpp"
+
+ZBarrierSetAssembler::ZBarrierSetAssembler() :
+    _load_barrier_slow_stub(),
+    _load_barrier_weak_slow_stub() {}
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#undef __
+#define __ masm->
+
+void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
+                                   DecoratorSet decorators,
+                                   BasicType type,
+                                   Register dst,
+                                   Address src,
+                                   Register tmp1,
+                                   Register tmp_thread) {
+  if (!ZBarrierSet::barrier_needed(decorators, type)) {
+    // Barrier not needed
+    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+    return;
+  }
+
+  Label done;
+
+  // TODO: We should be able to rely upon rheapbase containing the badmask.
+  // Currently we can't though - we need to reload, rather than restore, in some places.
+  Register scratch = rheapbase;
+  Register scratch2 = rscratch2;
+
+  if (dst == scratch2) {
+    scratch2 = rscratch1;
+  }
+
+  assert_different_registers(scratch, scratch2, dst);
+  assert_different_registers(scratch, scratch2, src.base());
+
+  RegSet savedRegs = RegSet::of(scratch2, scratch);
+  // Not saving this causes issues in testing.
+  __ push(savedRegs, sp);
+
+  // Load bad mask into scratch register.
+  __ ldr(scratch, address_bad_mask_from_thread(rthread));
+  __ lea(scratch2, src);
+  __ ldr(dst, src);
+
+  // Test reference against bad mask. If mask bad, then we need to fix it up.
+  __ tst(dst, scratch);
+  __ br(Assembler::EQ, done);
+
+  __ enter();
+
+  __ push(RegSet::range(r0, r28) - RegSet::of(dst), sp);
+
+  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch2);
+
+  // Make sure dst has the return value.
+  if (dst != r0) {
+    __ mov(dst, r0);
+  }
+
+  __ pop(RegSet::range(r0, r28) - RegSet::of(dst), sp);
+  __ leave();
+
+  __ bind(done);
+
+  // Restore tmps
+  __ pop(savedRegs, sp);
+}
+
+#ifdef ASSERT
+
+void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
+                                    DecoratorSet decorators,
+                                    BasicType type,
+                                    Address dst,
+                                    Register val,
+                                    Register tmp1,
+                                    Register tmp2) {
+  // Verify value
+  if (type == T_OBJECT || type == T_ARRAY) {
+    // Note that src could be noreg, which means we
+    // are storing null and can skip verification.
+    if (val != noreg) {
+      Label done;
+
+      // TODO: Remove this and have rheapbase set in context.
+      RegSet savedRegs = RegSet::of(rheapbase);
+      __ push(savedRegs, sp);
+
+      __ ldr(rheapbase, address_bad_mask_from_thread(rthread));
+      __ tst(val, rheapbase);
+      __ br(Assembler::EQ, done);
+      __ stop("Verify oop store failed");
+      __ should_not_reach_here();
+      __ bind(done);
+      __ pop(savedRegs, sp);
+    }
+  }
+
+  // Store value
+  BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
+}
+
+#endif // ASSERT
+
+void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
+                                              DecoratorSet decorators,
+                                              bool is_oop,
+                                              Register src,
+                                              Register dst,
+                                              Register count,
+                                              RegSet saved_regs) {
+  if (!is_oop) {
+    // Barrier not needed
+    return;
+  }
+
+  assert_different_registers(src, count, rscratch1);
+
+  __ pusha();
+
+  if (count == c_rarg0) {
+    if (src == c_rarg1) {
+      // exactly backwards!!
+      __ mov(rscratch1, c_rarg0);
+      __ mov(c_rarg0, c_rarg1);
+      __ mov(c_rarg1, rscratch1);
+    } else {
+      __ mov(c_rarg1, count);
+      __ mov(c_rarg0, src);
+    }
+  } else {
+    __ mov(c_rarg0, src);
+    __ mov(c_rarg1, count);
+  }
+
+  // Save the necessary global regs... will be used after.
+  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2);
+
+  __ popa();
+}
+
+void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
+                                                         Register jni_env,
+                                                         Register robj,
+                                                         Register tmp,
+                                                         Label& slowpath) {
+  // NOTE! The code generated here is executed in native context, and therefore
+  // we don't have the address bad mask in r27 and we don't have the thread pointer
+  // in r28. However, we do have the JNIEnv* in c_rarg0 from the call to
+  // JNI_FastGetField, and so we use that to get the address bad mask.
+
+  // Resolve jobject
+  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);
+
+  // The Address offset (-784) is too large for a direct load; our range is roughly +-255.
+  // I guess every thread hitting the global ZBadMask would be bad?
+  // TODO: This is a string of operations on a single register.
+  __ mov(tmp, (long int)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) -
+                         in_bytes(JavaThread::jni_environment_offset())));
+
+  // Load address bad mask
+  __ add(tmp, jni_env, tmp);
+  __ ldr(tmp, Address(tmp));
+
+  // Check address bad mask
+  __ tst(robj, tmp);
+  __ br(Assembler::NE, slowpath);
+}
+
+#ifdef COMPILER1
+
+#undef __
+#define __ ce->masm()->
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
+                                                         LIR_Opr ref) const {
+  // TODO: In principle, we could use rheapbase without reloading, but as it is a constant,
+  // we'd need to handle it differently with ZGC.
+  __ ldr(rheapbase, address_bad_mask_from_thread(rthread));
+  __ tst(ref->as_register(), rheapbase);
+}
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
+                                                         ZLoadBarrierStubC1* stub) const {
+  // Stub entry
+  __ bind(*stub->entry());
+
+  Register ref = stub->ref()->as_register();
+  Register ref_addr = noreg;
+  Register tmp = noreg;
+
+  if (stub->tmp()->is_valid()) {
+    // Load address into tmp register
+    ce->leal(stub->ref_addr(), stub->tmp());
+    ref_addr = tmp = stub->tmp()->as_pointer_register();
+  } else {
+    // Address already in register
+    ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
+  }
+
+  assert_different_registers(ref, ref_addr, noreg);
+
+  // Save r0 unless it is the result or tmp register.
+  // Set up SP to accommodate parameters and maybe r0.
+  if (ref != r0 && tmp != r0) {
+    __ sub(sp, sp, 32);
+    __ str(r0, Address(sp, 16));
+  } else {
+    __ sub(sp, sp, 16);
+  }
+
+  // Setup arguments and call runtime stub
+  ce->store_parameter(ref_addr, 1);
+  ce->store_parameter(ref, 0);
+
+  __ far_call(stub->runtime_stub());
+
+  __ verify_oop(r0, "Bad oop");
+
+  if (ref != r0) {
+    __ mov(ref, r0);
+  }
+
+  // Restore r0 unless it is the result or tmp register
+  if (ref != r0 && tmp != r0) {
+    __ ldr(r0, Address(sp, 16));
+    __ add(sp, sp, 32);
+  } else {
+    __ add(sp, sp, 16);
+  }
+
+  // Stub exit
+  __ b(*stub->continuation());
+}
+
+#undef __
+#define __ sasm->
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
+                                                                 DecoratorSet decorators) const {
+  __ prologue("zgc_load_barrier stub", false);
+
+  // We don't use push/pop_clobbered_registers() - we need to pull out the result from r0.
+  for (int i = 0; i < 32; i += 2) {
+    __ stpd(as_FloatRegister(i), as_FloatRegister(i + 1), Address(__ pre(sp, -16)));
+  }
+
+  RegSet saveRegs = RegSet::range(r0, r28) - RegSet::of(r0);
+  __ push(saveRegs, sp);
+
+  __ load_parameter(0, c_rarg0);
+  __ load_parameter(1, c_rarg1);
+
+  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
+
+  __ pop(saveRegs, sp);
+
+  // Restore in reverse order; the loop must run down to and including zero
+  // so that the d0:d1 pair is popped as well.
+  for (int i = 30; i >= 0; i -= 2) {
+    __ ldpd(as_FloatRegister(i), as_FloatRegister(i + 1), Address(__ post(sp, 16)));
+  }
+
+  __ epilogue();
+}
+#endif // COMPILER1
+
+#undef __
+#define __ cgen->assembler()->
+
+// Generates a register specific stub for calling
+// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
+// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
+//
+// The raddr register serves as both input and output for this stub. When the stub is
+// called the raddr register contains the object field address (oop*) where the bad oop
+// was loaded from, which caused the slow path to be taken. On return from the stub the
+// raddr register contains the good/healed oop returned from
+// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
+// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
+static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
+  // Don't generate stub for invalid registers
+  if (raddr == zr || raddr == r29 || raddr == r30) {
+    return NULL;
+  }
+
+  // Create stub name
+  char name[64];
+  const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
+  os::snprintf(name, sizeof(name), "zgc_load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
+
+  __ align(CodeEntryAlignment);
+  StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
+  address start = __ pc();
+
+  RegSet savedRegs = RegSet::range(r0, r28) - RegSet::of(raddr);
+
+  __ enter();
+  __ push(savedRegs, sp);
+
+  // Setup arguments
+  if (raddr != c_rarg1) {
+    __ mov(c_rarg1, raddr);
+  }
+
+  __ ldr(c_rarg0, Address(raddr));
+
+  // Call barrier function
+  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+
+  // Move result returned in r0 to raddr, if needed
+  if (raddr != r0) {
+    __ mov(raddr, r0);
+  }
+
+  __ pop(savedRegs, sp);
+  __ leave();
+  __ ret(lr);
+
+  return start;
+}
+
+#undef __
+
+static void barrier_stubs_init_inner(const char* label, const DecoratorSet decorators, address* stub) {
+  const int nregs = 29; // Exclude FP, XZR, SP from calculation.
+  const int code_size = nregs * 254; // Rough estimate of code size
+
+  ResourceMark rm;
+
+  CodeBuffer buf(BufferBlob::create(label, code_size));
+  StubCodeGenerator cgen(&buf);
+
+  for (int i = 0; i < nregs; i++) {
+    const Register reg = as_Register(i);
+    stub[i] = generate_load_barrier_stub(&cgen, reg, decorators);
+  }
+}
+
+void ZBarrierSetAssembler::barrier_stubs_init() {
+  barrier_stubs_init_inner("zgc_load_barrier_stubs", ON_STRONG_OOP_REF, _load_barrier_slow_stub);
+  barrier_stubs_init_inner("zgc_load_barrier_weak_stubs", ON_WEAK_OOP_REF, _load_barrier_weak_slow_stub);
+}
+
+address ZBarrierSetAssembler::load_barrier_slow_stub(Register reg) {
+  return _load_barrier_slow_stub[reg->encoding()];
+}
+
+address ZBarrierSetAssembler::load_barrier_weak_slow_stub(Register reg) {
+  return _load_barrier_weak_slow_stub[reg->encoding()];
+}
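One detail worth calling out in generate_c1_load_barrier_runtime_stub above: the float registers are saved as pre-indexed stpd pairs and must be popped in exact reverse order, which is why the restore loop has to run down to and including zero (16 pairs on, 16 pairs off). A small standalone model of the pairing:

    #include <cstdio>
    int main() {
      for (int i = 0; i < 32; i += 2)  { printf("push d%d:d%d\n", i, i + 1); }  // 16 stpd
      for (int i = 30; i >= 0; i -= 2) { printf("pop  d%d:d%d\n", i, i + 1); }  // 16 ldpd
      return 0;
    }

Stopping the second loop at i > 0 would leave the d0:d1 pair on the stack and sp off by 16 bytes at the epilogue.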
+ */ + +#ifndef CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP +#define CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP + +#ifdef COMPILER1 +class LIR_Assembler; +class LIR_OprDesc; +typedef LIR_OprDesc* LIR_Opr; +class StubAssembler; +class ZLoadBarrierStubC1; +#endif // COMPILER1 + +class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { +private: + address _load_barrier_slow_stub[RegisterImpl::number_of_registers]; + address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers]; + +public: + ZBarrierSetAssembler(); + + virtual void load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Register dst, + Address src, + Register tmp1, + Register tmp_thread); + +#ifdef ASSERT + virtual void store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Address dst, + Register val, + Register tmp1, + Register tmp2); +#endif // ASSERT + + virtual void arraycopy_prologue(MacroAssembler* masm, + DecoratorSet decorators, + bool is_oop, + Register src, + Register dst, + Register count, + RegSet saved_regs); + + virtual void try_resolve_jobject_in_native(MacroAssembler* masm, + Register jni_env, + Register robj, + Register tmp, + Label& slowpath); + +#ifdef COMPILER1 + void generate_c1_load_barrier_test(LIR_Assembler* ce, + LIR_Opr ref) const; + + void generate_c1_load_barrier_stub(LIR_Assembler* ce, + ZLoadBarrierStubC1* stub) const; + + void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, + DecoratorSet decorators) const; +#endif // COMPILER1 + + virtual void barrier_stubs_init(); + + address load_barrier_slow_stub(Register reg); + address load_barrier_weak_slow_stub(Register reg); +}; + +#endif // CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP --- /dev/null 2019-03-01 09:48:43.932982607 +0000 +++ new/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad 2019-03-14 17:07:42.554774129 +0000 @@ -0,0 +1,113 @@ +// +// Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. +// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +// +// This code is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License version 2 only, as +// published by the Free Software Foundation. +// +// This code is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// version 2 for more details (a copy is included in the LICENSE file that +// accompanied this code). +// +// You should have received a copy of the GNU General Public License version +// 2 along with this work; if not, write to the Free Software Foundation, +// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +// +// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +// or visit www.oracle.com if you need additional information or have any +// questions. 
--- /dev/null	2019-03-01 09:48:43.932982607 +0000
+++ new/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad	2019-03-14 17:07:42.554774129 +0000
@@ -0,0 +1,113 @@
+//
+// Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+
+source %{
+#include "gc/z/zBarrierSetAssembler.hpp"
+%}
+
+//
+// Execute ZGC load barrier (strong) slow path
+//
+instruct loadBarrierSlowReg(iRegP dst, memory mem, rFlagsReg cr) %{
+  match(Set dst (LoadBarrierSlowReg mem));
+  effect(DEF dst, KILL cr);
+
+  format %{ "LoadBarrierSlowReg $dst, $mem" %}
+
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+
+    Register base = $mem$$base$$Register;
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+
+    if (index == -1) {
+      if (disp != 0) {
+        __ lea(d, Address(base, disp));
+      } else {
+        __ mov(d, base);
+      }
+    } else {
+      Register index_reg = as_Register(index);
+      if (disp == 0) {
+        __ lea(d, Address(base, index_reg, Address::lsl(scale)));
+      } else {
+        __ lea(d, Address(base, disp));
+        __ lea(d, Address(d, index_reg, Address::lsl(scale)));
+      }
+    }
+
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    __ far_call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+//
+// Execute ZGC load barrier (weak) slow path
+//
+instruct loadBarrierWeakSlowReg(iRegP dst, memory mem, rFlagsReg cr) %{
+  match(Set dst (LoadBarrierWeakSlowReg mem));
+
+  effect(DEF dst, KILL cr);
+
+  format %{ "LoadBarrierWeakSlowReg $dst, $mem" %}
+
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+
+    Register base = $mem$$base$$Register;
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+
+    if (index == -1) {
+      if (disp != 0) {
+        __ lea(d, Address(base, disp));
+      } else {
+        __ mov(d, base);
+      }
+    } else {
+      Register index_reg = as_Register(index);
+      if (disp == 0) {
+        __ lea(d, Address(base, index_reg, Address::lsl(scale)));
+      } else {
+        __ lea(d, Address(base, disp));
+        __ lea(d, Address(d, index_reg, Address::lsl(scale)));
+      }
+    }
+
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    __ far_call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
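The address materialization in both instructs above folds the matched memory operand back into a single pointer register before calling the per-register stub. The two-lea sequence in the indexed-with-displacement case computes the same value as the usual effective-address formula; a compilable model of what ends up in d:

    #include <cstdint>
    // First lea: d = base + disp; second lea: d = d + (index << scale), i.e.:
    static inline uintptr_t effective_address(uintptr_t base, uintptr_t index,
                                              int scale, int disp) {
      return base + disp + (index << scale);
    }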
+ */ + +#ifndef OS_CPU_LINUX_AARCH64_GC_Z_ZADDRESS_LINUX_AARCH64_INLINE_HPP +#define OS_CPU_LINUX_AARCH64_GC_Z_ZADDRESS_LINUX_AARCH64_INLINE_HPP + +inline uintptr_t ZAddress::address(uintptr_t value) { + return value | ZAddressSpaceStart; +} + +#endif // OS_CPU_LINUX_AARCH64_GC_Z_ZADDRESS_LINUX_AARCH64_INLINE_HPP --- /dev/null 2019-03-01 09:48:43.932982607 +0000 +++ new/src/hotspot/os_cpu/linux_aarch64/gc/z/zArguments_linux_aarch64.cpp 2019-03-14 17:07:44.886798638 +0000 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/z/zArguments.hpp" +#include "runtime/globals.hpp" +#include "runtime/globals_extension.hpp" +#include "utilities/debug.hpp" + +void ZArguments::initialize_platform() { + // On aarch64 we need literal oops to be 64-bit, rather than 48-bit. + FLAG_SET_DEFAULT(Use64BitLiteralAddresses, true); + + // Disable class unloading - we don't support concurrent class unloading yet. + FLAG_SET_DEFAULT(ClassUnloading, false); + FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false); +} --- /dev/null 2019-03-01 09:48:43.932982607 +0000 +++ new/src/hotspot/os_cpu/linux_aarch64/gc/z/zGlobals_linux_aarch64.cpp 2019-03-14 17:07:46.062810998 +0000 @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/z/zGlobals.hpp" + +uintptr_t ZAddressReservedStart() { + return ZAddressMetadataMarked0 + ZAddressSpaceStart; +} + +uintptr_t ZAddressReservedEnd() { + return ZAddressMetadataRemapped + ZAddressSpaceEnd; +} --- /dev/null 2019-03-01 09:48:43.932982607 +0000 +++ new/src/hotspot/os_cpu/linux_aarch64/gc/z/zGlobals_linux_aarch64.hpp 2019-03-14 17:07:47.146822392 +0000 @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef OS_CPU_LINUX_AARCH64_GC_Z_ZGLOBALS_LINUX_AARCH64_HPP +#define OS_CPU_LINUX_AARCH64_GC_Z_ZGLOBALS_LINUX_AARCH64_HPP + +// +// Page Allocation Tiers +// --------------------- +// +// Page Type Page Size Object Size Limit Object Alignment +// ------------------------------------------------------------------ +// Small 2M <= 256K +// Medium 32M <= 4M 4K +// Large X*M > 4M 2M +// ------------------------------------------------------------------ +// +// +// Address Space & Pointer Layout +// ------------------------------ +// +// +--------------------------------+ 0xFFFFFFFFFFFFFFFF (16EB) +// . . +// . . +// . . +// +--------------------------------+ 0x0000080000000000 (8TB) +// | Heap | +// +--------------------------------+ 0x0000040000000000 (4TB) +// . . +// +--------------------------------+ 0x0000000000000000 +// +// +// * 63-56 - Top Byte Ignored (TBI) +// | | +// | | +// | | +// | | +// |6 6|5 5|5 4 4 4 0 +// |3 0|9 6|5 3 2 1 0 +// +----+----+--------------+-+-----------------------------------------------+ +// |1111|0000|00000000 00000|1|11 11111111 11111111 11111111 11111111 11111111| +// +----+----+--------------+-+-----------------------------------------------+ +// | | | | | +// | | | | * 41-0 Object Offset (42-bits, 4TB address space) +// | | | | +// | | | * 42-42 Address Base (1-bit) +// | | | +// | | * 55-43 Unused (13-bits, always zero) +// | | +// | * 59-56 Fixed (4-bits, always zero) +// * 63-60 Metadata Bits (4-bits, always zero) 0001 = Marked0 +// 0010 = Marked1 +// 0100 = Remapped +// 1000 = Finalizable +// +// SRDM zMarkStackEntry strips off the top two bits, which frustrates our ability +// to use the top 4 bits. However, this means they occupy the same bits as the MTE +// bits. + +const size_t ZPlatformGranuleSizeShift = 21; // 2M + +const size_t ZPlatformAddressOffsetBits = 42; // 4TB + +const uintptr_t ZPlatformAddressMetadataShift = BitsPerWord - 4; // 4 bits allocated for VA masking. 
+
+const uintptr_t ZPlatformAddressSpaceStart = (uintptr_t)1 << ZPlatformAddressOffsetBits;
+const uintptr_t ZPlatformAddressSpaceSize = (uintptr_t)1 << ZPlatformAddressOffsetBits;
+
+const size_t ZPlatformNMethodDisarmedOffset = 0;
+
+const size_t ZPlatformCacheLineSize = 64; // SRDM: This can vary by platform. Get dynamically.
+
+#endif // OS_CPU_LINUX_AARCH64_GC_Z_ZGLOBALS_LINUX_AARCH64_HPP
--- /dev/null	2019-03-01 09:48:43.932982607 +0000
+++ new/src/hotspot/os_cpu/linux_aarch64/gc/z/zLargePages_linux_aarch64.cpp	2019-03-14 17:07:48.214833617 +0000
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zLargePages.hpp"
+#include "runtime/globals.hpp"
+
+void ZLargePages::initialize_platform() {
+  if (UseLargePages) {
+    if (UseTransparentHugePages) {
+      _state = Transparent;
+    } else {
+      _state = Explicit;
+    }
+  } else {
+    _state = Disabled;
+  }
+}
+ */ + +#include "gc/z/zErrno.hpp" +#include "gc/z/zCPU.hpp" +#include "gc/z/zNUMA.hpp" +#include "runtime/os.hpp" +#include "utilities/debug.hpp" + +#include +#include + +#ifndef MPOL_F_NODE +#define MPOL_F_NODE (1<<0) // Return next IL mode instead of node mask +#endif + +#ifndef MPOL_F_ADDR +#define MPOL_F_ADDR (1<<1) // Look up VMA using address +#endif + +static int z_get_mempolicy(uint32_t* mode, const unsigned long *nmask, unsigned long maxnode, uintptr_t addr, int flags) { + return syscall(__NR_get_mempolicy, mode, nmask, maxnode, addr, flags); +} + +void ZNUMA::initialize_platform() { + _enabled = UseNUMA; +} + +uint32_t ZNUMA::count() { + if (!_enabled) { + // NUMA support not enabled + return 1; + } + + return os::Linux::numa_max_node() + 1; +} + +uint32_t ZNUMA::id() { + if (!_enabled) { + // NUMA support not enabled + return 0; + } + + return os::Linux::get_node_by_cpu(ZCPU::id()); +} + +uint32_t ZNUMA::memory_id(uintptr_t addr) { + if (!_enabled) { + // NUMA support not enabled, assume everything belongs to node zero + return 0; + } + + uint32_t id = (uint32_t)-1; + + if (z_get_mempolicy(&id, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) { + ZErrno err; + fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string()); + } + + assert(id < count(), "Invalid NUMA id"); + + return id; +} --- /dev/null 2019-03-01 09:48:43.932982607 +0000 +++ new/src/hotspot/os_cpu/linux_aarch64/gc/z/zPhysicalMemoryBacking_linux_aarch64.cpp 2019-03-14 17:07:50.466857288 +0000 @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zErrno.hpp" +#include "gc/z/zErrno.hpp" +#include "gc/z/zLargePages.inline.hpp" +#include "gc/z/zMemory.hpp" +#include "gc/z/zNUMA.hpp" +#include "gc/z/zPhysicalMemory.inline.hpp" +#include "gc/z/zPhysicalMemoryBacking_linux_aarch64.hpp" +#include "logging/log.hpp" +#include "runtime/os.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" + +#include +#include +#include + +// Support for building on older Linux systems +#ifndef MADV_HUGEPAGE +#define MADV_HUGEPAGE 14 +#endif + +// Proc file entry for max map mount +#define ZFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count" + +ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) { + // Check and warn if max map count is too low + check_max_map_count(max_capacity); +} + +void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity) const { + const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT; + FILE* const file = fopen(filename, "r"); + if (file == NULL) { + // Failed to open file, skip check + log_debug(gc, init)("Failed to open %s", filename); + return; + } + + size_t actual_max_map_count = 0; + const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count); + fclose(file); + if (result != 1) { + // Failed to read file, skip check + log_debug(gc, init)("Failed to read %s", filename); + return; + } + + // The required max map count is impossible to calculate exactly since subsystems + // other than ZGC are also creating memory mappings, and we have no control over that. + // However, ZGC tends to create the most mappings and dominate the total count. + // In the worst cases, We speculate that we need another 20% to allow for + // non-ZGC subsystems to map memory. + const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 1.2; + if (actual_max_map_count < required_max_map_count) { + log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****"); + log_warning(gc, init)("The system limit on number of memory mappings per process might be too low " + "for the given"); + log_warning(gc, init)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at", + max_capacity / M, filename); + log_warning(gc, init)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing " + "execution with the current", required_max_map_count, actual_max_map_count); + log_warning(gc, init)("limit could lead to a fatal error, due to failure to map memory."); + } +} + +size_t ZPhysicalMemoryBacking::try_expand(size_t old_capacity, size_t new_capacity) { + assert(old_capacity < new_capacity, "Invalid old/new capacity"); + return new_capacity; +} + +ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) { + assert(is_aligned(size, ZGranuleSize), "Invalid size"); + return ZPhysicalMemory(size); +} + +void ZPhysicalMemoryBacking::free(ZPhysicalMemory pmem) { + assert(pmem.nsegments() == 1, "Invalid number of segments"); +} + +void ZPhysicalMemoryBacking::map_failed(ZErrno err) const { + if (err == ENOMEM) { + fatal("Failed to map memory. 
+          "memory mappings allowed per process (see %s)", ZFILENAME_PROC_MAX_MAP_COUNT);
+  } else {
+    fatal("Failed to map memory (%s)", err.to_string());
+  }
+}
+
+uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
+  // We only have one heap mapping, so just convert the offset to a heap address
+  return ZAddress::address(offset);
+}
+
+void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
+  assert(pmem.nsegments() == 1, "Invalid number of segments");
+
+  const uintptr_t addr = ZAddress::address(offset);
+  const size_t size = pmem.size();
+
+  int flags = MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE;
+  if (ZLargePages::is_explicit()) {
+    flags |= MAP_HUGETLB;
+  }
+
+  const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, flags, 0, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    map_failed(err);
+  }
+
+  // Advise on use of transparent huge pages before touching it
+  if (ZLargePages::is_transparent()) {
+    if (madvise((void*)addr, size, MADV_HUGEPAGE) == -1) {
+      ZErrno err;
+      log_error(gc)("Failed to advise use of transparent huge pages (%s)", err.to_string());
+    }
+  }
+
+  // NUMA interleave memory before touching it
+  ZNUMA::memory_interleave(addr, size);
+
+  if (AlwaysPreTouch) {
+    const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
+    os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
+  }
+}
+
+void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
+  const size_t size = pmem.size();
+  const uintptr_t addr = ZAddress::address(offset);
+
+  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    map_failed(err);
+  }
+}
+
+void ZPhysicalMemoryBacking::flip(ZPhysicalMemory pmem, uintptr_t offset) const {
+  // Does nothing when using VA-masking
+}
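Two observations on the file above. First, unmap does not call munmap: it overwrites the range with a fresh PROT_NONE, MAP_NORESERVE anonymous mapping, which discards the backing pages while keeping the virtual range reserved, so nothing else can mmap into the heap's address space. Second, as a worked check of check_max_map_count's estimate: a 16G heap with 2M granules needs (16G / 2M) * 1.2 = 8192 * 1.2, roughly 9830 mappings, comfortably under the common Linux default vm.max_map_count of 65530; by the same formula, that default covers heaps up to roughly 100G before the warning fires.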
+ */ + +#ifndef OS_CPU_LINUX_AARCH64_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_AARCH64_HPP +#define OS_CPU_LINUX_AARCH64_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_AARCH64_HPP + +#include "gc/z/zMemory.hpp" + +class ZErrno; +class ZPhysicalMemory; + +class ZPhysicalMemoryBacking { +private: + void check_max_map_count(size_t max_capacity) const; + void map_failed(ZErrno err) const; +public: + ZPhysicalMemoryBacking(size_t max_capacity); + + bool is_initialized() const { return true; } + + size_t try_expand(size_t old_capacity, size_t new_capacity); + + ZPhysicalMemory alloc(size_t size); + void free(ZPhysicalMemory pmem); + + uintptr_t nmt_address(uintptr_t offset) const; + + void map(ZPhysicalMemory pmem, uintptr_t offset) const; + void unmap(ZPhysicalMemory pmem, uintptr_t offset) const; + void flip(ZPhysicalMemory pmem, uintptr_t offset) const; +}; + +#endif // OS_CPU_LINUX_AARCH64_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_AARCH64_HPP --- /dev/null 2019-03-01 09:48:43.932982607 +0000 +++ new/src/hotspot/os_cpu/linux_aarch64/gc/z/zVirtualMemory_linux_aarch64.cpp 2019-03-14 17:07:52.630880035 +0000 @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/z/zVirtualMemory.hpp" +#include "logging/log.hpp" + +#include +#include + +bool ZVirtualMemoryManager::reserve(uintptr_t start, size_t size) { + // Reserve address space + const uintptr_t actual_start = (uintptr_t)mmap((void*)start, size, PROT_NONE, + MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0); + if (actual_start != start) { + log_error(gc)("Failed to reserve address space for Java heap"); + return false; + } + + return true; +}