//
// Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//

source_hpp %{

#include "gc/z/c2/zBarrierSetC2.hpp"

%}

source %{

#include "gc/z/zBarrierSetAssembler.hpp"

static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst,
                                    Register base, int index, int scale,
                                    int disp, bool weak) {
  // Each destination register has its own pre-generated stub; select the
  // strong or weak variant of the load barrier slow path.
  const address stub = weak
      ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
      : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);

  // Materialize the effective address of the reference field in dst, which
  // is the argument the stub expects.
  if (index == -1) {
    if (disp != 0) {
      __ lea(dst, Address(base, disp));
    } else {
      __ mov(dst, base);
    }
  } else {
    Register index_reg = as_Register(index);
    if (disp == 0) {
      __ lea(dst, Address(base, index_reg, Address::lsl(scale)));
    } else {
      __ lea(dst, Address(base, disp));
      __ lea(dst, Address(dst, index_reg, Address::lsl(scale)));
    }
  }

  __ far_call(RuntimeAddress(stub));
}

%}

//
// Execute ZGC load barrier (strong) slow path
//
instruct loadBarrierSlowReg(iRegP dst, memory mem, rFlagsReg cr) %{
  match(Set dst (LoadBarrierSlowReg mem));
  predicate(!n->as_LoadBarrierSlowReg()->is_weak());

  effect(DEF dst, KILL cr);

  format %{ "LoadBarrierSlowReg $dst, $mem" %}
  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
                            $mem$$index, $mem$$scale, $mem$$disp, false);
  %}
  ins_pipe(pipe_slow);
%}

//
// Execute ZGC load barrier (weak) slow path
//
instruct loadBarrierWeakSlowReg(iRegP dst, memory mem, rFlagsReg cr) %{
  match(Set dst (LoadBarrierSlowReg mem));
  predicate(n->as_LoadBarrierSlowReg()->is_weak());

  effect(DEF dst, KILL cr);

  format %{ "LoadBarrierWeakSlowReg $dst, $mem" %}
  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
                            $mem$$index, $mem$$scale, $mem$$disp, true);
  %}
  ins_pipe(pipe_slow);
%}
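//
// For context: the instructs above implement only the slow path. The fast
// path is inserted by ZBarrierSetC2 as ordinary IR that tests the loaded
// oop against the per-thread address bad mask and branches here only when
// bad color bits are set. A rough sketch of the resulting machine code,
// assuming the usual ZGC thread-local layout (an illustration, not code
// emitted from this file):
//
//   ldr  dst, [ref_addr]                           // load the oop
//   ldr  tmp, [rthread, #address_bad_mask_offset]  // ZThreadLocalData mask
//   tst  dst, tmp                                  // any bad color bits set?
//   b.ne slow                                      // -> loadBarrierSlowReg,
//                                                  //    stub heals dst
//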
// Specialized versions of compareAndExchangeP that add a keepalive input,
// which is consumed by the match rule but doesn't affect the output.

instruct z_compareAndExchangeP(iRegPNoSp res, indirect mem,
                               iRegP oldval, iRegP newval, iRegP keepalive,
                               rFlagsReg cr) %{
  match(Set res (ZCompareAndExchangeP (Binary mem keepalive) (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}

  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}

instruct z_compareAndSwapP(iRegINoSp res,
                           indirect mem,
                           iRegP oldval, iRegP newval, iRegP keepalive,
                           rFlagsReg cr) %{
  match(Set res (ZCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));
  match(Set res (ZWeakCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct z_get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev, iRegP keepalive) %{
  match(Set prev (ZGetAndSetP mem (Binary newv keepalive)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "atomic_xchg $prev, $newv, [$mem]" %}

  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
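//
// For context: MacroAssembler::cmpxchg, used by the two compare-and-swap
// instructs above, expands to an LSE cas instruction on ARMv8.1+ hardware,
// or to a load/store-exclusive retry loop otherwise. A rough sketch of the
// non-LSE shape for these arguments (acquire = false, release = true; an
// illustration, not code from this file):
//
//   retry:
//     ldxr  res, [mem]          // load exclusive (no acquire)
//     cmp   res, oldval
//     b.ne  done                // mismatch: res holds the witnessed value
//     stlxr tmp, newval, [mem]  // store exclusive with release
//     cbnz  tmp, retry          // lost the reservation: try again
//   done:
//
// The flags left by the cmp are what aarch64_enc_cset_eq consumes in
// z_compareAndSwapP to turn the outcome into 0/1.
//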