1 //
   2 // Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
   3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4 //
   5 // This code is free software; you can redistribute it and/or modify it
   6 // under the terms of the GNU General Public License version 2 only, as
   7 // published by the Free Software Foundation.
   8 //
   9 // This code is distributed in the hope that it will be useful, but WITHOUT
  10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12 // version 2 for more details (a copy is included in the LICENSE file that
  13 // accompanied this code).
  14 //
  15 // You should have received a copy of the GNU General Public License version
  16 // 2 along with this work; if not, write to the Free Software Foundation,
  17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18 //
  19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20 // or visit www.oracle.com if you need additional information or have any
  21 // questions.
  22 //
  23 
  24 source_hpp %{
  25 
  26 #include "gc/z/c2/zBarrierSetC2.hpp"
  27 
  28 %}
  29 
  30 source %{
  31 
  32 #include "gc/z/zBarrierSetAssembler.hpp"
  33 
// Emit the ZGC load-barrier slow-path call for an oop loaded into dst.
// Materializes the effective address of the memory operand (base + disp
// [+ index << scale]) into dst, then far-calls the per-register stub, which
// reads the address from dst and writes the healed oop back into dst.
// NOTE: the parameter must be named _masm — the __ macro (defined elsewhere
// in this file's generated context) presumably expands to _masm. — do not rename.
static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, 
                                    Register base, int index, int scale, 
                                    int disp, bool weak) {
  // One stub per destination register; the weak flag selects the weak-reference
  // variant of the barrier stub.
  const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
                            : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);

  if (index == -1) {
    // No index register (index == -1 appears to encode "none" here — matches
    // the $mem$$index value passed by the instructs below).
    if (disp != 0) {
      __ lea(dst, Address(base, disp));
    } else {
       __ mov(dst, base);
    }
  } else {
    Register index_reg = as_Register(index);
    if (disp == 0) {
      __ lea(dst, Address(base, index_reg, Address::lsl(scale)));
    } else {
      // AArch64 has no [base + index<<scale + disp] mode; compute it in two
      // steps using dst as scratch.
      // NOTE(review): assumes index_reg != dst — the first lea would otherwise
      // clobber the index; confirm the register allocator guarantees this.
      __ lea(dst, Address(base, disp));
      __ lea(dst, Address(dst, index_reg, Address::lsl(scale)));
    }
  }

  // Tail into the slow-path stub; dst now holds the field address.
  __ far_call(RuntimeAddress(stub));
}
  58 
  59 %}
  60 
  61 //
  62 // Execute ZGC load barrier (strong) slow path
  63 //
// Strong load-barrier slow path: matches LoadBarrierSlowReg nodes whose
// is_weak() predicate is false. Emits the address computation plus a far call
// to the strong per-register stub (weak == false in the helper call).
instruct loadBarrierSlowReg(iRegP dst, memory mem, rFlagsReg cr) %{
  match(Set dst (LoadBarrierSlowReg mem));
  predicate(!n->as_LoadBarrierSlowReg()->is_weak());

  // The stub call clobbers flags, hence KILL cr.
  effect(DEF dst, KILL cr);

  format %{"LoadBarrierSlowReg $dst, $mem" %}
  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
                            $mem$$index, $mem$$scale, $mem$$disp, false);
  %}
  ins_pipe(pipe_slow);
%}
  77 
  78 //
  79 // Execute ZGC load barrier (weak) slow path
  80 //
// Weak load-barrier slow path: identical to loadBarrierSlowReg except the
// predicate selects is_weak() nodes and the helper is called with weak == true,
// which routes to the weak-reference stub.
instruct loadBarrierWeakSlowReg(iRegP dst, memory mem, rFlagsReg cr) %{
  match(Set dst (LoadBarrierSlowReg mem));
  predicate(n->as_LoadBarrierSlowReg()->is_weak());

  // The stub call clobbers flags, hence KILL cr.
  effect(DEF dst, KILL cr);

  format %{"LoadBarrierWeakSlowReg $dst, $mem" %}
  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
                            $mem$$index, $mem$$scale, $mem$$disp, true);
  %}
  ins_pipe(pipe_slow);
%}
  94 
  95 
  96 // Specialized versions of compareAndExchangeP that adds a keepalive that is consumed
  97 // but doesn't affect output.
  98 
// ZGC compareAndExchange on oop fields. The keepalive input is matched (so the
// node shape is consumed) but never used in the encoding — per the comment
// above, it only keeps a value alive across the operation.
// Returns the previous memory value in res (TEMP_DEF: res is written before
// all inputs are dead).
instruct z_compareAndExchangeP(iRegPNoSp res, indirect mem,
                               iRegP oldval, iRegP newval, iRegP keepalive,
                               rFlagsReg cr) %{
  match(Set res (ZCompareAndExchangeP (Binary mem keepalive) (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  // NOTE(review): the format text says "(ptr, weak)" but the encoding below
  // passes /*weak*/ false (a strong cmpxchg) — the debug-format text looks
  // copy-pasted from a weak variant; confirm against upstream before changing.
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    // 64-bit (xword) strong CAS: no acquire on load, release on store.
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 115 
// ZGC compareAndSwap on oop fields, returning a boolean (0/1) in res via
// cset on the EQ flag. The keepalive input is matched but unused in the
// encoding (see the comment above z_compareAndExchangeP).
// NOTE(review): both the strong (ZCompareAndSwapP) and weak
// (ZWeakCompareAndSwapP) nodes map to the same aarch64_enc_cmpxchg encoding
// here — presumably the weak form is simply implemented with a strong CAS,
// which is semantically allowed (spurious-failure freedom is stronger);
// confirm against the encoding's definition in aarch64.ad.
instruct z_compareAndSwapP(iRegINoSp res,
                           indirect mem,
                           iRegP oldval, iRegP newval, iRegP keepalive,
                            rFlagsReg cr) %{

  match(Set res (ZCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));
  match(Set res (ZWeakCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  // cmpxchg + cset read/write flags.
  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 138 
 139 
// ZGC atomic exchange on oop fields: stores newv, returns the previous value
// in prev. The keepalive input is matched but unused in the encoding (see the
// comment above z_compareAndExchangeP). mem is 'indirect' so only the base
// register is meaningful — hence as_Register($mem$$base) with no disp/index.
instruct z_get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev,
                        iRegP keepalive) %{
  match(Set prev (ZGetAndSetP mem (Binary newv keepalive)));

  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}