//
// Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//

// ZGC-specific C2 match rules and encodings for x86_64: load-barrier slow
// paths plus keepalive-aware compare-and-swap / exchange variants.

source_hpp %{

#include "gc/z/c2/zBarrierSetC2.hpp"

%}

source %{

#include "gc/z/zBarrierSetAssembler.hpp"

// Emit the ZGC load-barrier slow path: materialize the address of the loaded
// field into dst (via lea) and call the per-destination-register stub.
// The weak flag selects between the weak and the (strong) slow stub looked up
// from the barrier set assembler.
static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, Address src, bool weak) {
  // No stub variant exists for rsp or r15 as the destination register
  // (presumably r15 is reserved as the thread register — see the stub
  // generator for the authoritative list).
  assert(dst != rsp, "Invalid register");
  assert(dst != r15, "Invalid register");

  const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
                            : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
  __ lea(dst, src);                // dst = field address; input to the stub
  __ call(RuntimeAddress(stub));   // stub leaves the healed oop in dst
}

%}

// For XMM and YMM enabled processors
//
// Strong (non-weak) load-barrier slow path when at most AVX2 is in use
// (UseAVX <= 2). All 16 vector registers are declared killed across the
// stub call; the flags register is killed as well.
instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
                                      rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                      rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                      rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                      rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
  match(Set dst (LoadBarrierSlowReg src dst));
  predicate(UseAVX <= 2 && !n->as_LoadBarrierSlowReg()->is_weak());

  effect(KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15);

  format %{ "lea $dst, $src\n\t"
            "call #ZLoadBarrierSlowPath" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
  %}
  ins_pipe(pipe_slow);
%}

// For ZMM enabled processors
//
// Same strong slow path for AVX-512 (UseAVX == 3); the kill set is extended
// to cover all 32 vector registers available in that mode.
instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
                                rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
                                rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
                                rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
                                rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
                                rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{

  match(Set dst (LoadBarrierSlowReg src dst));
  predicate(UseAVX == 3 && !n->as_LoadBarrierSlowReg()->is_weak());

  effect(KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15,
         KILL x16, KILL x17, KILL x18, KILL x19,
         KILL x20, KILL x21, KILL x22, KILL x23,
         KILL x24, KILL x25, KILL x26, KILL x27,
         KILL x28, KILL x29, KILL x30, KILL x31);

  format %{ "lea $dst, $src\n\t"
            "call #ZLoadBarrierSlowPath" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
  %}
  ins_pipe(pipe_slow);
%}

// For XMM and YMM enabled processors
//
// Weak load-barrier slow path (is_weak() true) for UseAVX <= 2; identical to
// the strong variant above except that the weak stub is selected.
instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
                                          rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                          rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                          rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                          rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
  match(Set dst (LoadBarrierSlowReg src dst));
  predicate(UseAVX <= 2 && n->as_LoadBarrierSlowReg()->is_weak());

  effect(KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15);

  format %{ "lea $dst, $src\n\t"
            "call #ZLoadBarrierSlowPath" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
  %}
  ins_pipe(pipe_slow);
%}

// For ZMM enabled processors
//
// Weak load-barrier slow path for AVX-512 (UseAVX == 3), killing all 32
// vector registers across the stub call.
instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
                                    rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                    rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                    rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                    rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
                                    rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
                                    rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
                                    rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
                                    rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{

  match(Set dst (LoadBarrierSlowReg src dst));
  predicate(UseAVX == 3 && n->as_LoadBarrierSlowReg()->is_weak());

  effect(KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15,
         KILL x16, KILL x17, KILL x18, KILL x19,
         KILL x20, KILL x21, KILL x22, KILL x23,
         KILL x24, KILL x25, KILL x26, KILL x27,
         KILL x28, KILL x29, KILL x30, KILL x31);

  format %{ "lea $dst, $src\n\t"
            "call #ZLoadBarrierSlowPath" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
  %}
  ins_pipe(pipe_slow);
%}

// Specialized versions of compareAndExchangeP that adds a keepalive that is consumed
// but doesn't affect output.

// Pointer compare-and-exchange: lock cmpxchgq leaving the witnessed value in
// rax (oldval). The keepalive operand is matched but not encoded.
instruct z_compareAndExchangeP(
        memory mem_ptr,
        rax_RegP oldval, rRegP newval, rRegP keepalive,
        rFlagsReg cr) %{
    predicate(VM_Version::supports_cx8());
    match(Set oldval (ZCompareAndExchangeP (Binary mem_ptr keepalive) (Binary oldval newval)));
    effect(KILL cr);

    format %{ "cmpxchgq $mem_ptr,$newval\t# "
              "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
    opcode(0x0F, 0xB1);
    ins_encode(lock_prefix,
            REX_reg_mem_wide(newval, mem_ptr),
            OpcP, OpcS,
            reg_mem(newval, mem_ptr) // lock cmpxchg
    );
    ins_pipe( pipe_cmpxchg );
%}

// Pointer compare-and-swap returning a success flag: lock cmpxchgq, then
// sete + movzbl to turn ZF into a 0/1 int in res. Handles both the strong
// (ZCompareAndSwapP) and weak (ZWeakCompareAndSwapP) nodes; rax (oldval) is
// clobbered by the cmpxchg and declared KILLed.
instruct z_compareAndSwapP(rRegI res,
                           memory mem_ptr,
                           rax_RegP oldval, rRegP newval, rRegP keepalive,
                           rFlagsReg cr) %{
    predicate(VM_Version::supports_cx8());
    match(Set res (ZCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval)));
    match(Set res (ZWeakCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval)));
    effect(KILL cr, KILL oldval);

    format %{ "cmpxchgq $mem_ptr,$newval\t# "
              "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
              "sete    $res\n\t"
              "movzbl  $res, $res" %}
    opcode(0x0F, 0xB1);
    ins_encode(lock_prefix,
            REX_reg_mem_wide(newval, mem_ptr),
            OpcP, OpcS,
            reg_mem(newval, mem_ptr),
            REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
            REX_reg_breg(res, res), // movzbl
            Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
    ins_pipe( pipe_cmpxchg );
%}

// Atomic pointer exchange: xchgq swaps newval with the memory location and
// leaves the previous value in newval. The keepalive operand is matched but
// produces no code (see the keepalive note above compareAndExchangeP).
instruct z_xchgP( memory mem, rRegP newval, rRegP keepalive) %{
  match(Set newval (ZGetAndSetP mem (Binary newval keepalive)));
  format %{ "XCHGQ  $newval,[$mem]" %}
  ins_encode %{
    __ xchgq($newval$$Register, $mem$$Address);
  %}
  ins_pipe( pipe_cmpxchg );
%}