1 /*
   2  * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "asm/macroAssembler.inline.hpp"
  26 #include "code/codeBlob.hpp"
  27 #include "gc/z/zBarrier.inline.hpp"
  28 #include "gc/z/zBarrierSet.hpp"
  29 #include "gc/z/zBarrierSetAssembler.hpp"
  30 #include "gc/z/zBarrierSetRuntime.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #ifdef COMPILER1
  33 #include "c1/c1_LIRAssembler.hpp"
  34 #include "c1/c1_MacroAssembler.hpp"
  35 #include "gc/z/c1/zBarrierSetC1.hpp"
  36 #endif // COMPILER1
  37 
  38 #include "gc/z/zThreadLocalData.hpp"
  39 
  40 ZBarrierSetAssembler::ZBarrierSetAssembler() :
  41     _load_barrier_slow_stub(),
  42     _load_barrier_weak_slow_stub() {}
  43 
  44 #ifdef PRODUCT
  45 #define BLOCK_COMMENT(str) /* nothing */
  46 #else
  47 #define BLOCK_COMMENT(str) __ block_comment(str)
  48 #endif
  49 
  50 #undef __
  51 #define __ masm->
  52 
// Emit a ZGC load barrier for an oop load from src into dst.
//
// Fast path: load the thread-local address bad mask, load the oop, and test
// the oop against the mask; if no bad bits are set, fall through to done.
// Slow path: save the caller-visible general-purpose registers (except dst),
// call the load-barrier runtime with the preloaded oop (dst) and the field
// address (rscratch2), and move the healed oop returned in r0 into dst.
//
// For loads that need no barrier (barrier_needed() is false) this delegates
// unchanged to the plain BarrierSetAssembler::load_at().
void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
                                   DecoratorSet decorators,
                                   BasicType type,
                                   Register dst,
                                   Address src,
                                   Register tmp1,
                                   Register tmp_thread) {
  if (!ZBarrierSet::barrier_needed(decorators, type)) {
    // Barrier not needed
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  // rscratch1 can be passed as src or dst, so don't use it.
  // rscratch2 and rheapbase are clobbered below, so preserve them.
  RegSet savedRegs = RegSet::of(rscratch2, rheapbase);

  Label done;
  assert_different_registers(rheapbase, rscratch2, dst);
  assert_different_registers(rheapbase, rscratch2, src.base());

  __ push(savedRegs, sp);

  // Load bad mask into scratch register.
  __ ldr(rheapbase, address_bad_mask_from_thread(rthread));
  // rscratch2 = field address; kept live as the second runtime-call argument.
  __ lea(rscratch2, src);
  __ ldr(dst, src);

  // Test reference against bad mask. If mask bad, then we need to fix it up.
  __ tst(dst, rheapbase);
  __ br(Assembler::EQ, done);

  // Slow path: set up a frame and preserve everything except dst, which
  // will be overwritten with the healed oop anyway.
  __ enter();

  __ push(RegSet::range(r0,r28) - RegSet::of(dst), sp);

  // call_VM_leaf uses rscratch1.
  // Arguments: preloaded oop (dst), field address (rscratch2).
  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, rscratch2);

  // Make sure dst has the return value.
  if (dst != r0) {
    __ mov(dst, r0);
  }

  __ pop(RegSet::range(r0,r28) - RegSet::of(dst), sp);
  __ leave();


  __ bind(done);

  // Restore tmps
  __ pop(savedRegs, sp);
}
 105 
 106 #ifdef ASSERT
 107 
// Debug-only (ASSERT builds) oop store: before delegating to the plain
// BarrierSetAssembler::store_at(), verify that the value being stored is a
// "good" oop, i.e. has none of the thread-local address bad mask bits set.
// ZGC needs no store barrier, so in product builds the base class version
// is used directly.
void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
                                        DecoratorSet decorators,
                                        BasicType type,
                                        Address dst,
                                        Register val,
                                        Register tmp1,
                                        Register tmp2) {
  // Verify value
  if (type == T_OBJECT || type == T_ARRAY) {
    // Note that src could be noreg, which means we
    // are storing null and can skip verification.
    if (val != noreg) {
      Label done;

      // tmp1 and tmp2 are often set to noreg.
      // Preserve rscratch1 around the check; stop() may clobber it.
      RegSet savedRegs = RegSet::of(rscratch1);
      __ push(savedRegs, sp);

      // tmp1 = address bad mask; a good oop ANDed with it must be zero.
      __ ldr(tmp1, address_bad_mask_from_thread(rthread));
      __ tst(val, tmp1);
      __ br(Assembler::EQ, done);
      __ stop("Verify oop store failed");
      __ should_not_reach_here();
      __ bind(done);
      __ pop(savedRegs, sp);
    }
  }

  // Store value
  BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
}
 139 
 140 #endif // ASSERT
 141 
// Emit the ZGC prologue for an oop arraycopy: heal every oop in the source
// range up front by calling ZBarrierSetRuntime::load_barrier_on_oop_array(),
// so the copy loop itself can run barrier-free. No-op for non-oop copies.
//
// The runtime call takes (src, count) in (c_rarg0, c_rarg1); the shuffle
// below is careful about the case where the incoming registers already
// overlap the argument registers.
void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
                                              DecoratorSet decorators,
                                              bool is_oop,
                                              Register src,
                                              Register dst,
                                              Register count,
                                              RegSet saved_regs) {
  if (!is_oop) {
    // Barrier not needed
    return;
  }

  BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");

  assert_different_registers(src, count, rscratch1);

  // Save everything; the runtime call clobbers the caller-saved set and the
  // copy stub expects its registers intact afterwards.
  __ pusha();

  if (count == c_rarg0) {
    if (src == c_rarg1) {
      // exactly backwards!! (src, count) == (c_rarg1, c_rarg0): swap via rscratch1.
      __ mov(rscratch1, c_rarg0);
      __ mov(c_rarg0, c_rarg1);
      __ mov(c_rarg1, rscratch1);
    } else {
      // count occupies c_rarg0, so move it out first before loading src.
      __ mov(c_rarg1, count);
      __ mov(c_rarg0, src);
    }
  } else {
    __ mov(c_rarg0, src);
    __ mov(c_rarg1, count);
  }

  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2);

  __ popa();
  BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
}
 180 
// Resolve a jobject in native (JNI) code without transitioning to Java.
// After the generic resolution, test the resolved oop against the
// thread-local address bad mask; if any bad bit is set, branch to slowpath
// so the caller can take the full transition instead.
void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
                                                         Register jni_env,
                                                         Register robj,
                                                         Register tmp,
                                                         Label& slowpath) {
  BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");

  assert_different_registers(jni_env, robj, tmp);

  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);

  // The bad-mask offset relative to jni_env (-784) does not fit the load
  // instruction's immediate range, so materialize it in tmp and add it to
  // jni_env explicitly before loading.
  __ mov(tmp, (long int)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) -
      in_bytes(JavaThread::jni_environment_offset())));
  // Load address bad mask
  __ add(tmp, jni_env, tmp);
  __ ldr(tmp, Address(tmp));

  // Check address bad mask: any overlap with the mask means the oop needs healing.
  __ tst(robj, tmp);
  __ br(Assembler::NE, slowpath);

  BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
}
 206 
 207 #ifdef COMPILER1
 208 
 209 #undef __
 210 #define __ ce->masm()->
 211 
// C1 fast-path barrier test: AND the loaded reference with the thread-local
// address bad mask and set the condition flags. EQ (result zero) means the
// oop is good; the caller branches to the slow-path stub on NE.
// Clobbers rheapbase, which is reloaded elsewhere after ZGC barriers.
void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
                                                         LIR_Opr ref) const {
  assert_different_registers(rheapbase, rthread, ref->as_register());

  __ ldr(rheapbase, address_bad_mask_from_thread(rthread));
  __ tst(ref->as_register(), rheapbase);
}
 219 
// Emit the out-of-line C1 slow-path stub: pass the bad oop and its field
// address to the shared runtime stub (far_call below), which returns the
// healed oop in r0, then move the result into ref and jump back to the
// inline continuation.
void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
                                                         ZLoadBarrierStubC1* stub) const {
  // Stub entry
  __ bind(*stub->entry());

  Register ref = stub->ref()->as_register();
  Register ref_addr = noreg;
  Register tmp = noreg;

  if (stub->tmp()->is_valid()) {
    // Load address into tmp register
    ce->leal(stub->ref_addr(), stub->tmp());
    ref_addr = tmp = stub->tmp()->as_pointer_register();
  } else {
    // Address already in register
    ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
  }

  assert_different_registers(ref, ref_addr, noreg);

  // Save r0 unless it is the result or tmp register
  // Set up SP to accommodate the two stored parameters (16 bytes) and,
  // when r0 must be preserved, an extra 16-byte slot for it (keeps SP
  // 16-byte aligned either way).
  if (ref != r0 && tmp != r0) {
    __ sub(sp, sp, 32);
    __ str(r0, Address(sp, 16));
  } else {
    __ sub(sp, sp, 16);
  }

  // Setup arguments and call runtime stub
  ce->store_parameter(ref_addr, 1);
  ce->store_parameter(ref, 0);

  __ far_call(stub->runtime_stub());

  // Verify result
  __ verify_oop(r0, "Bad oop");

  // Move result into place
  if (ref != r0) {
    __ mov(ref, r0);
  }

  // Restore r0 unless it is the result or tmp register
  if (ref != r0 && tmp != r0) {
    __ ldr(r0, Address(sp, 16));
    __ add(sp, sp, 32);
  } else {
    __ add(sp, sp, 16);
  }

  // Stub exit
  __ b(*stub->continuation());
}
 274 
 275 #undef __
 276 #define __ sasm->
 277 
 278 void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
 279                                                                  DecoratorSet decorators) const {
 280   __ prologue("zgc_load_barrier stub", false);
 281 
 282   // We don't use push/pop_clobbered_registers() - we need to pull out the result from r0.
 283   for (int i = 0; i < 32; i +=2) {
 284     __ stpd(as_FloatRegister(i), as_FloatRegister(i+1), Address(__ pre(sp,-16)));
 285   }
 286 
 287   RegSet saveRegs = RegSet::range(r0,r28) - RegSet::of(r0);
 288   __ push(saveRegs, sp);
 289 
 290   // Setup arguments
 291   __ load_parameter(0, c_rarg0);
 292   __ load_parameter(1, c_rarg1);
 293 
 294   __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
 295 
 296   __ pop(saveRegs, sp);
 297 
 298   for (int i = 30; i >0; i -=2) {
 299       __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1), Address(__ post(sp, 16)));
 300     }
 301 
 302   __ epilogue();
 303 }
 304 #endif // COMPILER1
 305 
 306 #undef __
 307 #define __ cgen->assembler()->
 308 
 309 // Generates a register specific stub for calling
 310 // ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
 311 // ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
 312 //
 313 // The raddr register serves as both input and output for this stub. When the stub is
 314 // called the raddr register contains the object field address (oop*) where the bad oop
 315 // was loaded from, which caused the slow path to be taken. On return from the stub the
 316 // raddr register contains the good/healed oop returned from
 317 // ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
 318 // ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
// Generate one register-specific slow-path stub (see the comment block above
// for the raddr in/out contract). Returns NULL for registers that can never
// hold an oop field address (zr, FP, LR).
static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
  // Don't generate stub for invalid registers
  if (raddr == zr || raddr == r29 || raddr == r30) {
    return NULL;
  }

  // Create stub name, e.g. "zgc_load_barrier_weak_stub_r5".
  char name[64];
  const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
  os::snprintf(name, sizeof(name), "zgc_load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());

  __ align(CodeEntryAlignment);
  // strdup into C heap: StubCodeMark keeps the name beyond this frame.
  StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
  address start = __ pc();

  // Save live registers, except raddr which receives the result.
  RegSet savedRegs = RegSet::range(r0,r28) - RegSet::of(raddr);

  __ enter();
  __ push(savedRegs, sp);

  // Setup arguments: c_rarg1 = field address, c_rarg0 = the (bad) oop loaded
  // from it. Move raddr first so the load below can't clobber an argument.
  if (raddr != c_rarg1) {
    __ mov(c_rarg1, raddr);
  }

  __ ldr(c_rarg0, Address(raddr));

  // Call barrier function
  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);

  // Move result returned in r0 to raddr, if needed
  if (raddr != r0) {
    __ mov(raddr, r0);
  }

  __ pop(savedRegs, sp);
  __ leave();
  __ ret(lr);

  return start;
}
 361 
 362 #undef __
 363 
// Generate one load-barrier stub per usable general-purpose register and
// record its entry address in stub[], indexed by register encoding (the
// lookup in load_barrier_slow_stub() below relies on this indexing).
// Entries for unusable registers are NULL (see generate_load_barrier_stub).
static void barrier_stubs_init_inner(const char* label, const DecoratorSet decorators, address* stub) {
  const int nregs = 28;              // Exclude FP, XZR, SP from calculation.
  const int code_size = nregs * 254; // Rough estimate of code size

  ResourceMark rm;

  CodeBuffer buf(BufferBlob::create(label, code_size));
  StubCodeGenerator cgen(&buf);

  for (int i = 0; i < nregs; i++) {
    const Register reg = as_Register(i);
    stub[i] = generate_load_barrier_stub(&cgen, reg, decorators);
  }
}
 378 
// Populate both per-register stub tables: one set of stubs for strong oop
// references and one for weak references.
void ZBarrierSetAssembler::barrier_stubs_init() {
  barrier_stubs_init_inner("zgc_load_barrier_stubs", ON_STRONG_OOP_REF, _load_barrier_slow_stub);
  barrier_stubs_init_inner("zgc_load_barrier_weak_stubs", ON_WEAK_OOP_REF, _load_barrier_weak_slow_stub);
}
 383 
 384 address ZBarrierSetAssembler::load_barrier_slow_stub(Register reg) {
 385   return _load_barrier_slow_stub[reg->encoding()];
 386 }
 387 
 388 address ZBarrierSetAssembler::load_barrier_weak_slow_stub(Register reg) {
 389   return _load_barrier_weak_slow_stub[reg->encoding()];
 390 }