/*
 * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// AArch64 assembler support for Shenandoah GC barriers: SATB pre-barriers,
// Brooks-pointer read/write barriers, storeval barriers, the CAS barrier,
// TLAB allocation with the extra forwarding-pointer word, and the C1 stubs.
// (AArch64 is evident from the mnemonics used below: ldrw/cbzw/ldrb, rscratch1/2,
// rthread, lr, far_call, blrt, RegSet push/pop.)

#include "precompiled.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

// Entry points of the generated write-barrier stubs; filled in by
// barrier_stubs_init() below (interpreter/runtime flavor and C-ABI flavor).
address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
address ShenandoahBarrierSetAssembler::_shenandoah_wb_C = NULL;

// Emit the pre-barrier for an oop arraycopy: calls into the runtime to
// SATB-enqueue the about-to-be-overwritten destination oops ("addr" is the
// destination start, "count" the element count). Skipped when the destination
// is uninitialized (nothing to enqueue) or when the heuristics allow
// traversal GC to elide it. saved_regs are preserved across the leaf call.
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register addr, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if (!dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
      __ push(saved_regs, sp);
      // Shuffle (addr, count) into (c_rarg0, c_rarg1), taking care not to
      // clobber one argument while moving the other.
      if (count == c_rarg0) {
        if (addr == c_rarg1) {
          // exactly backwards!! Swap via rscratch1.
          __ mov(rscratch1, c_rarg0);
          __ mov(c_rarg0, c_rarg1);
          __ mov(c_rarg1, rscratch1);
        } else {
          // count already occupies c_rarg0; move it out first.
          __ mov(c_rarg1, count);
          __ mov(c_rarg0, addr);
        }
      } else {
        __ mov(c_rarg0, addr);
        __ mov(c_rarg1, count);
      }
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2);
      }
      __ pop(saved_regs, sp);
    }
  }
}

// Emit the post-barrier for an oop arraycopy. Receives [start, end] (inclusive
// last-element address, not count), so the element count is recomputed here.
void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register start, Register end, Register scratch, RegSet saved_regs) {
  if (is_oop) {
    __ push(saved_regs, sp);
    // must compute element count unless barrier set interface is changed (other platforms supply count)
    assert_different_registers(start, end, scratch);
    __ lea(scratch, Address(end, BytesPerHeapOop));   // one-past-last address
    __ sub(scratch, scratch, start);                  // subtract start to get #bytes
    __ lsr(scratch, scratch, LogBytesPerHeapOop);     // convert to element count
    __ mov(c_rarg0, start);
    __ mov(c_rarg1, scratch);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2);
    __ pop(saved_regs, sp);
  }
}

// Pre-barrier for a reference store: delegates to the SATB pre-barrier when
// ShenandoahSATBBarrier is enabled, otherwise emits nothing.
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
  }
}

// SATB (snapshot-at-the-beginning) pre-barrier. If marking is active, loads
// the previous value from obj (when obj != noreg; otherwise pre_val already
// holds it) and records it in the thread-local SATB queue; falls back to a
// runtime call when the queue buffer is full (index == 0).
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp, rscratch1);
  assert(pre_val != noreg && tmp != noreg, "expecting a register");

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp, in_progress);
  }
  __ cbzw(tmp, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ ldr(tmp, index);                      // tmp := *index_adr
  __ cbz(tmp, runtime);                    // tmp == 0?
                                           // If yes, goto runtime

  __ sub(tmp, tmp, wordSize);              // tmp := tmp - wordSize
  __ str(tmp, index);                      // *index_adr := tmp
  __ ldr(rscratch1, buffer);
  __ add(tmp, tmp, rscratch1);             // tmp := tmp + *buffer_adr

  // Record the previous value
  __ str(pre_val, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(r0);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop(saved, sp);

  __ bind(done);
}

// Null-tolerant read barrier (no-op unless ShenandoahReadBarrier).
void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
  if (ShenandoahReadBarrier) {
    read_barrier_impl(masm, dst);
  }
}

// Null-tolerant read barrier: skip the Brooks-pointer load when dst is null.
void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
  assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
  Label is_null;
  __ cbz(dst, is_null);
  read_barrier_not_null_impl(masm, dst);
  __ bind(is_null);
}

// Read barrier for a known-non-null oop (no-op unless ShenandoahReadBarrier).
void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
  if (ShenandoahReadBarrier) {
    read_barrier_not_null_impl(masm, dst);
  }
}

// Resolve dst through its Brooks forwarding pointer, in place.
void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
  assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
  __ ldr(dst, Address(dst, BrooksPointer::byte_offset()));
}

// Write barrier (no-op unless ShenandoahWriteBarrier). dst must be non-null.
void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
  if (ShenandoahWriteBarrier) {
    write_barrier_impl(masm, dst);
  }
}

// Write barrier: fast path is a single gc_state byte test. If the heap is
// unstable, resolve dst through the Brooks pointer; if evacuation/traversal
// is in progress, call the generated shenandoah_wb stub (argument and result
// in r0, everything else preserved around the call).
void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
  assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
  assert(dst != rscratch1, "need rscratch1");
  assert(dst != rscratch2, "need rscratch2");

  Label done;

  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch1, gc_state);

  // Check for heap stability
  __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
  __ tst(rscratch1, rscratch2);
  __ br(Assembler::EQ, done);

  // Heap is unstable, need to perform the read-barrier even if WB is inactive
  if (ShenandoahWriteBarrierRB) {
    __ ldr(dst, Address(dst, BrooksPointer::byte_offset()));
  }

  // Check for evacuation-in-progress and jump to WB slow-path if needed
  __ mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
  __ tst(rscratch1, rscratch2);
  __ br(Assembler::EQ, done);

  RegSet to_save = RegSet::of(r0);
  if (dst != r0) {
    __ push(to_save, sp);
    __ mov(r0, dst);
  }

  __ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb())));

  if (dst != r0) {
    __ mov(dst, r0);
    __ pop(to_save, sp);
  }

  __ bind(done);
}

// Storeval barrier: applied to the VALUE being stored. With the enqueue
// barrier, write-barrier the (non-null) value and SATB-enqueue it; with the
// read-barrier variant, just resolve the value through its Brooks pointer.
void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahStoreValEnqueueBarrier) {
    Label is_null;
    __ cbz(dst, is_null);
    write_barrier_impl(masm, dst);
    __ bind(is_null);
    // Save possibly live regs (r0..r4 minus dst, plus FP register v0).
    RegSet live_regs = RegSet::range(r0, r4) - dst;
    __ push(live_regs, sp);
    __ strd(v0, __ pre(sp, 2 * -wordSize));

    satb_write_barrier_pre(masm, noreg, dst, rthread, tmp, true, false);

    // Restore possibly live regs.
    __ ldrd(v0, __ post(sp, 2 * wordSize));
    __ pop(live_regs, sp);
  }
  if (ShenandoahStoreValReadBarrier) {
    read_barrier_impl(masm, dst);
  }
}

// Load access: resolves src.base() in place via the read barrier for in-heap
// accesses, performs the plain load, and for weak/phantom reference loads
// emits a keep-alive SATB barrier on the loaded referent (inside a temporary
// frame, since it may be reached without one, e.g. from Reference.get()).
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;

  if (in_heap) {
    // NOTE: mutates the src.base() register in place.
    read_barrier_not_null(masm, src.base());
  }

  BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
    __ enter();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           rthread /* thread */,
                           tmp1 /* tmp */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ leave();
  }
}

// Store access: write-barriers the base object, emits the SATB pre-barrier
// on the field (address flattened into r3), storeval-barriers the new value,
// then performs the plain store.
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (in_heap) {
    write_barrier(masm, dst.base());
  }
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
    return;
  }

  // flatten object address if needed
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != r3) {
      __ mov(r3, dst.base());
    }
  } else {
    __ lea(r3, dst);
  }

  shenandoah_write_barrier_pre(masm,
                               r3 /* obj */,
                               tmp2 /* pre_val */,
                               rthread /* thread */,
                               tmp1 /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg);
  } else {
    storeval_barrier(masm, val, tmp1);
    // G1 barrier needs uncompressed oop for region cross check.
    // NOTE(review): new_val is computed but never used afterwards (the store
    // below passes 'val'); this looks like dead code carried over from the
    // G1 barrier — consider removing the mov into rscratch2.
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = rscratch2;
      __ mov(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg);
  }

}

// Object equality with acmp barrier: if the raw pointers differ, both sides
// may still refer to the same object via forwarding, so resolve both through
// their Brooks pointers (after a membar) and compare again.
void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) {
  __ cmp(op1, op2);
  if (ShenandoahAcmpBarrier) {
    Label done;
    __ br(Assembler::EQ, done);
    // The object may have been evacuated, but we won't see it without a
    // membar here.
    __ membar(Assembler::LoadStore| Assembler::LoadLoad);
    read_barrier(masm, op1);
    read_barrier(masm, op2);
    __ cmp(op1, op2);
    __ bind(done);
  }
}

// TLAB allocation with room for the Brooks forwarding pointer: bumps tlab_top
// by size + BrooksPointer::byte_size(), advances obj past the fwdptr word,
// and initializes the forwarding pointer to the object itself.
void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                                  Register var_size_in_bytes,
                                                  int con_size_in_bytes,
                                                  Register t1,
                                                  Register t2,
                                                  Label& slow_case) {

  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // NOTE(review): oop_extra_words is unused in this function.
  int oop_extra_words = Universe::heap()->oop_extra_words();

  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, (int) (con_size_in_bytes + BrooksPointer::byte_size())));
  } else {
    __ add(var_size_in_bytes, var_size_in_bytes, BrooksPointer::byte_size());
    __ lea(end, Address(obj, var_size_in_bytes));
  }
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ br(Assembler::HI, slow_case);

  // update the tlab top pointer
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // Object starts one fwdptr word past tlab_top; store the self-referencing
  // forwarding pointer (byte_offset() addresses the fwdptr slot relative to obj).
  __ add(obj, obj, BrooksPointer::byte_size());
  __ str(obj, Address(obj, BrooksPointer::byte_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
}

// Resolve an oop for raw access: write barrier for writes (null-checked when
// the oop may be null), read barrier for reads.
void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
  bool oop_not_null = (decorators & IS_NOT_NULL) != 0;
  bool is_write = (decorators & ACCESS_WRITE) != 0;
  if (is_write) {
    if (oop_not_null) {
      write_barrier(masm, obj);
    } else {
      Label done;
      __ cbz(obj, done);
      write_barrier(masm, obj);
      __ bind(done);
    }
  } else {
    if (oop_not_null) {
      read_barrier_not_null(masm, obj);
    } else {
      read_barrier(masm, obj);
    }
  }
}

// CAS on an oop location with false-negative handling: a plain CAS can fail
// even though expected and memory refer to the same object (one of them
// forwarded). On failure, both values are resolved through their Brooks
// pointers and compared; if equal, the CAS is retried with the freshly
// loaded memory value as the new expected. result == noreg selects
// compare-and-swap; otherwise compare-and-exchange into result.
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
                                                bool acquire, bool release, bool weak, bool encode,
                                                Register tmp1, Register tmp2, Register tmp3,
                                                Register result) {

  if (!ShenandoahCASBarrier) {
    // NOTE(review): this path hardcodes acquire/release/weak as
    // true/true/false and ignores the corresponding parameters — verify
    // this conservative strengthening is intended.
    if (UseCompressedOops) {
      if (encode) {
        __ encode_heap_oop(tmp1, expected);
        expected = tmp1;
        __ encode_heap_oop(tmp3, new_val);
        new_val = tmp3;
      }
      __ cmpxchg(addr, expected, new_val, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
      __ membar(__ AnyAny);
    } else {
      __ cmpxchg(addr, expected, new_val, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
      __ membar(__ AnyAny);
    }
    return;
  }

  if (encode) {
    storeval_barrier(masm, new_val, tmp3);
  }

  if (UseCompressedOops) {
    if (encode) {
      __ encode_heap_oop(tmp1, expected);
      expected = tmp1;
      __ encode_heap_oop(tmp2, new_val);
      new_val = tmp2;
    }
  }
  bool is_cae = (result != noreg);
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;
  if (! is_cae) result = rscratch1;

  assert_different_registers(addr, expected, new_val, result, tmp3);

  Label retry, done, fail;

  // CAS, using LL/SC pair.
  __ bind(retry);
  __ load_exclusive(result, addr, size, acquire);
  if (is_narrow) {
    __ cmpw(result, expected);
  } else {
    __ cmp(result, expected);
  }
  __ br(Assembler::NE, fail);
  __ store_exclusive(tmp3, new_val, addr, size, release);
  if (weak) {
    __ cmpw(tmp3, 0u); // If the store fails, return NE to our caller
  } else {
    __ cbnzw(tmp3, retry);
  }
  __ b(done);

  __ bind(fail);
  // Check if rb(expected)==rb(result)
  // Shuffle registers so that we have memory value ready for next expected.
  __ mov(tmp3, expected);
  __ mov(expected, result);
  if (is_narrow) {
    __ decode_heap_oop(result, result);
    __ decode_heap_oop(tmp3, tmp3);
  }
  read_barrier_impl(masm, result);
  read_barrier_impl(masm, tmp3);
  __ cmp(result, tmp3);
  // Retry with expected now being the value we just loaded from addr.
  __ br(Assembler::EQ, retry);
  if (is_narrow && is_cae) {
    // For cmp-and-exchange and narrow oops, we need to restore
    // the compressed old-value. We moved it to 'expected' a few lines up.
    __ mov(result, expected);
  }
  __ bind(done);

}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

// C1 slow-path stub for the SATB pre-barrier: optionally (re)loads the
// previous value, skips nulls, and calls the shared pre-barrier runtime
// stub generated by generate_c1_pre_barrier_runtime_stub().
void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

// C1 slow-path stub for the write barrier: copies obj into the result
// register (null-skipping if requested) and applies the write barrier.
void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) {

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();

  Label done;

  __ bind(*stub->entry());

  if (res != obj) {
    __ mov(res, obj);
  }
  // Check for null.
  if (stub->needs_null_check()) {
    __ cbz(res, done);
  }

  write_barrier(ce->masm(), res);

  __ bind(done);
  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

// Shared C1 runtime stub for the SATB pre-barrier: re-checks that marking
// (or traversal) is still active, then enqueues the previous value into the
// thread-local SATB buffer, calling the runtime when the buffer is full.
void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, gc_state);
  __ mov(rscratch2, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
  __ tst(tmp, rscratch2);
  __ br(Assembler::EQ, done);

  // Can we store original value in the thread's buffer?
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

#undef __

#endif // COMPILER1

// Accessor for the generated write-barrier stub used from interpreted code.
address ShenandoahBarrierSetAssembler::shenandoah_wb() {
  assert(_shenandoah_wb != NULL, "need write barrier stub");
  return _shenandoah_wb;
}

// Accessor for the C-ABI flavor of the write-barrier stub (used by C2).
address ShenandoahBarrierSetAssembler::shenandoah_wb_C() {
  assert(_shenandoah_wb_C != NULL, "need write barrier stub");
  return _shenandoah_wb_C;
}

#define __ cgen->assembler()->

// Shenandoah write barrier.
//
// Input:
//   r0: OOP to evacuate. Not null.
//
// Output:
//   r0: Pointer to evacuated OOP.
//
// Trash rscratch1, rscratch2. Preserve everything else.
// Generate the shenandoah_wb stub (see the contract comment above: oop in,
// evacuated oop out, both in r0). When do_cset_test is true, a fast path
// checks the in-cset byte map and returns immediately for objects outside
// the collection set. The slow path calls ShenandoahRuntime::write_barrier_JRT;
// c_abi selects how much register state must be saved around that call
// (full call-clobbered set for the non-C-ABI flavor, FP registers only for
// the C-ABI flavor whose caller already follows the C calling convention).
address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen, bool c_abi, bool do_cset_test) {

  __ align(6);
  StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb");
  address start = __ pc();

  if (do_cset_test) {
    // Fast path: index the in-cset byte map by region number; if the byte's
    // low bit is clear, the object is not in the collection set — return as-is.
    Label work;
    __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(rscratch2, Address(rscratch2, rscratch1));
    __ tbnz(rscratch2, 0, work);
    __ ret(lr);
    __ bind(work);
  }

  Register obj = r0;

  __ enter(); // required for proper stackwalking of RuntimeStub frame

  if (!c_abi) {
    __ push_call_clobbered_registers();
  } else {
    __ push_call_clobbered_fp_registers();
  }

  __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT));
  __ blrt(lr, 1, 0, MacroAssembler::ret_type_integral);
  if (!c_abi) {
    // Preserve the result (r0) across the register restore via rscratch1.
    __ mov(rscratch1, obj);
    __ pop_call_clobbered_registers();
    __ mov(obj, rscratch1);
  } else {
    __ pop_call_clobbered_fp_registers();
  }

  __ leave(); // required for proper stackwalking of RuntimeStub frame
  __ ret(lr);

  return start;
}

#undef __

// Generate both write-barrier stub flavors into a single 2 KB buffer blob.
// The C2 flavor skips the in-IR cset test when ShenandoahWriteBarrierCsetTestInIR
// already emits it in compiled code.
void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
  if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
    int stub_code_size = 2048;
    ResourceMark rm;
    // NOTE(review): BufferBlob::create can return NULL on CodeCache
    // exhaustion; this is not checked here — verify callers can tolerate
    // a subsequent NULL-buffer failure, or add a check.
    BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
    CodeBuffer buf(bb);
    StubCodeGenerator cgen(&buf);
    _shenandoah_wb = generate_shenandoah_wb(&cgen, false, true);
    _shenandoah_wb_C = generate_shenandoah_wb(&cgen, true, !ShenandoahWriteBarrierCsetTestInIR);
  }
}