
src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp

rev 53513 : 8217016: Shenandoah: Streamline generation of CAS barriers


 406   bool is_write = (decorators & ACCESS_WRITE) != 0;
 407   if (is_write) {
 408     if (oop_not_null) {
 409       write_barrier(masm, obj);
 410     } else {
 411       Label done;
 412       __ cbz(obj, done);
 413       write_barrier(masm, obj);
 414       __ bind(done);
 415     }
 416   } else {
 417     if (oop_not_null) {
 418       read_barrier_not_null(masm, obj);
 419     } else {
 420       read_barrier(masm, obj);
 421     }
 422   }
 423 }
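
This dispatch picks the barrier from the access decorators: writes go through the write barrier, guarded by a null check (the cbz above) unless the oop is statically known non-null; reads go through the read barrier. A minimal C++ sketch of the same decision logic, assuming hypothetical resolve_for_write()/resolve_for_read() helpers in place of the code that write_barrier()/read_barrier() actually emit:

    #include <cstddef>

    struct oopDesc;
    typedef oopDesc* oop;

    // Hypothetical stand-ins for the emitted barrier code (placeholder bodies).
    oop resolve_for_write(oop obj) { return obj; }
    oop resolve_for_read(oop obj)  { return obj; }

    oop apply_access_barrier(oop obj, bool is_write, bool oop_not_null) {
      if (is_write) {
        // A write must target the to-space copy; NULL needs no barrier,
        // mirroring the cbz(obj, done) guard in the emitted code.
        if (oop_not_null || obj != NULL) {
          obj = resolve_for_write(obj);
        }
      } else {
        // read_barrier_not_null() only elides the internal null check when
        // the compiler has proven the oop non-null; both paths resolve obj.
        obj = resolve_for_read(obj);
      }
      return obj;
    }
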
 424 
 425 void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
 426                                                 bool acquire, bool release, bool weak, bool encode,
 427                                                 Register tmp1, Register tmp2, Register tmp3,
 428                                                 Register result) {
 429 
 430   if (!ShenandoahCASBarrier) {
 431     if (UseCompressedOops) {
 432       if (encode) {
 433         __ encode_heap_oop(tmp1, expected);
 434         expected = tmp1;
 435         __ encode_heap_oop(tmp3, new_val);
 436         new_val = tmp3;
 437       }
 438       __ cmpxchg(addr, expected, new_val, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
 439       __ membar(__ AnyAny);
 440     } else {
 441       __ cmpxchg(addr, expected, new_val, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
 442       __ membar(__ AnyAny);
 443     }
 444     return;
 445   }
 446 
 447   if (encode) {
 448     storeval_barrier(masm, new_val, tmp3);
 449   }
 450 
 451   if (UseCompressedOops) {
 452     if (encode) {
 453       __ encode_heap_oop(tmp1, expected);
 454       expected = tmp1;
 455       __ encode_heap_oop(tmp2, new_val);
 456       new_val = tmp2;
 457     }
 458   }
 459   bool is_cae = (result != noreg);
 460   bool is_narrow = UseCompressedOops;
 461   Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;
 462   if (! is_cae) result = rscratch1;
 463 
 464   assert_different_registers(addr, expected, new_val, result, tmp3);
 465 
 466   Label retry, done, fail;
 467 
 468   // CAS, using LL/SC pair.
 469   __ bind(retry);
 470   __ load_exclusive(result, addr, size, acquire);
 471   if (is_narrow) {
 472     __ cmpw(result, expected);
 473   } else {
 474     __ cmp(result, expected);
 475   }
 476   __ br(Assembler::NE, fail);
 477   __ store_exclusive(tmp3, new_val, addr, size, release);
 478   if (weak) {
 479     __ cmpw(tmp3, 0u); // If the store fails, return NE to our caller
 480   } else {
 481     __ cbnzw(tmp3, retry);
 482   }
 483   __ b(done);
 484 
 485   __ bind(fail);
 486   // Check if rb(expected)==rb(result)
 487   // Shuffle registers so that we have the memory value ready for the next expected.
 488   __ mov(tmp3, expected);
 489   __ mov(expected, result);
 490   if (is_narrow) {
 491     __ decode_heap_oop(result, result);
 492     __ decode_heap_oop(tmp3, tmp3);
 493   }
 494   read_barrier_impl(masm, result);
 495   read_barrier_impl(masm, tmp3);
 496   __ cmp(result, tmp3);
 497   // Retry with expected now being the value we just loaded from addr.
 498   __ br(Assembler::EQ, retry);
 499   if (is_narrow && is_cae) {
 500     // For cmp-and-exchange and narrow oops, we need to restore
 501     // the compressed old-value. We moved it to 'expected' a few lines up.
 502     __ mov(result, expected);
 503   }
 504   __ bind(done);
 505 
 506 }
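
The subtle part of this sequence is the fail path. Under Shenandoah a CAS can fail spuriously: 'expected' and the value in memory may be from-space and to-space copies of the same object. The code therefore resolves both sides through the read barrier and retries when they match. A hedged C++11 sketch of that retry policy, using std::atomic in place of the LL/SC pair, with a hypothetical resolve() standing in for the read barrier (compressed oops and the weak flavor are omitted):

    #include <atomic>

    struct oopDesc;
    typedef oopDesc* oop;

    oop resolve(oop obj) { return obj; }  // hypothetical read-barrier stand-in

    // Returns the witnessed memory value, like the cmp-and-exchange flavor.
    oop cae_oop(std::atomic<oop>* addr, oop expected, oop new_val) {
      for (;;) {
        oop witnessed = expected;
        // On failure, compare_exchange_strong writes the current memory
        // value back into 'witnessed'.
        if (addr->compare_exchange_strong(witnessed, new_val)) {
          return witnessed;  // success: witnessed == expected
        }
        // Check if rb(expected) == rb(witnessed): a false negative means
        // the two pointers are different copies of the same object.
        if (resolve(witnessed) != resolve(expected)) {
          return witnessed;  // genuine failure
        }
        expected = witnessed;  // retry with the value just loaded from addr
      }
    }
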
 507 
 508 #ifdef COMPILER1
 509 
 510 #undef __
 511 #define __ ce->masm()->
 512 
 513 void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
 514   ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
 515   // At this point we know that marking is in progress.
 516   // If do_load() is true then we have to emit the
 517   // load of the previous value; otherwise it has already
 518   // been loaded into _pre_val.
 519 
 520   __ bind(*stub->entry());
 521 
 522   assert(stub->pre_val()->is_register(), "Precondition.");
 523 
 524   Register pre_val_reg = stub->pre_val()->as_register();
 525 

 406   bool is_write = (decorators & ACCESS_WRITE) != 0;
 407   if (is_write) {
 408     if (oop_not_null) {
 409       write_barrier(masm, obj);
 410     } else {
 411       Label done;
 412       __ cbz(obj, done);
 413       write_barrier(masm, obj);
 414       __ bind(done);
 415     }
 416   } else {
 417     if (oop_not_null) {
 418       read_barrier_not_null(masm, obj);
 419     } else {
 420       read_barrier(masm, obj);
 421     }
 422   }
 423 }
 424 
 425 void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
 426                                                 bool acquire, bool release, bool weak, bool is_cae,
 427                                                 Register result) {
 428 
 429   Register tmp = rscratch2;
 430   bool is_narrow = UseCompressedOops;
 431   Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;
 432 
 433   assert_different_registers(addr, expected, new_val, result, tmp);
 434 
 435   Label retry, done, fail;
 436 
 437   // CAS, using LL/SC pair.
 438   __ bind(retry);
 439   __ load_exclusive(result, addr, size, acquire);
 440   if (is_narrow) {
 441     __ cmpw(result, expected);
 442   } else {
 443     __ cmp(result, expected);
 444   }
 445   __ br(Assembler::NE, fail);
 446   __ store_exclusive(tmp, new_val, addr, size, release);
 447   if (weak) {
 448     __ cmpw(tmp, 0u); // If the store fails, return NE to our caller
 449   } else {
 450     __ cbnzw(tmp, retry);
 451   }
 452   __ b(done);
 453 
 454   __ bind(fail);
 455   // Check if rb(expected)==rb(result)
 456   // Shuffle registers so that we have the memory value ready for the next expected.
 457   __ mov(tmp, expected);
 458   __ mov(expected, result);
 459   if (is_narrow) {
 460     __ decode_heap_oop(result, result);
 461     __ decode_heap_oop(tmp, tmp);
 462   }
 463   read_barrier_impl(masm, result);
 464   read_barrier_impl(masm, tmp);
 465   __ cmp(result, tmp);
 466   // Retry with expected now being the value we just loaded from addr.
 467   __ br(Assembler::EQ, retry);
 468   if (is_cae && is_narrow) {
 469     // For cmp-and-exchange and narrow oops, we need to restore
 470     // the compressed old-value. We moved it to 'expected' a few lines up.
 471     __ mov(result, expected);
 472   }
 473   __ bind(done);
 474 
 475   if (!is_cae) {
 476     __ cset(result, Assembler::EQ);
 477   }
 478 }
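
Compared with the version above, the streamlined code drops the encode/storeval plumbing and the three temporaries from this function (rscratch2 serves as the single tmp), and it fixes the result convention: for cmp-and-exchange the result register carries the witnessed value, while for a plain CAS the final flags are turned into a 0/1 answer by cset(result, EQ). A short hedged sketch of how the boolean flavor relates to the exchange flavor, reusing the hypothetical cae_oop()/resolve() from the sketch after the earlier version (the real code reads the flags instead of re-running barriers):

    // Boolean compare-and-set on top of the compare-and-exchange sketch.
    // Comparing through resolve() matters: a CAS that succeeds after a
    // false-negative retry may witness a different copy of the same object
    // than the caller's 'expected'.
    bool cas_oop(std::atomic<oop>* addr, oop expected, oop new_val) {
      oop witnessed = cae_oop(addr, expected, new_val);
      return resolve(witnessed) == resolve(expected);
    }
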
 479 
 480 #ifdef COMPILER1
 481 
 482 #undef __
 483 #define __ ce->masm()->
 484 
 485 void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
 486   ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
 487   // At this point we know that marking is in progress.
 488   // If do_load() is true then we have to emit the
 489   // load of the previous value; otherwise it has already
 490   // been loaded into _pre_val.
 491 
 492   __ bind(*stub->entry());
 493 
 494   assert(stub->pre_val()->is_register(), "Precondition.");
 495 
 496   Register pre_val_reg = stub->pre_val()->as_register();
 497 
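
The stub body is truncated in this excerpt, but the comment above states the contract: marking is in progress and pre_val holds (or will be loaded with) the previous value of the field. This is the SATB (snapshot-at-the-beginning) pre-barrier slow path, which must log the old value so concurrent marking does not lose it. A hedged sketch of the general shape of such a slow path, with a hypothetical enqueue_for_marking() standing in for the runtime call:

    #include <cstddef>

    struct oopDesc;
    typedef oopDesc* oop;

    void enqueue_for_marking(oop pre_val);  // assumed runtime entry point

    // General shape of an SATB pre-barrier slow path (not the stub itself).
    void pre_barrier_slow_path(oop pre_val) {
      if (pre_val == NULL) return;   // nothing to log for a null previous value
      enqueue_for_marking(pre_val);  // keep the snapshot value alive for marking
    }
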