< prev index next >

src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp

Print this page
rev 56771 : 8233339: Shenandoah: Centralize load barrier decisions into ShenandoahBarrierSet


 428     assert_different_registers(dst, tmp, thread);
 429 
 430     satb_write_barrier_pre(masm, noreg, dst, thread, tmp, true, false);
 431     __ movdbl(xmm0, Address(rsp, 0));
 432     __ addptr(rsp, 2 * Interpreter::stackElementSize);
 433     //__ pop_callee_saved_registers();
 434     __ popa();
 435   }
 436 }
 437 
// Apply the Shenandoah load-reference barrier (LRB) to the oop in dst that was
// loaded from src. Entire barrier is compiled out when ShenandoahLoadRefBarrier
// is disabled. Null references need no healing, so a null dst branches straight
// to done and only non-null values reach load_reference_barrier_not_null.
 438 void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src) {
 439   if (ShenandoahLoadRefBarrier) {
 440     Label done;
       // testptr sets ZF when dst == NULL; skip the not-null slow path in that case.
 441     __ testptr(dst, dst);
 442     __ jcc(Assembler::zero, done);
 443     load_reference_barrier_not_null(masm, dst, src);
 444     __ bind(done);
 445   }
 446 }
 447 














// Emit an oop/primitive load with Shenandoah barriers (old form: barrier
// decisions are computed inline here from the DecoratorSet).
//   dst        - receives the loaded value
//   src        - memory operand to load from (base/index must be preserved
//                until after the delegated load)
//   tmp1       - scratch; may be invalid
//   tmp_thread - thread register on 32-bit; unused on LP64 (r15_thread is used)
 448 void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
 449              Register dst, Address src, Register tmp1, Register tmp_thread) {
       // Decode the decorator bits that drive barrier selection.
 450   bool on_oop = is_reference_type(type);
 451   bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
 452   bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
 453   bool not_in_heap = (decorators & IN_NATIVE) != 0;
 454   bool on_reference = on_weak || on_phantom;
 455   bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
       // Traversal mode forces keep-alive even when AS_NO_KEEPALIVE is requested.
 456   bool keep_alive = ((decorators & AS_NO_KEEPALIVE) == 0) || is_traversal_mode;
 457 
       // Remember the caller-visible destination; dst may be redirected below.
 458   Register result_dst = dst;
 459   bool use_tmp1_for_dst = false;

 460 
 461   if (on_oop) {
 462     // We want to preserve src

       // If dst aliases the address registers, redirect the load into a
       // temporary so src stays addressable for the post-load barrier.
 463     if (dst == src.base() || dst == src.index()) {
 464       // Use tmp1 for dst if possible, as it is not used in BarrierAssembler::load_at()
 465       if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
 466         dst = tmp1;
 467         use_tmp1_for_dst = true;
 468       } else {
           // No usable scratch: spill rdi and use it as the temporary dst
           // (popped again after the value is moved to result_dst).
 469         dst = rdi;
 470         __ push(dst);
 471       }
 472     }
 473     assert_different_registers(dst, src.base(), src.index());
 474   }
 475 
       // Delegate the actual load to the base barrier-set assembler.
 476   BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
 477 
 478   if (on_oop) {
       // Native (off-heap) roots get the native LRB flavor, except in
       // traversal mode which uses the regular barrier for all loads.
 479     if (not_in_heap && !is_traversal_mode) {

 480       load_reference_barrier_native(masm, dst, src);
 481     } else {
 482       load_reference_barrier(masm, dst, src);
 483     }
 484 

       // Move the (possibly healed) oop into the caller's register and undo
       // the rdi spill if one was made above.
 485     if (dst != result_dst) {
 486       __ movptr(result_dst, dst);
 487 
 488       if (!use_tmp1_for_dst) {
 489         __ pop(dst);
 490       }
 491 
 492       dst = result_dst;
 493     }
 494 
       // Weak/phantom reference loads that must keep the referent alive get a
       // SATB pre-barrier on the loaded value.
 495     if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
 496       const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
 497       assert_different_registers(dst, tmp1, tmp_thread);
 498       NOT_LP64(__ get_thread(thread));
 499       // Generate the SATB pre-barrier code to log the value of
 500       // the referent field in an SATB buffer.
 501       shenandoah_write_barrier_pre(masm /* masm */,
 502                                    noreg /* obj */,
 503                                    dst /* pre_val */,
 504                                    thread /* thread */,
 505                                    tmp1 /* tmp */,
 506                                    true /* tosca_live */,
 507                                    true /* expand_call */);
 508     }
 509   }
 510 }
 511 
 512 void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
 513               Address dst, Register val, Register tmp1, Register tmp2) {
 514 
 515   bool on_oop = is_reference_type(type);
 516   bool in_heap = (decorators & IN_HEAP) != 0;
 517   bool as_normal = (decorators & AS_NORMAL) != 0;
 518   if (on_oop && in_heap) {
 519     bool needs_pre_barrier = as_normal;
 520 
 521     Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
 522     Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
 523     // flatten object address if needed
 524     // We do it regardless of precise because we need the registers
 525     if (dst.index() == noreg && dst.disp() == 0) {
 526       if (dst.base() != tmp1) {
 527         __ movptr(tmp1, dst.base());
 528       }




 428     assert_different_registers(dst, tmp, thread);
 429 
 430     satb_write_barrier_pre(masm, noreg, dst, thread, tmp, true, false);
 431     __ movdbl(xmm0, Address(rsp, 0));
 432     __ addptr(rsp, 2 * Interpreter::stackElementSize);
 433     //__ pop_callee_saved_registers();
 434     __ popa();
 435   }
 436 }
 437 
// Apply the Shenandoah load-reference barrier (LRB) to the oop in dst that was
// loaded from src. Entire barrier is compiled out when ShenandoahLoadRefBarrier
// is disabled. Null references need no healing, so a null dst branches straight
// to done and only non-null values reach load_reference_barrier_not_null.
 438 void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src) {
 439   if (ShenandoahLoadRefBarrier) {
 440     Label done;
       // testptr sets ZF when dst == NULL; skip the not-null slow path in that case.
 441     __ testptr(dst, dst);
 442     __ jcc(Assembler::zero, done);
 443     load_reference_barrier_not_null(masm, dst, src);
 444     __ bind(done);
 445   }
 446 }
 447 
 448 //
 449 // Arguments:
 450 //
 451 // Inputs:
 452 //   src:        oop location, might be clobbered
 453 //   tmp1:       scratch register, might not be valid.
 454 //   tmp_thread: unused
 455 //
 456 // Output:
 457 //   dst:        oop loaded from src location
 458 //
 459 // Kill:
 460 //   tmp1 (if it is valid)
 461 //
// New form (JDK-8233339): all barrier decisions are delegated to
// ShenandoahBarrierSet predicates instead of being decoded from the
// DecoratorSet inline, so this assembler stub only sequences the emission.
 462 void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
 463              Register dst, Address src, Register tmp1, Register tmp_thread) {








       // Remember the caller-visible destination; dst may be redirected below.
 464   Register result_dst = dst;
 465   bool use_tmp1_for_dst = false;
       // Centralized decision: does this (decorators, type) load need an LRB?
 466   bool need_load_reference_barrier = ShenandoahBarrierSet::need_load_reference_barrier(decorators, type);
 467 
 468   // Only preserve src address if we need load reference barrier
 469   if (need_load_reference_barrier) {
 470     // Use tmp1 or rdi as temporary output register to avoid clobbering src
 471     if (dst == src.base() || dst == src.index()) {
 472       // Use tmp1 for dst if possible, as it is not used in BarrierAssembler::load_at()
 473       if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
 474         dst = tmp1;
 475         use_tmp1_for_dst = true;
 476       } else {
           // No usable scratch: spill rdi and use it as the temporary dst
           // (popped again once the value is moved to result_dst).
 477         dst = rdi;
 478         __ push(dst);
 479       }
 480     }
 481     assert_different_registers(dst, src.base(), src.index());
 482   }
 483 
       // Delegate the actual load to the base barrier-set assembler.
 484   BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
 485 
       // No LRB requested: the plain load above is the whole story.
 486   if (!need_load_reference_barrier) return;
 487 
       // Pick the native (off-heap root) vs. regular barrier flavor.
 488   if (ShenandoahBarrierSet::use_native_load_reference_barrier(decorators, type)) {
 489     load_reference_barrier_native(masm, dst, src);
 490   } else {
 491     load_reference_barrier(masm, dst, src);
 492   }
 493 
 494   // Move loaded oop to final destination
 495   if (dst != result_dst) {
 496     __ movptr(result_dst, dst);
 497 
       // Undo the rdi spill made above when tmp1 was not usable.
 498     if (!use_tmp1_for_dst) {
 499       __ pop(dst);
 500     }
 501 
 502     dst = result_dst;
 503   }
 504 
       // Reference loads that must keep the referent alive get a SATB
       // pre-barrier on the loaded value.
 505   if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
 506     const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
 507     assert_different_registers(dst, tmp1, tmp_thread);
 508     NOT_LP64(__ get_thread(thread));
 509     // Generate the SATB pre-barrier code to log the value of
 510     // the referent field in an SATB buffer.
 511     shenandoah_write_barrier_pre(masm /* masm */,
 512                                  noreg /* obj */,
 513                                  dst /* pre_val */,
 514                                  thread /* thread */,
 515                                  tmp1 /* tmp */,
 516                                  true /* tosca_live */,
 517                                  true /* expand_call */);

 518   }
 519 }
 520 
 521 void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
 522               Address dst, Register val, Register tmp1, Register tmp2) {
 523 
 524   bool on_oop = is_reference_type(type);
 525   bool in_heap = (decorators & IN_HEAP) != 0;
 526   bool as_normal = (decorators & AS_NORMAL) != 0;
 527   if (on_oop && in_heap) {
 528     bool needs_pre_barrier = as_normal;
 529 
 530     Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
 531     Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
 532     // flatten object address if needed
 533     // We do it regardless of precise because we need the registers
 534     if (dst.index() == noreg && dst.disp() == 0) {
 535       if (dst.base() != tmp1) {
 536         __ movptr(tmp1, dst.base());
 537       }


< prev index next >