src/share/vm/c1/c1_LIRGenerator.cpp

rev 8961 : [mq]: diff-shenandoah.patch

@@ -245,18 +245,26 @@
 }
 
 void LIRItem::load_item_force(LIR_Opr reg) {
   LIR_Opr r = result();
   if (r != reg) {
+    _result = _gen->force_opr_to(r, reg);
+  }
+}
+
+LIR_Opr LIRGenerator::force_opr_to(LIR_Opr op, LIR_Opr reg) {
+  if (op != reg) {
 #if !defined(ARM) && !defined(E500V2)
-    if (r->type() != reg->type()) {
+    if (op->type() != reg->type()) {
       // moves between different types need an intervening spill slot
-      r = _gen->force_to_spill(r, reg->type());
+      op = force_to_spill(op, reg->type());
     }
 #endif
-    __ move(r, reg);
-    _result = reg;
+    __ move(op, reg);
+    return reg;
+  } else {
+    return op;
   }
 }
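
The helper factored out here lets code that holds only a raw LIR_Opr (such as the write-barrier expansion added further down) pin an operand into a specific register, instead of going through LIRItem::load_item_force(). A minimal sketch of a call site, assuming an x86 register operand such as FrameMap::rax_oop_opr; the caller shown is hypothetical, not part of the patch:

    // Hypothetical use inside LIRGenerator (illustrative only):
    LIR_Opr pinned = force_opr_to(some_oop_opr, FrameMap::rax_oop_opr);
    // pinned now denotes rax; a spill slot is interposed if the types differ.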
 
 ciObject* LIRItem::get_jobject_constant() const {
   ObjectType* oc = type()->as_ObjectType();

@@ -1420,10 +1428,11 @@
                                bool do_load, bool patch, CodeEmitInfo* info) {
   // Do the pre-write barrier, if any.
   switch (_bs->kind()) {
 #if INCLUDE_ALL_GCS
     case BarrierSet::G1SATBCTLogging:
+    case BarrierSet::ShenandoahBarrierSet:
       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
       break;
 #endif // INCLUDE_ALL_GCS
     case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:

@@ -1442,10 +1451,12 @@
   switch (_bs->kind()) {
 #if INCLUDE_ALL_GCS
     case BarrierSet::G1SATBCTLogging:
       G1SATBCardTableModRef_post_barrier(addr,  new_val);
       break;
+    case BarrierSet::ShenandoahBarrierSet:
+      break;
 #endif // INCLUDE_ALL_GCS
     case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       CardTableModRef_post_barrier(addr,  new_val);
       break;
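
Both barrier-switch changes above follow from the same design point: Shenandoah reuses G1's SATB (snapshot-at-the-beginning) pre-write barrier to keep concurrent marking correct, and it needs no post-write barrier at all because there is no card table or remembered set to maintain, hence the empty ShenandoahBarrierSet case. Conceptually the shared pre-barrier does the following; the names below are illustrative pseudo-C++, not the LIR that G1SATBCardTableModRef_pre_barrier emits:

    // Sketch of SATB pre-barrier semantics (illustrative names, not real HotSpot API):
    void satb_pre_barrier(oop pre_val) {
      if (marking_is_active && pre_val != NULL) {
        satb_enqueue(pre_val);   // keep the about-to-be-overwritten value alive for marking
      }
    }
    // Post-barrier: G1 dirties a card here; Shenandoah emits nothing.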

@@ -1712,26 +1723,38 @@
     tty->print_cr("   ###class not loaded at store_%s bci %d",
                   x->is_static() ?  "static" : "field", x->printable_bci());
   }
 #endif
 
+  LIR_Opr obj = object.result();
+
   if (x->needs_null_check() &&
       (needs_patching ||
        MacroAssembler::needs_explicit_null_check(x->offset()))) {
     // emit an explicit null check because the offset is too large
-    __ null_check(object.result(), new CodeEmitInfo(info));
+    __ null_check(obj, new CodeEmitInfo(info));
+  }
+
+  obj = shenandoah_write_barrier(obj, info, x->needs_null_check());
+  LIR_Opr val = value.result();
+  if (is_oop && UseShenandoahGC) {
+    if (! val->is_register()) {
+      assert(val->is_constant(), "expect constant");
+    } else {
+      val = shenandoah_read_barrier(val, NULL, true);
+    }
   }
 
   LIR_Address* address;
   if (needs_patching) {
     // we need to patch the offset in the instruction so don't allow
     // generate_address to try to be smart about emitting the -1.
     // Otherwise the patching code won't know how to find the
     // instruction to patch.
-    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
+    address = new LIR_Address(obj, PATCHED_ADDR, field_type);
   } else {
-    address = generate_address(object.result(), x->offset(), field_type);
+    address = generate_address(obj, x->offset(), field_type);
   }
 
   if (is_volatile && os::is_MP()) {
     __ membar_release();
   }

@@ -1745,19 +1768,19 @@
                 (info ? new CodeEmitInfo(info) : NULL));
   }
 
   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
   if (needs_atomic_access && !needs_patching) {
-    volatile_field_store(value.result(), address, info);
+    volatile_field_store(val, address, info);
   } else {
     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
-    __ store(value.result(), address, info, patch_code);
+    __ store(val, address, info, patch_code);
   }
 
   if (is_oop) {
     // Store to object so mark the card of the header
-    post_barrier(object.result(), value.result());
+    post_barrier(obj, val);
   }
 
   if (is_volatile && os::is_MP()) {
     __ membar();
   }
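
For an oop field store, the hunks above route every former use of object.result() and value.result() through the local obj and val operands so the barriers compose in one place: the destination object goes through a write barrier (it is about to be mutated, so it must be its to-space copy), and the stored reference goes through a read barrier so that a resolved reference is what lands in the heap; constants are exempt since they cannot be from-space pointers. The net shape, as a sketch rather than the emitted LIR:

    // Sketch of an oop field store under Shenandoah (not the generated code):
    obj = shenandoah_write_barrier(obj, info, ...);  // destination resolved/copied to to-space
    val = shenandoah_read_barrier(val, NULL, true);  // stored reference resolved
    // ... address formed from obj, then the (volatile or plain) store of val ...
    post_barrier(obj, val);                          // no-op for Shenandoah (see above)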

@@ -1791,34 +1814,35 @@
     tty->print_cr("   ###class not loaded at load_%s bci %d",
                   x->is_static() ?  "static" : "field", x->printable_bci());
   }
 #endif
 
+  LIR_Opr obj = object.result();
   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
   if (x->needs_null_check() &&
       (needs_patching ||
        MacroAssembler::needs_explicit_null_check(x->offset()) ||
        stress_deopt)) {
-    LIR_Opr obj = object.result();
     if (stress_deopt) {
       obj = new_register(T_OBJECT);
       __ move(LIR_OprFact::oopConst(NULL), obj);
     }
     // emit an explicit null check because the offset is too large
     __ null_check(obj, new CodeEmitInfo(info));
   }
 
+  obj = shenandoah_read_barrier(obj, info, x->needs_null_check() && x->explicit_null_check() != NULL);
   LIR_Opr reg = rlock_result(x, field_type);
   LIR_Address* address;
   if (needs_patching) {
     // we need to patch the offset in the instruction so don't allow
     // generate_address to try to be smart about emitting the -1.
     // Otherwise the patching code won't know how to find the
     // instruction to patch.
-    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
+    address = new LIR_Address(obj, PATCHED_ADDR, field_type);
   } else {
-    address = generate_address(object.result(), x->offset(), field_type);
+    address = generate_address(obj, x->offset(), field_type);
   }
 
   bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
   if (needs_atomic_access && !needs_patching) {
     volatile_field_load(address, reg, info);

@@ -1830,10 +1854,43 @@
   if (is_volatile && os::is_MP()) {
     __ membar_acquire();
   }
 }
 
+LIR_Opr LIRGenerator::shenandoah_read_barrier(LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
+  if (UseShenandoahGC) {
+
+    LabelObj* done = new LabelObj();
+    LIR_Opr result = new_register(T_OBJECT);
+    __ move(obj, result);
+    if (need_null_check) {
+      __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
+      __ branch(lir_cond_equal, T_LONG, done->label());
+    }
+    LIR_Address* brooks_ptr_address = generate_address(result, -8, T_ADDRESS);
+    __ load(brooks_ptr_address, result, info ? new CodeEmitInfo(info) : NULL, lir_patch_none);
+
+    __ branch_destination(done->label());
+    return result;
+  } else {
+    return obj;
+  }
+}
+
+LIR_Opr LIRGenerator::shenandoah_write_barrier(LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
+  if (UseShenandoahGC) {
+
+    LIR_Opr result = new_register(T_OBJECT);
+    LIR_Opr tmp1 = new_register(T_INT);
+    LIR_Opr tmp2 = new_register(T_INT);
+    __ shenandoah_wb(obj, result, tmp1, tmp2, info ? new CodeEmitInfo(info) : NULL, need_null_check);
+    return result;
+
+  } else {
+    return obj;
+  }
+}
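
These two helpers encode the Brooks forwarding pointer used by this Shenandoah design: every object carries a pointer in the word just before it (hence the hard-coded -8 offset) that points either to the object itself or to its to-space copy. The read barrier simply chases that pointer, branching over the load when a null check is requested; the write barrier, emitted as the shenandoah_wb LIR op, additionally has to guarantee a to-space copy, so it is expected to copy the object while evacuation is active. In plain C++ the read barrier amounts to roughly the following sketch (assuming the -8 layout above; this is not the generated LIR):

    // Illustrative semantics of shenandoah_read_barrier:
    oop resolve(oop obj) {
      if (obj == NULL) return NULL;          // the optional null check skips the load
      return *(oop*)((char*)obj - 8);        // load the Brooks forwarding pointer
    }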
 
 //------------------------java.nio.Buffer.checkIndex------------------------
 
 // int java.nio.Buffer.checkIndex(int)
 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {

@@ -1926,23 +1983,26 @@
       __ move(LIR_OprFact::oopConst(NULL), obj);
       __ null_check(obj, new CodeEmitInfo(null_check_info));
     }
   }
 
+  LIR_Opr ary = array.result();
+  ary = shenandoah_read_barrier(ary, null_check_info, null_check_info != NULL);
+
   // emit array address setup early so it schedules better
-  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
+  LIR_Address* array_addr = emit_array_address(ary, index.result(), x->elt_type(), false);
 
   if (GenerateRangeChecks && needs_range_check) {
     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
     } else if (use_length) {
       // TODO: use a (modified) version of array_range_check that does not require a
       //       constant length to be loaded to a register
       __ cmp(lir_cond_belowEqual, length.result(), index.result());
       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
     } else {
-      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
+      array_range_check(ary, index.result(), null_check_info, range_check_info);
       // The range check performs the null check, so clear it out for the load
       null_check_info = NULL;
     }
   }
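
As in the field-access paths, the array operand is resolved through a read barrier before the element address is formed, so emit_array_address and array_range_check work against the to-space copy rather than a possibly stale from-space address. Schematically (sketch only, not the emitted code):

    // ary = resolve(array.result());                            // Brooks-pointer load, as above
    // element_addr = ary + header_size + index * element_size;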
 

@@ -2251,11 +2311,11 @@
   //       pre_barrier(..., value, ...);
   //     }
   //   }
   // }
 
-  if (UseG1GC && type == T_OBJECT) {
+  if ((UseShenandoahGC || UseG1GC) && type == T_OBJECT) {
     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
     bool gen_offset_check = true;    // Assume we need to generate the offset guard.
     bool gen_source_check = true;    // Assume we need to check the src object for null.
     bool gen_type_check = true;      // Assume we need to check the reference_type.
 

@@ -2793,10 +2853,11 @@
     if (method()->is_synchronized() && GenerateSynchronizationCode) {
       LIR_Opr lock = new_register(T_INT);
       __ load_stack_address_monitor(0, lock);
 
       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
+      obj = shenandoah_write_barrier(obj, info, false);
       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
 
       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
     }
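
Monitor enter writes into the object header (the displaced mark), so the receiver is pushed through a write barrier before the lock is taken; the false argument reflects that the receiver of a synchronized method is known to be non-NULL. Order of operations, as a sketch:

    // obj = shenandoah_write_barrier(obj, info, false);  // header update must hit to-space
    // lock_object(..., obj, lock, ...);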

@@ -3013,13 +3074,13 @@
 
 
 
 // Code for  :  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
 void LIRGenerator::do_IfOp(IfOp* x) {
+  ValueTag xtag = x->x()->type()->tag();
 #ifdef ASSERT
   {
-    ValueTag xtag = x->x()->type()->tag();
     ValueTag ttag = x->tval()->type()->tag();
     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
   }

@@ -3038,11 +3099,18 @@
   LIRItem f_val(x->fval(), this);
   t_val.dont_load_item();
   f_val.dont_load_item();
   LIR_Opr reg = rlock_result(x);
 
-  __ cmp(lir_cond(x->cond()), left.result(), right.result());
+  LIR_Opr left_opr = left.result();
+  LIR_Opr right_opr = right.result();
+  if (xtag == objectTag && UseShenandoahGC && x->y()->type() != objectNull) { // Don't need to resolve for ifnull.
+    left_opr = shenandoah_write_barrier(left_opr, NULL, true);
+    right_opr = shenandoah_read_barrier(right_opr, NULL, true);
+  }
+
+  __ cmp(lir_cond(x->cond()), left_opr, right_opr);
   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
 }
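
The xtag hoist out of the ASSERT-only block above exists so the product build can test the operand tag. The comparison itself needs care under Shenandoah because a from-space object and its to-space copy are the same logical object at different addresses; resolving one operand through a write barrier, which pins it to its to-space copy, and the other through a read barrier before the cmp keeps address equality in sync with logical equality, while a comparison against the null constant (ifnull) needs no resolution since NULL is never forwarded. The intended semantics, roughly (illustrative pseudo-C++, not the generated LIR):

    // Sketch of the acmp invariant being preserved:
    bool ref_equal(oop a, oop b) {
      a = shenandoah_write_barrier(a);   // a is now its stable to-space copy
      b = shenandoah_read_barrier(b);    // b resolved through its forwarding pointer
      return a == b;                     // equal iff they denote the same object
    }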
 
 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
     assert(x->number_of_arguments() == expected_arguments, "wrong type");