
src/share/vm/c1/c1_LIRGenerator.cpp

rev 12906 : [mq]: gc_interface

@@ -32,19 +32,17 @@
 #include "c1/c1_ValueStack.hpp"
 #include "ci/ciArrayKlass.hpp"
 #include "ci/ciInstance.hpp"
 #include "ci/ciObjArray.hpp"
+#include "gc/shared/c1BarrierSetCodeGen.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/vm_version.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 #ifdef TRACE_HAVE_INTRINSICS
 #include "trace/traceMacros.hpp"
 #endif
 
 #ifdef ASSERT

@@ -307,15 +305,10 @@
 
 
 //--------------------------------------------------------------
 
 
-void LIRGenerator::init() {
-  _bs = Universe::heap()->barrier_set();
-}
-
-
 void LIRGenerator::block_do_prolog(BlockBegin* block) {
 #ifndef PRODUCT
   if (PrintIRWithLIR) {
     block->print();
   }

@@ -1239,23 +1232,15 @@
   CodeEmitInfo* info = NULL;
   if (x->needs_null_check()) {
     info = state_for(x);
   }
 
-  LIR_Address* referent_field_adr =
-    new LIR_Address(reference.result(), referent_offset, T_OBJECT);
-
-  LIR_Opr result = rlock_result(x);
-
-  __ load(referent_field_adr, result, info);
-
-  // Register the value in the referent field with the pre-barrier
-  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
-              result /* pre_val */,
-              false  /* do_load */,
-              false  /* patch */,
-              NULL   /* info */);
+  C1DecoratorSet decorators = C1_ACCESS_ON_HEAP | C1_ACCESS_ON_WEAK;
+  LIR_Opr result = access_load_at(decorators, as_BasicType(x->type()),
+                                  reference, LIR_OprFact::intConst(referent_offset),
+                                  NULL, NULL);
+  set_result(x, result);
 }
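Note: the SATB pre-barrier that the removed lines emitted inline now has to come from whichever C1 barrier-set code generator the collector installs. A minimal sketch of how a G1-style backend could honor C1_ACCESS_ON_WEAK; the class names G1C1CodeGen and RawC1CodeGen and the pre_barrier helper are hypothetical, inferred from the call sites in this patch:

    LIR_Opr G1C1CodeGen::load_at(LIRGenerator* gen, C1DecoratorSet decorators, BasicType type,
                                 LIRItem& base, LIR_Opr offset,
                                 CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
      // Emit the plain load first (hypothetical superclass doing raw accesses).
      LIR_Opr result = RawC1CodeGen::load_at(gen, decorators, type, base, offset,
                                             patch_info, load_emit_info);
      if ((decorators & C1_ACCESS_ON_WEAK) != 0 && type == T_OBJECT) {
        // Keep the loaded referent alive: record it in the SATB queue, which is
        // what the removed pre_barrier(..., result /* pre_val */, ...) call did.
        pre_barrier(gen, LIR_OprFact::illegalOpr /* addr_opr */, result /* pre_val */,
                    false /* do_load */, false /* patch */, NULL /* info */);
      }
      return result;
    }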
 
 // Example: clazz.isInstance(object)
 void LIRGenerator::do_isInstance(Intrinsic* x) {
   assert(x->number_of_arguments() == 2, "wrong type");

@@ -1444,237 +1429,32 @@
   _constants.append(c);
   _reg_for_constants.append(result);
   return result;
 }
 
-// Various barriers
-
-void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
-                               bool do_load, bool patch, CodeEmitInfo* info) {
-  // Do the pre-write barrier, if any.
-  switch (_bs->kind()) {
-#if INCLUDE_ALL_GCS
-    case BarrierSet::G1SATBCTLogging:
-      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
-      break;
-#endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
-      // No pre barriers
-      break;
-    case BarrierSet::ModRef:
-      // No pre barriers
-      break;
-    default      :
-      ShouldNotReachHere();
-
-  }
-}
-
-void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
-  switch (_bs->kind()) {
-#if INCLUDE_ALL_GCS
-    case BarrierSet::G1SATBCTLogging:
-      G1SATBCardTableModRef_post_barrier(addr,  new_val);
-      break;
-#endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
-      CardTableModRef_post_barrier(addr,  new_val);
-      break;
-    case BarrierSet::ModRef:
-      // No post barriers
-      break;
-    default      :
-      ShouldNotReachHere();
-    }
-}
-
-////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
-                                                     bool do_load, bool patch, CodeEmitInfo* info) {
-  // First we test whether marking is in progress.
-  BasicType flag_type;
-  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
-    flag_type = T_INT;
-  } else {
-    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
-              "Assumption");
-    // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM,
-    // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
-    flag_type = T_BOOLEAN;
-  }
-  LIR_Opr thrd = getThreadPointer();
-  LIR_Address* mark_active_flag_addr =
-    new LIR_Address(thrd,
-                    in_bytes(JavaThread::satb_mark_queue_offset() +
-                             SATBMarkQueue::byte_offset_of_active()),
-                    flag_type);
-  // Read the marking-in-progress flag.
-  LIR_Opr flag_val = new_register(T_INT);
-  __ load(mark_active_flag_addr, flag_val);
-  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
-
-  LIR_PatchCode pre_val_patch_code = lir_patch_none;
-
-  CodeStub* slow;
-
-  if (do_load) {
-    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
-    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
-
-    if (patch)
-      pre_val_patch_code = lir_patch_normal;
-
-    pre_val = new_register(T_OBJECT);
-
-    if (!addr_opr->is_address()) {
-      assert(addr_opr->is_register(), "must be");
-      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
-    }
-    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
-  } else {
-    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
-    assert(pre_val->is_register(), "must be");
-    assert(pre_val->type() == T_OBJECT, "must be an object");
-    assert(info == NULL, "sanity");
-
-    slow = new G1PreBarrierStub(pre_val);
-  }
-
-  __ branch(lir_cond_notEqual, T_INT, slow);
-  __ branch_destination(slow->continuation());
-}
-
-void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
-  // If the "new_val" is a constant NULL, no barrier is necessary.
-  if (new_val->is_constant() &&
-      new_val->as_constant_ptr()->as_jobject() == NULL) return;
-
-  if (!new_val->is_register()) {
-    LIR_Opr new_val_reg = new_register(T_OBJECT);
-    if (new_val->is_constant()) {
-      __ move(new_val, new_val_reg);
-    } else {
-      __ leal(new_val, new_val_reg);
-    }
-    new_val = new_val_reg;
-  }
-  assert(new_val->is_register(), "must be a register at this point");
-
-  if (addr->is_address()) {
-    LIR_Address* address = addr->as_address_ptr();
-    LIR_Opr ptr = new_pointer_register();
-    if (!address->index()->is_valid() && address->disp() == 0) {
-      __ move(address->base(), ptr);
-    } else {
-      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
-      __ leal(addr, ptr);
-    }
-    addr = ptr;
-  }
-  assert(addr->is_register(), "must be a register at this point");
-
-  LIR_Opr xor_res = new_pointer_register();
-  LIR_Opr xor_shift_res = new_pointer_register();
-  if (TwoOperandLIRForm ) {
-    __ move(addr, xor_res);
-    __ logical_xor(xor_res, new_val, xor_res);
-    __ move(xor_res, xor_shift_res);
-    __ unsigned_shift_right(xor_shift_res,
-                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
-                            xor_shift_res,
-                            LIR_OprDesc::illegalOpr());
-  } else {
-    __ logical_xor(addr, new_val, xor_res);
-    __ unsigned_shift_right(xor_res,
-                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
-                            xor_shift_res,
-                            LIR_OprDesc::illegalOpr());
-  }
-
-  if (!new_val->is_register()) {
-    LIR_Opr new_val_reg = new_register(T_OBJECT);
-    __ leal(new_val, new_val_reg);
-    new_val = new_val_reg;
-  }
-  assert(new_val->is_register(), "must be a register at this point");
-
-  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
-
-  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
-  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
-  __ branch_destination(slow->continuation());
-}
-
-#endif // INCLUDE_ALL_GCS
-////////////////////////////////////////////////////////////////////////
-
-void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
-  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(_bs);
-  assert(sizeof(*(ct->byte_map_base)) == sizeof(jbyte), "adjust this code");
-  LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base);
-  if (addr->is_address()) {
-    LIR_Address* address = addr->as_address_ptr();
-    // ptr cannot be an object because we use this barrier for array card marks
-    // and addr can point in the middle of an array.
-    LIR_Opr ptr = new_pointer_register();
-    if (!address->index()->is_valid() && address->disp() == 0) {
-      __ move(address->base(), ptr);
-    } else {
-      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
-      __ leal(addr, ptr);
-    }
-    addr = ptr;
-  }
-  assert(addr->is_register(), "must be a register at this point");
-
-#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
-  CardTableModRef_post_barrier_helper(addr, card_table_base);
-#else
-  LIR_Opr tmp = new_pointer_register();
-  if (TwoOperandLIRForm) {
-    __ move(addr, tmp);
-    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
-  } else {
-    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
-  }
-
-  LIR_Address* card_addr;
-  if (can_inline_as_constant(card_table_base)) {
-    card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
-  } else {
-    card_addr = new LIR_Address(tmp, load_constant(card_table_base), T_BYTE);
-  }
-
-  LIR_Opr dirty = LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val());
-  if (UseCondCardMark) {
-    LIR_Opr cur_value = new_register(T_INT);
-    if (UseConcMarkSweepGC) {
-      __ membar_storeload();
-    }
-    __ move(card_addr, cur_value);
+//------------------------field access--------------------------------------
 
-    LabelObj* L_already_dirty = new LabelObj();
-    __ cmp(lir_cond_equal, cur_value, dirty);
-    __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
-    __ move(dirty, card_addr);
-    __ branch_destination(L_already_dirty->label());
-  } else {
-    if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
-      __ membar_storestore();
-    }
-    __ move(dirty, card_addr);
-  }
-#endif
+void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
+  assert(x->number_of_arguments() == 4, "wrong type");
+  LIRItem obj   (x->argument_at(0), this);  // object
+  LIRItem offset(x->argument_at(1), this);  // offset of field
+  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
+  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
+  assert(obj.type()->tag() == objectTag, "invalid type");
+
+  // On 64-bit platforms the offset type can be long; SPARC omits this assert:
+  // assert(offset.type()->tag() == intTag, "invalid type");
+
+  assert(cmp.type()->tag() == type->tag(), "invalid type");
+  assert(val.type()->tag() == type->tag(), "invalid type");
+
+  C1DecoratorSet decorators = C1_ACCESS_ON_HEAP | C1_MO_VOLATILE;
+  LIR_Opr result = access_cas_at(decorators, as_BasicType(type),
+                                 obj, offset, cmp, val);
+  set_result(x, result);
 }
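The C1DecoratorSet values used throughout this patch are defined in the new gc/shared/c1BarrierSetCodeGen.hpp, which is not part of this file. A plausible sketch, assuming a plain bit-set encoding (the underlying type and exact bit values are assumptions; only the names come from this diff):

    typedef uint16_t C1DecoratorSet;                      // assumed underlying type

    const C1DecoratorSet C1_ACCESS_ON_HEAP      = 1 << 0; // GC-visible heap access
    const C1DecoratorSet C1_ACCESS_ON_ARRAY     = 1 << 1; // base is an array element
    const C1DecoratorSet C1_ACCESS_ON_WEAK      = 1 << 2; // Reference.referent-style access
    const C1DecoratorSet C1_ACCESS_ON_ANONYMOUS = 1 << 3; // Unsafe access to an unknown field
    const C1DecoratorSet C1_MO_VOLATILE         = 1 << 4; // volatile memory ordering
    const C1DecoratorSet C1_MASK_BOOLEAN        = 1 << 5; // normalize booleans to 0/1
    const C1DecoratorSet C1_NEEDS_PATCHING      = 1 << 6; // field offset not yet resolved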
 
-
-//------------------------field access--------------------------------------
-
 // Comment copied from templateTable_i486.cpp
 // ----------------------------------------------------------------------------
 // Volatile variables demand their effects be made known to all CPU's in
 // order.  Store buffers on most chips allow reads & writes to reorder; the
 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of

@@ -1703,11 +1483,10 @@
 
 void LIRGenerator::do_StoreField(StoreField* x) {
   bool needs_patching = x->needs_patching();
   bool is_volatile = x->field()->is_volatile();
   BasicType field_type = x->field_type();
-  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
 
   CodeEmitInfo* info = NULL;
   if (needs_patching) {
     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
     info = state_for(x, x->state_before());

@@ -1756,52 +1535,126 @@
     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
   }
 
-  LIR_Address* address;
+  C1DecoratorSet decorators = C1_ACCESS_ON_HEAP;
+  if (is_volatile) {
+    decorators |= C1_MO_VOLATILE;
+  }
   if (needs_patching) {
-    // we need to patch the offset in the instruction so don't allow
-    // generate_address to try to be smart about emitting the -1.
-    // Otherwise the patching code won't know how to find the
-    // instruction to patch.
-    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
-  } else {
-    address = generate_address(object.result(), x->offset(), field_type);
+    decorators |= C1_NEEDS_PATCHING;
   }
 
-  if (is_volatile && os::is_MP()) {
-    __ membar_release();
+  access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
+                  value.result(), info ? new CodeEmitInfo(info) : NULL, info);
+}
+
+void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
+  assert(x->is_pinned(),"");
+  bool needs_range_check = x->compute_needs_range_check();
+  bool use_length = x->length() != NULL;
+  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
+  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
+                                         !get_jobject_constant(x->value())->is_null_object() ||
+                                         x->should_profile());
+
+  LIRItem array(x->array(), this);
+  LIRItem index(x->index(), this);
+  LIRItem value(x->value(), this);
+  LIRItem length(this);
+
+  array.load_item();
+  index.load_nonconstant();
+
+  if (use_length && needs_range_check) {
+    length.set_instruction(x->length());
+    length.load_item();
+  }
+  if (needs_store_check || x->check_boolean()) {
+    value.load_item();
+  } else {
+    value.load_for_store(x->elt_type());
   }
 
-  if (is_oop) {
-    // Do the pre-write barrier, if any.
-    pre_barrier(LIR_OprFact::address(address),
-                LIR_OprFact::illegalOpr /* pre_val */,
-                true /* do_load*/,
-                needs_patching,
-                (info ? new CodeEmitInfo(info) : NULL));
+  set_no_result(x);
+
+  // the CodeEmitInfo must be duplicated for each different
+  // LIR-instruction because spilling can occur anywhere between two
+  // instructions and so the debug information must be different
+  CodeEmitInfo* range_check_info = state_for(x);
+  CodeEmitInfo* null_check_info = NULL;
+  if (x->needs_null_check()) {
+    null_check_info = new CodeEmitInfo(range_check_info);
   }
 
-  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
-  if (needs_atomic_access && !needs_patching) {
-    volatile_field_store(value.result(), address, info);
+  if (GenerateRangeChecks && needs_range_check) {
+    if (use_length) {
+      __ cmp(lir_cond_belowEqual, length.result(), index.result());
+      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
-  } else {
+    } else {
-    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
-    __ store(value.result(), address, info, patch_code);
+      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
+      // range_check also does the null check
+      null_check_info = NULL;
+    }
   }
 
-  if (is_oop) {
-    // Store to object so mark the card of the header
-    post_barrier(object.result(), value.result());
+  if (GenerateArrayStoreCheck && needs_store_check) {
+    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
+    array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
   }
 
-  if (!support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
-    __ membar();
+  C1DecoratorSet decorators = C1_ACCESS_ON_HEAP | C1_ACCESS_ON_ARRAY;
+  if (x->elt_type() == T_BOOLEAN && x->check_boolean()) {
+    decorators |= C1_MASK_BOOLEAN;
   }
+
+  access_store_at(decorators, x->elt_type(), array, index.result(), value.result(), NULL, null_check_info);
 }
 
+LIR_Opr LIRGenerator::access_cas_at(C1DecoratorSet decorators, BasicType type,
+                                    LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  C1BarrierSetCodeGen* code_gen = bs->c1_code_gen();
+  return code_gen->cas_at(this, decorators, type,
+                          base, offset, cmp_value, new_value);
+}
+
+LIR_Opr LIRGenerator::access_swap_at(C1DecoratorSet decorators, BasicType type,
+                                     LIRItem& base, LIRItem& offset, LIRItem& value) {
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  C1BarrierSetCodeGen* code_gen = bs->c1_code_gen();
+  return code_gen->swap_at(this, decorators, type,
+                           base, offset, value);
+}
+
+LIR_Opr LIRGenerator::access_add_at(C1DecoratorSet decorators, BasicType type,
+                                    LIRItem& base, LIRItem& offset, LIRItem& value) {
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  C1BarrierSetCodeGen* code_gen = bs->c1_code_gen();
+  return code_gen->add_at(this, decorators, type,
+                          base, offset, value);
+}
+
+void LIRGenerator::access_store_at(C1DecoratorSet decorators, BasicType type,
+                                   LIRItem& base, LIR_Opr offset, LIR_Opr value,
+                                   CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  C1BarrierSetCodeGen* code_gen = bs->c1_code_gen();
+  code_gen->store_at(this, decorators, type,
+                     base, offset, value, patch_info, store_emit_info);
+}
+
+LIR_Opr LIRGenerator::access_load_at(C1DecoratorSet decorators, BasicType type,
+                                     LIRItem& base, LIR_Opr offset,
+                                     CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  C1BarrierSetCodeGen* code_gen = bs->c1_code_gen();
+  return code_gen->load_at(this, decorators, type,
+                           base, offset, patch_info, load_emit_info);
+}
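All five accessors above funnel into the same dispatch object. The interface, reconstructed from these call sites as a hedged sketch (the real declaration lives in gc/shared/c1BarrierSetCodeGen.hpp; the CHeapObj base class is an assumption):

    class C1BarrierSetCodeGen : public CHeapObj<mtGC> {
    public:
      virtual LIR_Opr load_at(LIRGenerator* gen, C1DecoratorSet decorators, BasicType type,
                              LIRItem& base, LIR_Opr offset,
                              CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) = 0;
      virtual void    store_at(LIRGenerator* gen, C1DecoratorSet decorators, BasicType type,
                               LIRItem& base, LIR_Opr offset, LIR_Opr value,
                               CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) = 0;
      virtual LIR_Opr cas_at(LIRGenerator* gen, C1DecoratorSet decorators, BasicType type,
                             LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) = 0;
      virtual LIR_Opr swap_at(LIRGenerator* gen, C1DecoratorSet decorators, BasicType type,
                              LIRItem& base, LIRItem& offset, LIRItem& value) = 0;
      virtual LIR_Opr add_at(LIRGenerator* gen, C1DecoratorSet decorators, BasicType type,
                             LIRItem& base, LIRItem& offset, LIRItem& value) = 0;
    };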
 
 void LIRGenerator::do_LoadField(LoadField* x) {
   bool needs_patching = x->needs_patching();
   bool is_volatile = x->field()->is_volatile();
   BasicType field_type = x->field_type();

@@ -2273,161 +2126,23 @@
   LIRItem off(x->offset(), this);
 
   off.load_item();
   src.load_item();
 
-  LIR_Opr value = rlock_result(x, x->basic_type());
+  C1DecoratorSet decorators = C1_ACCESS_ON_HEAP | C1_ACCESS_ON_ANONYMOUS;
 
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) {
-    __ membar();
-  }
-
-  get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
-
-#if INCLUDE_ALL_GCS
-  // We might be reading the value of the referent field of a
-  // Reference object in order to attach it back to the live
-  // object graph. If G1 is enabled then we need to record
-  // the value that is being returned in an SATB log buffer.
-  //
-  // We need to generate code similar to the following...
-  //
-  // if (offset == java_lang_ref_Reference::referent_offset) {
-  //   if (src != NULL) {
-  //     if (klass(src)->reference_type() != REF_NONE) {
-  //       pre_barrier(..., value, ...);
-  //     }
-  //   }
-  // }
-
-  if (UseG1GC && type == T_OBJECT) {
-    bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
-    bool gen_offset_check = true;    // Assume we need to generate the offset guard.
-    bool gen_source_check = true;    // Assume we need to check the src object for null.
-    bool gen_type_check = true;      // Assume we need to check the reference_type.
-
-    if (off.is_constant()) {
-      jlong off_con = (off.type()->is_int() ?
-                        (jlong) off.get_jint_constant() :
-                        off.get_jlong_constant());
-
-
-      if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
-        // The constant offset is something other than referent_offset.
-        // We can skip generating/checking the remaining guards and
-        // skip generation of the code stub.
-        gen_pre_barrier = false;
-      } else {
-        // The constant offset is the same as referent_offset -
-        // we do not need to generate a runtime offset check.
-        gen_offset_check = false;
-      }
-    }
-
-    // We don't need to generate stub if the source object is an array
-    if (gen_pre_barrier && src.type()->is_array()) {
-      gen_pre_barrier = false;
-    }
-
-    if (gen_pre_barrier) {
-      // We still need to continue with the checks.
-      if (src.is_constant()) {
-        ciObject* src_con = src.get_jobject_constant();
-        guarantee(src_con != NULL, "no source constant");
-
-        if (src_con->is_null_object()) {
-          // The constant src object is null - We can skip
-          // generating the code stub.
-          gen_pre_barrier = false;
-        } else {
-          // Non-null constant source object. We still have to generate
-          // the slow stub - but we don't need to generate the runtime
-          // null object check.
-          gen_source_check = false;
-        }
-      }
-    }
-    if (gen_pre_barrier && !PatchALot) {
-      // Can the klass of object be statically determined to be
-      // a sub-class of Reference?
-      ciType* type = src.value()->declared_type();
-      if ((type != NULL) && type->is_loaded()) {
-        if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
-          gen_type_check = false;
-        } else if (type->is_klass() &&
-                   !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
-          // Not Reference and not Object klass.
-          gen_pre_barrier = false;
-        }
-      }
-    }
-
-    if (gen_pre_barrier) {
-      LabelObj* Lcont = new LabelObj();
-
-      // We can have generate one runtime check here. Let's start with
-      // the offset check.
-      if (gen_offset_check) {
-        // if (offset != referent_offset) -> continue
-        // If offset is an int then we can do the comparison with the
-        // referent_offset constant; otherwise we need to move
-        // referent_offset into a temporary register and generate
-        // a reg-reg compare.
-
-        LIR_Opr referent_off;
-
-        if (off.type()->is_int()) {
-          referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
-        } else {
-          assert(off.type()->is_long(), "what else?");
-          referent_off = new_register(T_LONG);
-          __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
-        }
-        __ cmp(lir_cond_notEqual, off.result(), referent_off);
-        __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
-      }
-      if (gen_source_check) {
-        // offset is a const and equals referent offset
-        // if (source == null) -> continue
-        __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
-        __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
-      }
-      LIR_Opr src_klass = new_register(T_OBJECT);
-      if (gen_type_check) {
-        // We have determined that offset == referent_offset && src != null.
-        // if (src->_klass->_reference_type == REF_NONE) -> continue
-        __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
-        LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
-        LIR_Opr reference_type = new_register(T_INT);
-        __ move(reference_type_addr, reference_type);
-        __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
-        __ branch(lir_cond_equal, T_INT, Lcont->label());
-      }
-      {
-        // We have determined that src->_klass->_reference_type != REF_NONE
-        // so register the value in the referent field with the pre-barrier.
-        pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
-                    value  /* pre_val */,
-                    false  /* do_load */,
-                    false  /* patch */,
-                    NULL   /* info */);
-      }
-      __ branch_destination(Lcont->label());
+  if (type == T_BOOLEAN) {
+    decorators |= C1_MASK_BOOLEAN;
-    }
+  }
+  if (x->is_volatile()) {
+    decorators |= C1_MO_VOLATILE;
   }
-#endif // INCLUDE_ALL_GCS
-
-  if (x->is_volatile() && os::is_MP()) __ membar_acquire();
 
-  /* Normalize boolean value returned by unsafe operation, i.e., value  != 0 ? value = true : value false. */
-  if (type == T_BOOLEAN) {
-    LabelObj* equalZeroLabel = new LabelObj();
-    __ cmp(lir_cond_equal, value, 0);
-    __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
-    __ move(LIR_OprFact::intConst(1), value);
-    __ branch_destination(equalZeroLabel->label());
-  }
+  LIR_Opr result = access_load_at(decorators, type,
+                                  src, off.result(),
+                                  NULL, NULL);
+  set_result(x, result);
 }
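The removed block above hand-rolled the Reference.referent detection for G1 on every Unsafe read. Under the new scheme that logic belongs to the collector's load_at when it sees C1_ACCESS_ON_ANONYMOUS. A hedged sketch of the shape it could take; the commented pseudocode mirrors the removed guard:

    // Inside a hypothetical SATB collector's load_at override:
    if ((decorators & C1_ACCESS_ON_ANONYMOUS) != 0 && type == T_OBJECT) {
      // Emit the runtime guard the removed code generated inline:
      //   if (offset == java_lang_ref_Reference::referent_offset &&
      //       src != NULL && klass(src)->reference_type() != REF_NONE) {
      //     pre_barrier(..., result /* pre_val */, ...);
      //   }
      // Collectors without a SATB pre-barrier simply ignore the decorator.
    }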
 
 
 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
   BasicType type = x->basic_type();

@@ -2443,15 +2158,36 @@
   }
   off.load_item();
 
   set_no_result(x);
 
-  if (x->is_volatile() && os::is_MP()) __ membar_release();
-  put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
-  if (!support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) __ membar();
+  C1DecoratorSet decorators = C1_ACCESS_ON_HEAP | C1_ACCESS_ON_ANONYMOUS;
+  if (type == T_BOOLEAN) {
+    decorators |= C1_MASK_BOOLEAN;
+  }
+  if (x->is_volatile()) {
+    decorators |= C1_MO_VOLATILE;
+  }
+  access_store_at(decorators, type, src, off.result(), data.result(), NULL, NULL);
 }
 
+void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
+  BasicType type = x->basic_type();
+  LIRItem src(x->object(), this);
+  LIRItem off(x->offset(), this);
+  LIRItem value(x->value(), this);
+
+  C1DecoratorSet decorators = C1_ACCESS_ON_HEAP | C1_ACCESS_ON_ANONYMOUS | C1_MO_VOLATILE;
+
+  LIR_Opr result;
+  if (x->is_add()) {
+    result = access_add_at(decorators, type, src, off, value);
+  } else {
+    result = access_swap_at(decorators, type, src, off, value);
+  }
+  set_result(x, result);
+}
 
 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
   int lng = x->length();
 
   for (int i = 0; i < lng; i++) {

@@ -3764,12 +3500,11 @@
       default                   : ShouldNotReachHere(); break;
     }
   }
 }
 
-LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
-  if (x->check_boolean()) {
+LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
     LIR_Opr value_fixed = rlock_byte(T_BYTE);
     if (TwoOperandLIRForm) {
       __ move(value, value_fixed);
       __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
     } else {

@@ -3783,8 +3518,14 @@
     int diffbit = Klass::layout_helper_boolean_diffbit();
     __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
     __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
     __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
     value = value_fixed;
+  return value;
+}
+
+LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
+  if (x->check_boolean()) {
+    value = mask_boolean(array, value, null_check_info);
   }
   return value;
 }
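For reference, what the LIR emitted by mask_boolean computes at runtime, written as a plain C++ sketch (store_value is a made-up name; Klass::layout_helper() and Klass::layout_helper_boolean_diffbit() are the accessors used above):

    jbyte store_value(Klass* array_klass, jbyte value) {
      jint layout = array_klass->layout_helper();
      // The diffbit distinguishes boolean[] from byte[] layout helpers.
      bool is_boolean_array = (layout & Klass::layout_helper_boolean_diffbit()) != 0;
      // Booleans must be normalized to 0 or 1 before being stored.
      return is_boolean_array ? (value & 1) : value;
    }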