
src/share/vm/opto/graphKit.cpp

rev 12906 : [mq]: gc_interface

@@ -22,11 +22,10 @@
  *
  */
 
 #include "precompiled.hpp"
 #include "compiler/compileLog.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "memory/resourceArea.hpp"

@@ -600,12 +599,11 @@
       int offset = java_lang_Throwable::get_detailMessage_offset();
       const TypePtr* adr_typ = ex_con->add_offset(offset);
 
       Node *adr = basic_plus_adr(ex_node, ex_node, offset);
       const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
-      // Conservatively release stores of object references.
-      Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, MemNode::release);
+      Node *store = access_store_at(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, C2_ACCESS_ON_HEAP);
 
       add_exception_state(make_exception_state(ex_node));
       return;
     }
   }

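The hunk above replaces the dedicated oop-store helper with the new decorator-based access API. C2DecoratorSet is presumably a plain bitmask of access properties; a minimal sketch, where the typedef and every flag besides C2_ACCESS_ON_HEAP are assumptions rather than values taken from this patch:

    // Hypothetical shape of the decorator set: a bitmask that callers
    // combine with bitwise-or and the barrier-set code generator inspects.
    typedef uint64_t C2DecoratorSet;

    const C2DecoratorSet C2_ACCESS_ON_HEAP  = 1ULL << 0; // field/element of a heap object
    const C2DecoratorSet C2_ACCESS_ON_ARRAY = 1ULL << 1; // array element (precise card marks)
    const C2DecoratorSet C2_MISMATCHED      = 1ULL << 2; // type-punned (Unsafe) access

    // Example: an array oop store would pass
    //   C2_ACCESS_ON_HEAP | C2_ACCESS_ON_ARRAY
    // where the old API passed use_precise = true.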
@@ -1515,149 +1513,113 @@
     record_for_igvn(st);
 
   return st;
 }
 
-
-void GraphKit::pre_barrier(bool do_load,
-                           Node* ctl,
+Node* GraphKit::access_store_at(Node* ctl,
                            Node* obj,
                            Node* adr,
-                           uint  adr_idx,
+                                const TypePtr* adr_type,
                            Node* val,
-                           const TypeOopPtr* val_type,
-                           Node* pre_val,
-                           BasicType bt) {
+                                const Type* val_type,
+                                BasicType bt,
+                                C2DecoratorSet decorators) {
+  // Transformation of a value which could be a NULL pointer (CastPP #NULL)
+  // could be delayed during Parse (for example, in adjust_map_after_if()).
+  // Execute transformation here to avoid barrier generation in such case.
+  if (_gvn.type(val) == TypePtr::NULL_PTR) {
+    val = _gvn.makecon(TypePtr::NULL_PTR);
+  }
 
-  BarrierSet* bs = Universe::heap()->barrier_set();
   set_control(ctl);
-  switch (bs->kind()) {
-    case BarrierSet::G1SATBCTLogging:
-      g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
-      break;
-
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
-    case BarrierSet::ModRef:
-      break;
+  if (stopped()) {
+    return top(); // Dead path ?
+  }
 
-    default      :
-      ShouldNotReachHere();
+  assert(val != NULL, "not dead path");
 
-  }
+  C2BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->c2_code_gen();
+  return code_gen->store_at(this, obj, adr, adr_type, val, val_type, bt, decorators);
 }
 
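access_store_at no longer switches over BarrierSet::kind(); it delegates to a per-heap code generator. A minimal sketch of what that interface plausibly looks like, with the two members inferred from the call sites in this file and the base class assumed:

    class C2BarrierSetCodeGen : public CHeapObj<mtGC> { // base class assumed
    public:
      virtual Node* store_at(GraphKit* kit, Node* obj, Node* adr,
                             const TypePtr* adr_type, Node* val,
                             const Type* val_type, BasicType bt,
                             C2DecoratorSet decorators) = 0;
      virtual Node* load_at(GraphKit* kit, Node* obj, Node* adr,
                            const TypePtr* adr_type, const Type* val_type,
                            BasicType bt, C2DecoratorSet decorators) = 0;
      // ... plus cas_val_at, cas_bool_at, swap_at, fetch_and_add_at and
      // clone, matching the access_* entry points in this file. A card-table
      // GC emits its card mark in store_at; G1 adds the SATB pre-barrier too.
    };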
-bool GraphKit::can_move_pre_barrier() const {
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  switch (bs->kind()) {
-    case BarrierSet::G1SATBCTLogging:
-      return true; // Can move it if no safepoint
-
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
-    case BarrierSet::ModRef:
-      return true; // There is no pre-barrier
-
-    default      :
-      ShouldNotReachHere();
+Node* GraphKit::access_load_at(Node* obj,   // containing obj
+                               Node* adr,   // actual address to load from
+                               const TypePtr* adr_type,
+                               const Type* val_type,
+                               BasicType bt,
+                               C2DecoratorSet decorators) {
+  if (stopped()) {
+    return top(); // Dead path ?
   }
-  return false;
+
+  C2BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->c2_code_gen();
+  return code_gen->load_at(this, obj, adr, adr_type, val_type, bt, decorators);
 }
 
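A caller-side sketch of the new load path, mirroring the load_String_value() change later in this page; the helper name and holder_type parameter are illustrative only:

    // Load an oop field at 'offset' from 'obj_node' through the access API.
    Node* load_oop_field(GraphKit* kit, Node* obj_node, int offset,
                         const TypeInstPtr* holder_type) {
      const TypePtr* field_adr_type = holder_type->add_offset(offset);
      Node* adr = kit->basic_plus_adr(obj_node, obj_node, offset);
      return kit->access_load_at(obj_node, adr, field_adr_type,
                                 TypeInstPtr::BOTTOM, T_OBJECT,
                                 C2_ACCESS_ON_HEAP);
    }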
-void GraphKit::post_barrier(Node* ctl,
-                            Node* store,
+Node* GraphKit::access_cas_val_at(Node* ctl,
                             Node* obj,
                             Node* adr,
-                            uint  adr_idx,
-                            Node* val,
+                                  const TypePtr* adr_type,
+                                  int alias_idx,
+                                  Node* expected_val,
+                                  Node* new_val,
+                                  const Type* value_type,
                             BasicType bt,
-                            bool use_precise) {
-  BarrierSet* bs = Universe::heap()->barrier_set();
+                                  C2DecoratorSet decorators) {
   set_control(ctl);
-  switch (bs->kind()) {
-    case BarrierSet::G1SATBCTLogging:
-      g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
-      break;
-
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
-      write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
-      break;
-
-    case BarrierSet::ModRef:
-      break;
-
-    default      :
-      ShouldNotReachHere();
-
-  }
+  C2BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->c2_code_gen();
+  return code_gen->cas_val_at(this, obj, adr, adr_type, alias_idx, expected_val, new_val, value_type, bt, decorators);
 }
 
-Node* GraphKit::store_oop(Node* ctl,
+Node* GraphKit::access_cas_bool_at(Node* ctl,
                           Node* obj,
                           Node* adr,
                           const TypePtr* adr_type,
-                          Node* val,
-                          const TypeOopPtr* val_type,
+                                   int alias_idx,
+                                   Node* expected_val,
+                                   Node* new_val,
+                                   const Type* value_type,
                           BasicType bt,
-                          bool use_precise,
-                          MemNode::MemOrd mo,
-                          bool mismatched) {
-  // Transformation of a value which could be NULL pointer (CastPP #NULL)
-  // could be delayed during Parse (for example, in adjust_map_after_if()).
-  // Execute transformation here to avoid barrier generation in such case.
-  if (_gvn.type(val) == TypePtr::NULL_PTR)
-    val = _gvn.makecon(TypePtr::NULL_PTR);
-
+                                   C2DecoratorSet decorators) {
   set_control(ctl);
-  if (stopped()) return top(); // Dead path ?
-
-  assert(bt == T_OBJECT, "sanity");
-  assert(val != NULL, "not dead path");
-  uint adr_idx = C->get_alias_index(adr_type);
-  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
+  C2BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->c2_code_gen();
+  return code_gen->cas_bool_at(this, obj, adr, adr_type, alias_idx, expected_val, new_val, value_type, bt, decorators);
+}
 
-  pre_barrier(true /* do_load */,
-              control(), obj, adr, adr_idx, val, val_type,
-              NULL /* pre_val */,
-              bt);
-
-  Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
-  post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
-  return store;
+Node* GraphKit::access_swap_at(Node* ctl,
+                               Node* obj,
+                               Node* adr,
+                               const TypePtr* adr_type,
+                               int alias_idx,
+                               Node* new_val,
+                               const Type* value_type,
+                               BasicType bt,
+                               C2DecoratorSet decorators) {
+  set_control(ctl);
+  C2BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->c2_code_gen();
+  return code_gen->swap_at(this, obj, adr, adr_type, alias_idx, new_val, value_type, bt, decorators);
 }
 
-// Could be an array or object we don't know at compile time (unsafe ref.)
-Node* GraphKit::store_oop_to_unknown(Node* ctl,
-                             Node* obj,   // containing obj
-                             Node* adr,  // actual address to store val at
+Node* GraphKit::access_fetch_and_add_at(Node* ctl,
+                                        Node* obj,
+                                        Node* adr,
                              const TypePtr* adr_type,
-                             Node* val,
+                                        int alias_idx,
+                                        Node* new_val,
+                                        const Type* value_type,
                              BasicType bt,
-                             MemNode::MemOrd mo,
-                             bool mismatched) {
-  Compile::AliasType* at = C->alias_type(adr_type);
-  const TypeOopPtr* val_type = NULL;
-  if (adr_type->isa_instptr()) {
-    if (at->field() != NULL) {
-      // known field.  This code is a copy of the do_put_xxx logic.
-      ciField* field = at->field();
-      if (!field->type()->is_loaded()) {
-        val_type = TypeInstPtr::BOTTOM;
-      } else {
-        val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
-      }
-    }
-  } else if (adr_type->isa_aryptr()) {
-    val_type = adr_type->is_aryptr()->elem()->make_oopptr();
-  }
-  if (val_type == NULL) {
-    val_type = TypeInstPtr::BOTTOM;
-  }
-  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
+                                        C2DecoratorSet decorators) {
+  set_control(ctl);
+  C2BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->c2_code_gen();
+  return code_gen->fetch_and_add_at(this, obj, adr, adr_type, alias_idx, new_val, value_type, bt, decorators);
 }
 
+void GraphKit::access_clone(Node* ctl, Node* src, Node* dst, Node* size, bool is_array) {
+  set_control(ctl);
+  C2BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->c2_code_gen();
+  code_gen->clone(this, src, dst, size, is_array);
+}
 
 //-------------------------array_element_address-------------------------
 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
                                       const TypeInt* sizetype, Node* ctrl) {
   uint shift  = exact_log2(type2aelembytes(elembt));

@@ -3756,562 +3718,21 @@
   }
   // loop's limit check predicate should be near the loop.
   add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
 }
 
-//----------------------------- store barriers ----------------------------
-#define __ ideal.
-
 void GraphKit::sync_kit(IdealKit& ideal) {
-  set_all_memory(__ merged_memory());
-  set_i_o(__ i_o());
-  set_control(__ ctrl());
+  set_all_memory(ideal.merged_memory());
+  set_i_o(ideal.i_o());
+  set_control(ideal.ctrl());
 }
 
 void GraphKit::final_sync(IdealKit& ideal) {
   // Final sync IdealKit and graphKit.
   sync_kit(ideal);
 }
 
-Node* GraphKit::byte_map_base_node() {
-  // Get base of card map
-  CardTableModRefBS* ct =
-    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
-  if (ct->byte_map_base != NULL) {
-    return makecon(TypeRawPtr::make((address)ct->byte_map_base));
-  } else {
-    return null();
-  }
-}
-
-// vanilla/CMS post barrier
-// Insert a write-barrier store.  This is to let generational GC work; we have
-// to flag all oop-stores before the next GC point.
-void GraphKit::write_barrier_post(Node* oop_store,
-                                  Node* obj,
-                                  Node* adr,
-                                  uint  adr_idx,
-                                  Node* val,
-                                  bool use_precise) {
-  // No store check needed if we're storing a NULL or an old object
-  // (latter case is probably a string constant). The concurrent
-  // mark sweep garbage collector, however, needs to have all non-NULL
-  // oop updates flagged via card-marks.
-  if (val != NULL && val->is_Con()) {
-    // must be either an oop or NULL
-    const Type* t = val->bottom_type();
-    if (t == TypePtr::NULL_PTR || t == Type::TOP)
-      // stores of null never (?) need barriers
-      return;
-  }
-
-  if (use_ReduceInitialCardMarks()
-      && obj == just_allocated_object(control())) {
-    // We can skip marks on a freshly-allocated object in Eden.
-    // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
-    // That routine informs GC to take appropriate compensating steps,
-    // upon a slow-path allocation, so as to make this card-mark
-    // elision safe.
-    return;
-  }
-
-  if (!use_precise) {
-    // All card marks for a (non-array) instance are in one place:
-    adr = obj;
-  }
-  // (Else it's an array (or unknown), and we want more precise card marks.)
-  assert(adr != NULL, "");
-
-  IdealKit ideal(this, true);
-
-  // Convert the pointer to an int prior to doing math on it
-  Node* cast = __ CastPX(__ ctrl(), adr);
-
-  // Divide by card size
-  assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef),
-         "Only one we handle so far.");
-  Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
-
-  // Combine card table base and card offset
-  Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
-
-  // Get the alias_index for raw card-mark memory
-  int adr_type = Compile::AliasIdxRaw;
-  Node*   zero = __ ConI(0); // Dirty card value
-  BasicType bt = T_BYTE;
-
-  if (UseConcMarkSweepGC && UseCondCardMark) {
-    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
-    __ sync_kit(this);
-  }
-
-  if (UseCondCardMark) {
-    // The classic GC reference write barrier is typically implemented
-    // as a store into the global card mark table.  Unfortunately
-    // unconditional stores can result in false sharing and excessive
-    // coherence traffic as well as false transactional aborts.
-    // UseCondCardMark enables MP "polite" conditional card mark
-    // stores.  In theory we could relax the load from ctrl() to
-    // no_ctrl, but that doesn't buy much latitude.
-    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
-    __ if_then(card_val, BoolTest::ne, zero);
-  }
-
-  // Smash zero into card
-  if( !UseConcMarkSweepGC ) {
-    __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::unordered);
-  } else {
-    // Specialized path for CM store barrier
-    __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
-  }
-
-  if (UseCondCardMark) {
-    __ end_if();
-  }
-
-  // Final sync IdealKit and GraphKit.
-  final_sync(ideal);
-}
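The IdealKit sequence removed above is the standard card-table dirtying. In plain C++ the same address arithmetic reads as below; a sketch assuming 512-byte cards (card_shift == 9), with byte_map_base being the biased base the card table installs so the shifted address can index it directly:

    #include <stdint.h>

    static const int card_shift = 9;   // log2 of the card size, assumed 512 bytes
    static uint8_t* byte_map_base;     // biased card-map base, installed by the GC

    static inline void card_mark(void* field_addr) {
      // Same math as the graph above: shift the address, index the map,
      // and smash zero (dirty_card_val()) into the card.
      byte_map_base[(uintptr_t)field_addr >> card_shift] = 0;
    }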
-/*
- * Determine if the G1 pre-barrier can be removed. The pre-barrier is
- * required by SATB to make sure all objects live at the start of the
- * marking are kept alive; therefore, all reference updates need to log
- * the previous reference before it is overwritten.
- *
- * If the previous value is NULL there is no need to save the old value.
- * References that are NULL are filtered during runtime by the barrier
- * code to avoid unnecessary queuing.
- *
- * However in the case of newly allocated objects it might be possible to
- * prove that the reference about to be overwritten is NULL during compile
- * time and avoid adding the barrier code completely.
- *
- * The compiler needs to determine that the object in which a field is about
- * to be written is newly allocated, and that no prior store to the same field
- * has happened since the allocation.
- *
- * Returns true if the pre-barrier can be removed
- */
-bool GraphKit::g1_can_remove_pre_barrier(PhaseTransform* phase, Node* adr,
-                                         BasicType bt, uint adr_idx) {
-  intptr_t offset = 0;
-  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
-  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
-
-  if (offset == Type::OffsetBot) {
-    return false; // cannot unalias unless there are precise offsets
-  }
-
-  if (alloc == NULL) {
-    return false; // No allocation found
-  }
-
-  intptr_t size_in_bytes = type2aelembytes(bt);
-
-  Node* mem = memory(adr_idx); // start searching here...
-
-  for (int cnt = 0; cnt < 50; cnt++) {
-
-    if (mem->is_Store()) {
-
-      Node* st_adr = mem->in(MemNode::Address);
-      intptr_t st_offset = 0;
-      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
-
-      if (st_base == NULL) {
-        break; // inscrutable pointer
-      }
-
-      // We have found a store with the same base and offset as ours, so break.
-      if (st_base == base && st_offset == offset) {
-        break;
-      }
-
-      if (st_offset != offset && st_offset != Type::OffsetBot) {
-        const int MAX_STORE = BytesPerLong;
-        if (st_offset >= offset + size_in_bytes ||
-            st_offset <= offset - MAX_STORE ||
-            st_offset <= offset - mem->as_Store()->memory_size()) {
-          // Success:  The offsets are provably independent.
-          // (You may ask, why not just test st_offset != offset and be done?
-          // The answer is that stores of different sizes can co-exist
-          // in the same sequence of RawMem effects.  We sometimes initialize
-          // a whole 'tile' of array elements with a single jint or jlong.)
-          mem = mem->in(MemNode::Memory);
-          continue; // advance through independent store memory
-        }
-      }
-
-      if (st_base != base
-          && MemNode::detect_ptr_independence(base, alloc, st_base,
-                                              AllocateNode::Ideal_allocation(st_base, phase),
-                                              phase)) {
-        // Success:  The bases are provably independent.
-        mem = mem->in(MemNode::Memory);
-        continue; // advance through independent store memory
-      }
-    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
-
-      InitializeNode* st_init = mem->in(0)->as_Initialize();
-      AllocateNode* st_alloc = st_init->allocation();
-
-      // Make sure that we are looking at the same allocation site.
-      // The alloc variable is guaranteed to not be null here from earlier check.
-      if (alloc == st_alloc) {
-        // Check that the initialization is storing NULL so that no previous store
-        // has been moved up to directly write a reference
-        Node* captured_store = st_init->find_captured_store(offset,
-                                                            type2aelembytes(T_OBJECT),
-                                                            phase);
-        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
-          return true;
-        }
-      }
-    }
-
-    // Unless there is an explicit 'continue', we must bail out here,
-    // because 'mem' is an inscrutable memory state (e.g., a call).
-    break;
-  }
-
-  return false;
-}
-
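The offset test in the loop above deserves a worked example. The sketch below restates the predicate and shows why a plain st_offset != offset would be wrong when initialization code tiles several elements with one jlong store; all concrete values are illustrative:

    #include <stdint.h>

    // Our oop slot: bytes [16, 20) (offset 16, 4-byte compressed oop).
    // A tile-initializing jlong store at offset 12 covers [12, 20) and
    // overlaps it, even though st_offset(12) != offset(16).
    static bool provably_independent(intptr_t st_offset, intptr_t st_size,
                                     intptr_t offset, intptr_t size_in_bytes) {
      const intptr_t MAX_STORE = 8; // BytesPerLong
      return st_offset >= offset + size_in_bytes ||  // entirely above our slot
             st_offset <= offset - MAX_STORE     ||  // below even the widest store
             st_offset <= offset - st_size;          // below this particular store
    }
    // provably_independent(12, 8, 16, 4) == false: the walk correctly stops.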
-// G1 pre/post barriers
-void GraphKit::g1_write_barrier_pre(bool do_load,
-                                    Node* obj,
-                                    Node* adr,
-                                    uint alias_idx,
-                                    Node* val,
-                                    const TypeOopPtr* val_type,
-                                    Node* pre_val,
-                                    BasicType bt) {
-
-  // Some sanity checks
-  // Note: val is unused in this routine.
-
-  if (do_load) {
-    // We need to generate the load of the previous value
-    assert(obj != NULL, "must have a base");
-    assert(adr != NULL, "where are we loading from?");
-    assert(pre_val == NULL, "loaded already?");
-    assert(val_type != NULL, "need a type");
-
-    if (use_ReduceInitialCardMarks()
-        && g1_can_remove_pre_barrier(&_gvn, adr, bt, alias_idx)) {
-      return;
-    }
-
-  } else {
-    // In this case both val_type and alias_idx are unused.
-    assert(pre_val != NULL, "must be loaded already");
-    // Nothing to be done if pre_val is null.
-    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
-    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
-  }
-  assert(bt == T_OBJECT, "or we shouldn't be here");
-
-  IdealKit ideal(this, true);
-
-  Node* tls = __ thread(); // ThreadLocalStorage
-
-  Node* no_ctrl = NULL;
-  Node* no_base = __ top();
-  Node* zero  = __ ConI(0);
-  Node* zeroX = __ ConX(0);
-
-  float likely  = PROB_LIKELY(0.999);
-  float unlikely  = PROB_UNLIKELY(0.999);
-
-  BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
-  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");
-
-  // Offsets into the thread
-  const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +  // 648
-                                          SATBMarkQueue::byte_offset_of_active());
-  const int index_offset   = in_bytes(JavaThread::satb_mark_queue_offset() +  // 656
-                                          SATBMarkQueue::byte_offset_of_index());
-  const int buffer_offset  = in_bytes(JavaThread::satb_mark_queue_offset() +  // 652
-                                          SATBMarkQueue::byte_offset_of_buf());
-
-  // Now the actual pointers into the thread
-  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
-  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
-  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));
-
-  // Now some of the values
-  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
-
-  // if (!marking)
-  __ if_then(marking, BoolTest::ne, zero, unlikely); {
-    BasicType index_bt = TypeX_X->basic_type();
-    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
-    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
-
-    if (do_load) {
-      // load original value
-      // alias_idx correct??
-      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
-    }
-
-    // if (pre_val != NULL)
-    __ if_then(pre_val, BoolTest::ne, null()); {
-      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
-
-      // is the queue for this thread full?
-      __ if_then(index, BoolTest::ne, zeroX, likely); {
-
-        // decrement the index
-        Node* next_index = _gvn.transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
-
-        // Now get the buffer location we will log the previous value into and store it
-        Node *log_addr = __ AddP(no_base, buffer, next_index);
-        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
-        // update the index
-        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
-
-      } __ else_(); {
-
-        // logging buffer is full, call the runtime
-        const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
-        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
-      } __ end_if();  // (!index)
-    } __ end_if();  // (pre_val != NULL)
-  } __ end_if();  // (!marking)
-
-  // Final sync IdealKit and GraphKit.
-  final_sync(ideal);
-}
-
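Stripped of IdealKit plumbing, the pre-barrier graph built above encodes the following per-thread logic. This is a sketch; the struct is a hypothetical stand-in for the JavaThread SATB queue fields that the code loads via the offsets computed above:

    #include <stddef.h>
    #include <stdint.h>

    struct SATBQueueFields {   // hypothetical mirror of the JavaThread fields
      int    active;           // SATBMarkQueue 'active' flag
      size_t index;            // byte index into the buffer, counting down
      void** buffer;
    };

    extern void g1_wb_pre_runtime(void* pre_val, SATBQueueFields* t); // slow path

    static void satb_pre_barrier(SATBQueueFields* t, void* pre_val) {
      if (t->active == 0) return;       // not marking: nothing to log
      if (pre_val == NULL) return;      // NULL previous values are filtered out
      if (t->index != 0) {              // fast path: room left in the buffer
        t->index -= sizeof(intptr_t);
        t->buffer[t->index / sizeof(intptr_t)] = pre_val;
      } else {
        g1_wb_pre_runtime(pre_val, t);  // buffer full: call into the runtime
      }
    }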
-/*
- * G1, like any GC with a Young Generation, requires a way to keep track of
- * references from the Old Generation to the Young Generation to make sure all
- * live objects are found. G1 also needs to keep track of object references
- * between different regions to enable evacuation of old regions, which is done
- * as part of mixed collections. References are tracked in remembered sets,
- * which are continuously updated as references are written, with the help of
- * the post-barrier.
- *
- * To reduce the number of updates to the remembered set, the post-barrier
- * filters out updates to fields in objects located in the Young Generation,
- * in the same region as the reference, when NULL is being written, or
- * when the card is already marked as dirty by an earlier write.
- *
- * Under certain circumstances it is possible to avoid generating the
- * post-barrier completely if it is possible during compile time to prove
- * the object is newly allocated and that no safepoint exists between the
- * allocation and the store.
- *
- * In the case of slow allocation, the allocation code must handle the barrier
- * as part of the allocation if the allocated object is not located in the
- * nursery; this would happen for humongous objects. This is similar to
- * how CMS is required to handle this case, see the comments for the method
- * CollectedHeap::new_store_pre_barrier and OptoRuntime::new_store_pre_barrier.
- * A deferred card mark is required for these objects and handled in the above
- * mentioned methods.
- *
- * Returns true if the post barrier can be removed
- */
-bool GraphKit::g1_can_remove_post_barrier(PhaseTransform* phase, Node* store,
-                                          Node* adr) {
-  intptr_t      offset = 0;
-  Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
-  AllocateNode* alloc  = AllocateNode::Ideal_allocation(base, phase);
-
-  if (offset == Type::OffsetBot) {
-    return false; // cannot unalias unless there are precise offsets
-  }
-
-  if (alloc == NULL) {
-     return false; // No allocation found
-  }
-
-  // Start search from Store node
-  Node* mem = store->in(MemNode::Control);
-  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
-
-    InitializeNode* st_init = mem->in(0)->as_Initialize();
-    AllocateNode*  st_alloc = st_init->allocation();
-
-    // Make sure we are looking at the same allocation
-    if (alloc == st_alloc) {
-      return true;
-    }
-  }
-
-  return false;
-}
-
-//
-// Update the card table and add card address to the queue
-//
-void GraphKit::g1_mark_card(IdealKit& ideal,
-                            Node* card_adr,
-                            Node* oop_store,
-                            uint oop_alias_idx,
-                            Node* index,
-                            Node* index_adr,
-                            Node* buffer,
-                            const TypeFunc* tf) {
-
-  Node* zero  = __ ConI(0);
-  Node* zeroX = __ ConX(0);
-  Node* no_base = __ top();
-  BasicType card_bt = T_BYTE;
-  // Smash zero into card. MUST BE ORDERED WRT TO STORE
-  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
-
-  //  Now do the queue work
-  __ if_then(index, BoolTest::ne, zeroX); {
-
-    Node* next_index = _gvn.transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
-    Node* log_addr = __ AddP(no_base, buffer, next_index);
-
-    // Order, see storeCM.
-    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
-    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);
-
-  } __ else_(); {
-    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
-  } __ end_if();
-
-}
-
-void GraphKit::g1_write_barrier_post(Node* oop_store,
-                                     Node* obj,
-                                     Node* adr,
-                                     uint alias_idx,
-                                     Node* val,
-                                     BasicType bt,
-                                     bool use_precise) {
-  // If we are writing a NULL then we need no post barrier
-
-  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
-    // Must be NULL
-    const Type* t = val->bottom_type();
-    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
-    // No post barrier if writing NULL
-    return;
-  }
-
-  if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) {
-    // We can skip marks on a freshly-allocated object in Eden.
-    // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
-    // That routine informs GC to take appropriate compensating steps,
-    // upon a slow-path allocation, so as to make this card-mark
-    // elision safe.
-    return;
-  }
-
-  if (use_ReduceInitialCardMarks()
-      && g1_can_remove_post_barrier(&_gvn, oop_store, adr)) {
-    return;
-  }
-
-  if (!use_precise) {
-    // All card marks for a (non-array) instance are in one place:
-    adr = obj;
-  }
-  // (Else it's an array (or unknown), and we want more precise card marks.)
-  assert(adr != NULL, "");
-
-  IdealKit ideal(this, true);
-
-  Node* tls = __ thread(); // ThreadLocalStorage
-
-  Node* no_base = __ top();
-  float likely  = PROB_LIKELY(0.999);
-  float unlikely  = PROB_UNLIKELY(0.999);
-  Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
-  Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
-  Node* zeroX = __ ConX(0);
-
-  // Get the alias_index for raw card-mark memory
-  const TypePtr* card_type = TypeRawPtr::BOTTOM;
-
-  const TypeFunc *tf = OptoRuntime::g1_wb_post_Type();
-
-  // Offsets into the thread
-  const int index_offset  = in_bytes(JavaThread::dirty_card_queue_offset() +
-                                     DirtyCardQueue::byte_offset_of_index());
-  const int buffer_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
-                                     DirtyCardQueue::byte_offset_of_buf());
-
-  // Pointers into the thread
-
-  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
-  Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));
-
-  // Now some values
-  // Use ctrl to avoid hoisting these values past a safepoint, which could
-  // potentially reset these fields in the JavaThread.
-  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
-  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
-
-  // Convert the store obj pointer to an int prior to doing math on it
-  // Must use ctrl to prevent "integerized oop" existing across safepoint
-  Node* cast =  __ CastPX(__ ctrl(), adr);
-
-  // Divide pointer by card size
-  Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
-
-  // Combine card table base and card offset
-  Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
-
-  // If we know the value being stored, check whether the store crosses regions.
-
-  if (val != NULL) {
-    // Does the store cause us to cross regions?
-
-    // Should be able to do an unsigned compare of region_size instead of
-    // an extra shift. Do we have an unsigned compare??
-    // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
-    Node* xor_res =  __ URShiftX ( __ XorX( cast,  __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));
-
-    // if (xor_res == 0) same region so skip
-    __ if_then(xor_res, BoolTest::ne, zeroX); {
-
-      // No barrier if we are storing a NULL
-      __ if_then(val, BoolTest::ne, null(), unlikely); {
-
-        // Ok must mark the card if not already dirty
-
-        // load the original value of the card
-        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
-
-        __ if_then(card_val, BoolTest::ne, young_card); {
-          sync_kit(ideal);
-          // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier.
-          insert_mem_bar(Op_MemBarVolatile, oop_store);
-          __ sync_kit(this);
-
-          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
-          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
-            g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
-          } __ end_if();
-        } __ end_if();
-      } __ end_if();
-    } __ end_if();
-  } else {
-    // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
-    // We don't need a barrier here if the destination is a newly allocated object
-    // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
-    // are set to 'g1_young_gen' (see G1SATBCardTableModRefBS::verify_g1_young_region()).
-    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
-    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
-    __ if_then(card_val, BoolTest::ne, young_card); {
-      g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
-    } __ end_if();
-  }
-
-  // Final sync IdealKit and GraphKit.
-  final_sync(ideal);
-}
-#undef __
-
-
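The region-crossing filter in the removed g1_write_barrier_post XORs the two addresses and shifts by the region-size log, so any surviving bit means the store crosses regions. A one-function sketch, with the region size assumed to be 1 MB (LogOfHRGrainBytes == 20):

    #include <stdint.h>

    static const int LogOfHRGrainBytes = 20;  // assumed 1 MB G1 regions

    // True iff 'field' and 'new_val' lie in different regions: some bit at
    // or above the region-size bit differs, so the XOR survives the shift.
    static inline bool crosses_regions(void* field, void* new_val) {
      return (((uintptr_t)field ^ (uintptr_t)new_val) >> LogOfHRGrainBytes) != 0;
    }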
 Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
   Node* len = load_array_length(load_String_value(ctrl, str));
   Node* coder = load_String_coder(ctrl, str);
   // Divide length by 2 if coder is UTF16
   return _gvn.transform(new RShiftINode(len, coder));

@@ -4323,13 +3744,12 @@
                                                      false, NULL, 0);
   const TypePtr* value_field_type = string_type->add_offset(value_offset);
   const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                   TypeAry::make(TypeInt::BYTE, TypeInt::POS),
                                                   ciTypeArrayKlass::make(T_BYTE), true, 0);
-  int value_field_idx = C->get_alias_index(value_field_type);
-  Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
-                         value_type, T_OBJECT, value_field_idx, MemNode::unordered);
+  Node* p = basic_plus_adr(str, str, value_offset);
+  Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT, C2_ACCESS_ON_HEAP);
   // String.value field is known to be @Stable.
   if (UseImplicitStableValues) {
     load = cast_array_to_stable(load, value_type);
   }
   return load;

@@ -4351,12 +3771,12 @@
 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
   int value_offset = java_lang_String::value_offset_in_bytes();
   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                      false, NULL, 0);
   const TypePtr* value_field_type = string_type->add_offset(value_offset);
-  store_oop_to_object(ctrl, str,  basic_plus_adr(str, value_offset), value_field_type,
-      value, TypeAryPtr::BYTES, T_OBJECT, MemNode::unordered);
+  access_store_at(ctrl, str,  basic_plus_adr(str, value_offset), value_field_type,
+                  value, TypeAryPtr::BYTES, T_OBJECT, C2_ACCESS_ON_HEAP);
 }
 
 void GraphKit::store_String_coder(Node* ctrl, Node* str, Node* value) {
   int coder_offset = java_lang_String::coder_offset_in_bytes();
   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),