/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/type.hpp"
#include "gc/g1/c2G1BSCodeGen.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/heapRegion.hpp"
#include "utilities/macros.hpp"

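// Call signature of the G1 pre-barrier runtime entry point: it takes the
// previously stored oop and the current thread, and returns nothing.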
const TypeFunc *C2G1BSCodeGen::g1_wb_pre_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

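// Call signature of the G1 post-barrier runtime entry point: it takes the
// card address and the current thread, and returns nothing.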
const TypeFunc *C2G1BSCodeGen::g1_wb_post_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;  // Card addr
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

#define __ ideal.
/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of the
 * marking are kept alive; therefore all reference updates need to log
 * the previous reference stored at the field before writing.
 *
 * If the previous value is NULL there is no need to save the old value.
 * References that are NULL are filtered at runtime by the barrier
 * code to avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove at compile time that the reference about to be overwritten is NULL
 * and avoid adding the barrier code completely.
 *
 * The compiler needs to determine that the object in which a field is about
 * to be written is newly allocated, and that no prior store to the same field
 * has happened since the allocation.
 *
 * Returns true if the pre-barrier can be removed.
 */
bool C2G1BSCodeGen::g1_can_remove_pre_barrier(GraphKit* kit,
                                              PhaseTransform* phase,
                                              Node* adr,
                                              BasicType bt,
                                              uint adr_idx) {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = kit->memory(adr_idx); // start searching here...

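  // Walk the memory graph backwards a bounded number of steps, skipping
  // stores that are provably independent of (base, offset), and look for
  // the InitializeNode of the allocation that produced 'base'.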
  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed not to be null here by the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL, i.e. no previous store
        // has been moved up into the initialization to write a reference directly.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

// G1 pre/post barriers
void C2G1BSCodeGen::pre_barrier(GraphKit* kit,
                                bool do_load,
                                Node* ctl,
                                Node* obj,
                                Node* adr,
                                uint alias_idx,
                                Node* val,
                                const TypeOopPtr* val_type,
                                Node* pre_val,
                                BasicType bt) {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (use_ReduceInitialCardMarks()
        && g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

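  // Roughly, the code emitted below is:
  //
  //   if (marking is active) {
  //     pre_val = *adr;                                // only if do_load
  //     if (pre_val != NULL) {
  //       if (SATB queue index != 0) {
  //         log pre_val into the thread-local SATB buffer and bump the index;
  //       } else {
  //         call the g1_wb_pre runtime stub(pre_val, thread);
  //       }
  //     }
  //   }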
  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_ctrl = NULL;
  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);

  BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");

  // Offsets into the thread
  const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +  // 648
                                          SATBMarkQueue::byte_offset_of_active());
  const int index_offset   = in_bytes(JavaThread::satb_mark_queue_offset() +  // 656
                                          SATBMarkQueue::byte_offset_of_index());
  const int buffer_offset  = in_bytes(JavaThread::satb_mark_queue_offset() +  // 652
                                          SATBMarkQueue::byte_offset_of_buf());

  // Now the actual pointers into the thread
  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);

  // if (marking != 0), i.e. only while concurrent marking is active
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node *log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = g1_wb_pre_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSet::g1_wb_pre), "g1_wb_pre", pre_val, tls);
      } __ end_if();  // (!index)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (marking != 0)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

/*
 * G1, like any GC with a Young Generation, requires a way to keep track of
 * references from the Old Generation to the Young Generation to make sure all
 * live objects are found. G1 also needs to keep track of object references
 * between different regions to enable evacuation of old regions, which is done
 * as part of mixed collections. References are tracked in remembered sets,
 * which are continuously updated as references are written, with the help of
 * the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates to fields in objects located in the Young Generation,
 * updates within the same region as the referenced object, writes of NULL,
 * and writes to cards already marked as dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, if it can be proven at compile time that the
 * object is newly allocated and that no safepoint exists between the
 * allocation and the store.
 *
 * In the case of a slow-path allocation, the allocation code must handle the
 * barrier as part of the allocation when the allocated object is not located
 * in the nursery; this happens for humongous objects. This is similar to how
 * CMS is required to handle this case, see the comments for the method
 * CollectedHeap::new_deferred_store_barrier and OptoRuntime::new_deferred_store_barrier.
 * A deferred card mark is required for these objects and is handled in the
 * above mentioned methods.
 *
 * Returns true if the post-barrier can be removed.
 */
bool C2G1BSCodeGen::g1_can_remove_post_barrier(GraphKit* kit,
                                               PhaseTransform* phase, Node* store,
                                               Node* adr) {
  intptr_t      offset = 0;
  Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc  = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  // Start search from Store node
  Node* mem = store->in(MemNode::Control);
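  // If the store's control input is still the control projection of the same
  // allocation's InitializeNode, no safepoint separates the allocation from
  // this store, so the post-barrier can be removed.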
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode*  st_alloc = st_init->allocation();

    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}

//
// Update the card table and add card address to the queue
//
void C2G1BSCodeGen::g1_mark_card(GraphKit* kit,
                                 IdealKit& ideal,
                                 Node* card_adr,
                                 Node* oop_store,
                                 uint oop_alias_idx,
                                 Node* index,
                                 Node* index_adr,
                                 Node* buffer,
                                 const TypeFunc* tf) {
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  // Smash zero (dirty) into the card. MUST BE ORDERED WRT THE OOP STORE.
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

  //  Now do the queue work
  __ if_then(index, BoolTest::ne, zeroX); {

    Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
    Node* log_addr = __ AddP(no_base, buffer, next_index);

    // Order, see storeCM.
    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);

  } __ else_(); {
    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSet::g1_wb_post), "g1_wb_post", card_adr, __ thread());
  } __ end_if();

}

void C2G1BSCodeGen::post_barrier(GraphKit* kit,
                                 Node* ctl,
                                 Node* oop_store,
                                 Node* obj,
                                 Node* adr,
                                 uint alias_idx,
                                 Node* val,
                                 BasicType bt,
                                 bool use_precise) {
  // If we are writing a NULL then we need no post barrier

  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    // Must be NULL
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    // No post barrier if writing NULL
    return;
  }

  if (use_ReduceInitialCardMarks() && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (use_ReduceInitialCardMarks()
      && g1_can_remove_post_barrier(kit, &kit->gvn(), oop_store, adr)) {
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

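  // Roughly, the code emitted below is:
  //
  //   card_adr = byte_map_base + (adr >> card_shift);
  //   if (adr and val are in different regions) {     // only if val is known
  //     if (val != NULL) {
  //       if (*card_adr != young_card) {
  //         StoreLoad barrier;
  //         if (*card_adr != dirty_card) {
  //           dirty the card and enqueue it (or call the g1_wb_post stub);
  //         }
  //       }
  //     }
  //   }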
  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  float unlikely  = PROB_UNLIKELY(0.999);
  Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)G1CardTable::dirty_card_val());
  Node* zeroX = __ ConX(0);

  const TypeFunc *tf = g1_wb_post_Type();

  // Offsets into the thread
  const int index_offset  = in_bytes(JavaThread::dirty_card_queue_offset() +
                                     DirtyCardQueue::byte_offset_of_index());
  const int buffer_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
                                     DirtyCardQueue::byte_offset_of_buf());

  // Pointers into the thread

  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some values
  // Use ctrl to avoid hoisting these values past a safepoint, which could
  // potentially reset these fields in the JavaThread.
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

  // Convert the store obj pointer to an int prior to doing math on it
  // Must use ctrl to prevent "integerized oop" existing across safepoint
  Node* cast =  __ CastPX(__ ctrl(), adr);

  // Divide pointer by card size
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );

  // Combine card table base and card offset
  Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset );

  // If we know the value being stored, check whether the store crosses regions.

  if (val != NULL) {
    // Does the store cause us to cross regions?

    // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare??
    // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
    Node* xor_res =  __ URShiftX ( __ XorX( cast,  __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));

    // if (xor_res == 0) same region so skip
    __ if_then(xor_res, BoolTest::ne, zeroX); {

      // No barrier if we are storing a NULL
      __ if_then(val, BoolTest::ne, kit->null(), unlikely); {

        // Ok must mark the card if not already dirty

        // load the original value of the card
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

        __ if_then(card_val, BoolTest::ne, young_card); {
          kit->sync_kit(ideal);
          kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
          __ sync_kit(kit);

          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
            g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
          } __ end_if();
        } __ end_if();
      } __ end_if();
    } __ end_if();
  } else {
    // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
    // We don't need a barrier here if the destination is a newly allocated object
    // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
    // are set to 'g1_young_gen' (see G1CardTable::verify_g1_young_region()).
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
    __ if_then(card_val, BoolTest::ne, young_card); {
      g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
    } __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

// Helper that guards and inserts a pre-barrier.
void C2G1BSCodeGen::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                       Node* pre_val, bool need_mem_bar) {
  // We could be accessing the referent field of a reference object. If so, when G1
  // is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for a non-volatile load from the referent field
  // to prevent commoning of loads across a safepoint.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::referent_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset but not the referent_offset, so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
      // Update graphKit memory and control from IdealKit.
      kit->sync_kit(ideal);

      Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
      Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

      // Update IdealKit memory and control from graphKit.
      __ sync_kit(kit);

      Node* one = __ ConI(1);
      // is_instof == 0 if base_oop == NULL
      __ if_then(is_instof, BoolTest::eq, one, unlikely); {

        // Update graphKit from IdealKit.
        kit->sync_kit(ideal);

        // Use the pre-barrier to record the value in the referent field
        pre_barrier(kit, false /* do_load */,
                    __ ctrl(),
                    NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                    pre_val /* pre_val */,
                    T_OBJECT);
        if (need_mem_bar) {
          // Add memory barrier to prevent commoning reads from this field
          // across safepoint since GC can change its value.
          kit->insert_mem_bar(Op_MemBarCPUOrder);
        }
        // Update IdealKit from graphKit.
        __ sync_kit(kit);

      } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

Node* C2G1BSCodeGen::load_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool on_heap = (decorators & C2_ACCESS_ON_HEAP) != 0;
  bool on_weak = (decorators & C2_ACCESS_ON_WEAK) != 0;
  bool is_relaxed = (decorators & C2_MO_RELAXED) != 0;
  bool need_cpu_mem_bar = !is_relaxed || mismatched || !on_heap;

  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : kit->top();
  Node* load = C2CardTableModRefBSCodeGen::load_at_resolved(kit, obj, adr, adr_type, val_type, bt, decorators);

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if G1 is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // Also we need to add memory barrier to prevent commoning reads
  // from this field across safepoint since GC can change its value.
  bool need_read_barrier = on_heap && (on_weak || (anonymous && offset != kit->top() && obj != kit->top()));

  if (!is_obj || !need_read_barrier) {
    return load;
  }

  if (on_weak) {
    // Use the pre-barrier to record the value in the referent field
    pre_barrier(kit, false /* do_load */,
                kit->control(),
                NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                load /* pre_val */,
                T_OBJECT);
    // Add memory barrier to prevent commoning reads from this field
    // across safepoint since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (anonymous) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}

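// The G1 barrier stubs are emitted as leaf calls above; recognize them here
// by their runtime entry point names ("g1_wb_pre" and "g1_wb_post").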
bool C2G1BSCodeGen::is_gc_barrier_node(Node* node) {
  if (C2CardTableModRefBSCodeGen::is_gc_barrier_node(node)) {
    return true;
  }
  if (node->Opcode() != Op_CallLeaf) {
    return false;
  }
  CallLeafNode *call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "g1_wb_pre") == 0 || strcmp(call->_name, "g1_wb_post") == 0;
}

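// Eliminate the G1 pre/post barrier code reachable from a CastP2X node once
// the allocation it guards has been removed: the guarding Cmp nodes are
// replaced with a constant so their BoolTest::ne tests become false and IGVN
// can collapse the now-dead barrier code.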
void C2G1BSCodeGen::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  assert(node->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
  // It could be only one user, the URShift node, in the Object.clone() intrinsic,
  // but then the new allocation is passed to the arraycopy stub and it could not
  // be scalar replaced. So we don't check that case.

  // Another case of only one user (Xor) is when the value check for NULL
  // in the G1 post barrier is folded after CCP, so the code which used URShift
  // is removed.

  // Take the Region node before eliminating the post barrier since it also
  // eliminates the CastP2X node when it has only one user.
  Node* this_region = node->in(0);
  assert(this_region != NULL, "");

  // Remove G1 post barrier.

  // Search for the CastP2X->Xor->URShift->Cmp path which checks if the store
  // is done to a region different from the value's region.
  // Replace the Cmp with #0 (false) to collapse the G1 post barrier.
  Node* xorx = node->find_out_with(Op_XorX);
  if (xorx != NULL) {
    Node* shift = xorx->unique_out();
    Node* cmpx = shift->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing region check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));

    // Remove G1 pre barrier.

    // Search "if (marking != 0)" check and set it to "false".
    // There is no G1 pre barrier if previous stored value is NULL
    // (for example, after initialization).
    if (this_region->is_Region() && this_region->req() == 3) {
      int ind = 1;
      if (!this_region->in(ind)->is_IfFalse()) {
        ind = 2;
      }
      if (this_region->in(ind)->is_IfFalse()) {
        Node* bol = this_region->in(ind)->in(0)->in(1);
        assert(bol->is_Bool(), "");
        cmpx = bol->in(1);
        if (bol->as_Bool()->_test._test == BoolTest::ne &&
            cmpx->is_Cmp() && cmpx->in(2) == macro->intcon(0) &&
            cmpx->in(1)->is_Load()) {
          Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
          const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
                                              SATBMarkQueue::byte_offset_of_active());
          if (adr->is_AddP() && adr->in(AddPNode::Base) == macro->top() &&
              adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
              adr->in(AddPNode::Offset) == macro->MakeConX(marking_offset)) {
            macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
          }
        }
      }
    }
  } else {
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    // This is a G1 post barrier emitted by the Object.clone() intrinsic.
    // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if the card
    // is marked as young_gen and replace the Cmp with 0 (false) to collapse the barrier.
    Node* shift = node->find_out_with(Op_URShiftX);
    assert(shift != NULL, "missing G1 post barrier");
    Node* addp = shift->unique_out();
    Node* load = addp->find_out_with(Op_LoadB);
    assert(load != NULL, "missing G1 post barrier");
    Node* cmpx = load->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing card value check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
    // There is no G1 pre barrier in this case
  }
  // Now the CastP2X can be removed since it is only used on a dead path,
  // which is currently still alive until igvn optimizes it away.
  assert(node->outcnt() == 0 || node->unique_out()->Opcode() == Op_URShiftX, "");
  macro->replace_node(node, macro->top());
}