/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/macros.hpp"

const TypeFunc *G1BarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;  // Card addr
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *G1BarrierSetC2::write_ref_stats_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;  // HeapWord* dst
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;  // oopDesc* val
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

#define __ ideal.
/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of the
 * marking are kept alive; therefore all reference updates need to record
 * any previous reference stored before it is overwritten.
 *
 * If the previous value is NULL there is no need to save the old value.
 * References that are NULL are filtered at runtime by the barrier
 * code to avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove at compile time that the reference about to be overwritten is NULL
 * and avoid emitting the barrier code completely.
 *
 * The compiler needs to determine that the object in which a field is about
 * to be written is newly allocated, and that no prior store to the same field
 * has happened since the allocation.
 *
 * Returns true if the pre-barrier can be removed.
 */
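/*
 * For reference, the barrier emitted by pre_barrier() below is roughly
 * equivalent to the following pseudocode (illustrative only):
 *
 *   if (marking_active) {                     // thread-local SATB queue active flag
 *     pre_val = *adr;                         // only if do_load
 *     if (pre_val != NULL) {
 *       if (index != 0) {                     // SATB buffer has room
 *         index -= sizeof(intptr_t);
 *         buffer[index] = pre_val;
 *       } else {
 *         G1BarrierSetRuntime::write_ref_field_pre_entry(pre_val, thread);
 *       }
 *     }
 *   }
 */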
bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
                                               PhaseTransform* phase,
                                               Node* adr,
                                               BasicType bt,
                                               uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = kit->memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to be non-null here by the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL so that no previous
        // store has been moved up that directly writes a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

// G1 pre/post barriers
void G1BarrierSetC2::pre_barrier(GraphKit* kit,
                                 bool do_load,
                                 Node* ctl,
                                 Node* obj,
                                 Node* adr,
                                 uint alias_idx,
                                 Node* val,
                                 const TypeOopPtr* val_type,
                                 Node* pre_val,
                                 BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (use_ReduceInitialCardMarks()
        && g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);

  BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");

  // Offsets into the thread
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  const int index_offset   = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);

  // if (marking != 0)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node *log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), "write_ref_field_pre_entry", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (marking != 0)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

/*
 * G1, like any GC with a young generation, requires a way to keep track of
 * references from the old generation to the young generation to make sure all
 * live objects are found. G1 also needs to keep track of object references
 * between different regions to enable evacuation of old regions, which is done
 * as part of mixed collections. References are tracked in remembered sets,
 * which are continuously updated as references are written, with the help of
 * the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates to fields in objects located in the young generation,
 * stores within the same region as the referenced object, stores of NULL, and
 * stores to a card already marked as dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, namely if it can be proven at compile time that
 * the object is newly allocated and that no safepoint exists between the
 * allocation and the store.
 *
 * In the case of a slow-path allocation, the allocation code must handle the
 * barrier as part of the allocation if the allocated object is not located
 * in the nursery; this happens for humongous objects. This is similar to
 * how CMS is required to handle this case, see the comments for the methods
 * CollectedHeap::new_deferred_store_barrier and OptoRuntime::new_deferred_store_barrier.
 * A deferred card mark is required for these objects and is handled in the
 * above-mentioned methods.
 *
 * Returns true if the post barrier can be removed.
 */
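/*
 * For reference, the barrier emitted by post_barrier() below is roughly
 * equivalent to the following pseudocode (illustrative only):
 *
 *   card_adr = byte_map_base + (uintptr_t(adr) >> card_shift);
 *   if (((uintptr_t(adr) ^ uintptr_t(val)) >> LogOfHRGrainBytes) != 0  // crosses regions
 *       && val != NULL) {
 *     if (*card_adr != g1_young_card_val) {
 *       StoreLoad;                    // membar between the oop store and the card load
 *       if (*card_adr != dirty_card_val) {
 *         // dirty the card and enqueue its address, see g1_mark_card()
 *       }
 *     }
 *   }
 */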
bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
                                                PhaseTransform* phase, Node* store,
                                                Node* adr) const {
  intptr_t      offset = 0;
  Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc  = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
     return false; // No allocation found
  }

  // Start search from Store node
  Node* mem = store->in(MemNode::Control);
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode*  st_alloc = st_init->allocation();

    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}

//
// Update the card table and add card address to the queue
//
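//
// For reference, the sequence built here is roughly equivalent to the
// following pseudocode (illustrative only):
//
//   *card_adr = 0;                      // dirty the card, ordered after the oop store
//   if (index != 0) {                   // dirty card queue has room
//     index -= sizeof(intptr_t);
//     buffer[index] = card_adr;
//   } else {
//     G1BarrierSetRuntime::write_ref_field_post_entry(card_adr, thread);
//   }
//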
void G1BarrierSetC2::g1_mark_card(GraphKit* kit,
                                  IdealKit& ideal,
                                  Node* card_adr,
                                  Node* oop_store,
                                  uint oop_alias_idx,
                                  Node* index,
                                  Node* index_adr,
                                  Node* buffer,
                                  const TypeFunc* tf) const {
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

  // Now do the queue work
  __ if_then(index, BoolTest::ne, zeroX); {

    Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
    Node* log_addr = __ AddP(no_base, buffer, next_index);

    // Order, see storeCM.
    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);

  } __ else_(); {
    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), "write_ref_field_post_entry", card_adr, __ thread());
  } __ end_if();

}

void G1BarrierSetC2::post_barrier(GraphKit* kit,
                                  Node* ctl,
                                  Node* oop_store,
                                  Node* obj,
                                  Node* adr,
                                  uint alias_idx,
                                  Node* val,
                                  BasicType bt,
                                  bool use_precise) const {
  // If we are writing a NULL then we need no post barrier

  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    // Must be NULL
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    // No post barrier if writing NULL
    return;
  }

  if (use_ReduceInitialCardMarks() && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (use_ReduceInitialCardMarks()
      && g1_can_remove_post_barrier(kit, &kit->gvn(), oop_store, adr)) {
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  float unlikely  = PROB_UNLIKELY(0.999);
  Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)G1CardTable::dirty_card_val());
  Node* zeroX = __ ConX(0);

  const TypeFunc *tf = write_ref_field_post_entry_Type();

  // Offsets into the thread
  const int index_offset  = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  const int buffer_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

  // Pointers into the thread

  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some values
  // Use ctrl to avoid hoisting these values past a safepoint, which could
  // potentially reset these fields in the JavaThread.
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

  // Convert the store obj pointer to an int prior to doing math on it
  // Must use ctrl to prevent "integerized oop" existing across safepoint
  Node* cast =  __ CastPX(__ ctrl(), adr);

  // Divide pointer by card size
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );

  // Combine card table base and card offset
  Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset );

  // If we know the value being stored, check whether the store crosses regions.

  if (val != NULL) {
    if (UsePerfData && G1WriteBarrierStats) {
      const TypeFunc *stats_tf = write_ref_stats_Type();
      __ make_leaf_call(stats_tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_stats), "write_ref_stats", adr, val);
    }
    // Does the store cause us to cross regions?

    // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare??
    // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
    Node* xor_res =  __ URShiftX ( __ XorX( cast,  __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));

    // if (xor_res == 0) same region so skip
    __ if_then(xor_res, BoolTest::ne, zeroX); {

      // No barrier if we are storing a NULL
      __ if_then(val, BoolTest::ne, kit->null(), unlikely); {

        // OK, must mark the card if not already dirty

        // load the original value of the card
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

        __ if_then(card_val, BoolTest::ne, young_card); {
          kit->sync_kit(ideal);
          kit->insert_store_load_for_barrier();
          __ sync_kit(kit);

          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
            g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
          } __ end_if();
        } __ end_if();
      } __ end_if();
    } __ end_if();
  } else {
    // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
    // We don't need a barrier here if the destination is a newly allocated object
    // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
    // are set to 'g1_young_gen' (see G1CardTable::verify_g1_young_region()).
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
    __ if_then(card_val, BoolTest::ne, young_card); {
      g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
    } __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

// Helper that guards and inserts a pre-barrier.
void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                        Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when G1
  // is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for a non-volatile load from the referent field
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset but not the reference_offset so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if ( klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::_reference_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
      // Update graphKit memory and control from IdealKit.
      kit->sync_kit(ideal);

      Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
      Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

      // Update IdealKit memory and control from graphKit.
      __ sync_kit(kit);

      Node* one = __ ConI(1);
      // is_instof == 0 if base_oop == NULL
      __ if_then(is_instof, BoolTest::eq, one, unlikely); {

        // Update graphKit from IdealKit.
        kit->sync_kit(ideal);

        // Use the pre-barrier to record the value in the referent field
        pre_barrier(kit, false /* do_load */,
                    __ ctrl(),
                    NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                    pre_val /* pre_val */,
                    T_OBJECT);
        if (need_mem_bar) {
          // Add a memory barrier to prevent commoning reads from this field
          // across safepoints since GC can change its value.
          kit->insert_mem_bar(Op_MemBarCPUOrder);
        }
        // Update IdealKit from graphKit.
        __ sync_kit(kit);

      } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  Node* adr = access.addr().node();
  Node* obj = access.base();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool need_cpu_mem_bar = !is_unordered || mismatched || !in_heap;

  Node* top = Compile::current()->top();
  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
  Node* load = CardTableBarrierSetC2::load_at_resolved(access, val_type);

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if G1 is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // Also we need to add a memory barrier to prevent commoning reads
  // from this field across safepoints since GC can change its value.
  bool need_read_barrier = in_heap && (on_weak ||
                                       (unknown && offset != top && obj != top));

  if (!access.is_oop() || !need_read_barrier) {
    return load;
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
  GraphKit* kit = parse_access.kit();

  if (on_weak) {
    // Use the pre-barrier to record the value in the referent field
    pre_barrier(kit, false /* do_load */,
                kit->control(),
                NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                load /* pre_val */, T_OBJECT);
    // Add a memory barrier to prevent commoning reads from this field
    // across safepoints since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}

bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (CardTableBarrierSetC2::is_gc_barrier_node(node)) {
    return true;
  }
  if (node->Opcode() != Op_CallLeaf) {
    return false;
  }
  CallLeafNode *call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "write_ref_field_pre_entry") == 0 || strcmp(call->_name, "write_ref_field_post_entry") == 0 || strcmp(call->_name, "write_ref_stats") == 0;
}

void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  assert(node->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
  // There could be only one user, the URShift node, in the Object.clone()
  // intrinsic, but then the new allocation is passed to the arraycopy stub
  // and cannot be scalar replaced, so we don't check that case.

  // Another case of only one user (the Xor node) is when the NULL check of
  // the value in the G1 post barrier is folded after CCP, so the code which
  // used the URShift is removed.

  // Take the Region node before eliminating the post barrier since doing so
  // also eliminates the CastP2X node when it has only one user.
  Node* this_region = node->in(0);
  assert(this_region != NULL, "");

  // Remove G1 post barrier.

  // Search for the CastP2X->Xor->URShift->Cmp path which checks whether
  // the store is done to a region different from the value's region, and
  // replace the Cmp with #0 (false) to collapse the G1 post barrier.
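  // Replacing the Cmp with #0 (CC_EQ) makes the Bool(ne) test constantly
  // false, so the cross-region branch and the barrier code under it become
  // dead and are cleaned up by IGVN.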
  Node* xorx = node->find_out_with(Op_XorX);
  if (xorx != NULL) {
    Node* shift = xorx->unique_out();
    Node* cmpx = shift->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing region check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));

    // Remove G1 pre barrier.

    // Search for the "if (marking != 0)" check and set it to "false".
    // There is no G1 pre barrier if the previously stored value is NULL
    // (for example, after initialization).
    if (this_region->is_Region() && this_region->req() == 3) {
      int ind = 1;
      if (!this_region->in(ind)->is_IfFalse()) {
        ind = 2;
      }
      if (this_region->in(ind)->is_IfFalse() &&
          this_region->in(ind)->in(0)->Opcode() == Op_If) {
        Node* bol = this_region->in(ind)->in(0)->in(1);
        assert(bol->is_Bool(), "");
        cmpx = bol->in(1);
        if (bol->as_Bool()->_test._test == BoolTest::ne &&
            cmpx->is_Cmp() && cmpx->in(2) == macro->intcon(0) &&
            cmpx->in(1)->is_Load()) {
          Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
          const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
          if (adr->is_AddP() && adr->in(AddPNode::Base) == macro->top() &&
              adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
              adr->in(AddPNode::Offset) == macro->MakeConX(marking_offset)) {
            macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
          }
        }
      }
    }
  } else {
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    // This is a G1 post barrier emitted by the Object.clone() intrinsic.
    // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if the card
    // is marked as young_gen and replace the Cmp with 0 (false) to collapse the barrier.
    Node* shift = node->find_out_with(Op_URShiftX);
    assert(shift != NULL, "missing G1 post barrier");
    Node* addp = shift->unique_out();
    Node* load = addp->find_out_with(Op_LoadB);
    assert(load != NULL, "missing G1 post barrier");
    Node* cmpx = load->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing card value check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
    // There is no G1 pre barrier in this case
  }
  // Now the CastP2X can be removed since it is used only on a dead path
  // which is currently still alive until IGVN optimizes it.
  assert(node->outcnt() == 0 || node->unique_out()->Opcode() == Op_URShiftX, "");
  macro->replace_node(node, macro->top());
}

Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (!use_ReduceInitialCardMarks() &&
      c != NULL && c->is_Region() && c->req() == 3) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != NULL && c->in(i)->is_Region() &&
          c->in(i)->req() == 3) {
        Node* r = c->in(i);
        for (uint j = 1; j < r->req(); j++) {
          if (r->in(j) != NULL && r->in(j)->is_Proj() &&
              r->in(j)->in(0) != NULL &&
              r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
              r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry)) {
            Node* call = r->in(j)->in(0);
            c = c->in(i == 1 ? 2 : 1);
            if (c != NULL) {
              c = c->in(0);
              if (c != NULL) {
                c = c->in(0);
                assert(call->in(0) == NULL ||
                       call->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0)->in(0) == NULL ||
                       c == call->in(0)->in(0)->in(0)->in(0)->in(0), "bad barrier shape");
                return c;
              }
            }
          }
        }
      }
    }
  }
  return c;
}

#ifdef ASSERT
void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (phase != BarrierSetC2::BeforeCodeGen) {
    return;
  }
  // Verify G1 pre-barriers
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());

  ResourceArea *area = Thread::current()->resource_area();
  Unique_Node_List visited(area);
  Node_List worklist(area);
  // We're going to walk control flow backwards starting from the Root
  worklist.push(compile->root());
  while (worklist.size() > 0) {
    Node* x = worklist.pop();
    if (x == NULL || x == compile->top()) continue;
    if (visited.member(x)) {
      continue;
    } else {
      visited.push(x);
    }

    if (x->is_Region()) {
      for (uint i = 1; i < x->req(); i++) {
        worklist.push(x->in(i));
      }
    } else {
      worklist.push(x->in(0));
      // We are looking for the pattern:
      //                            /->ThreadLocal
      // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
      //              \->ConI(0)
      // We want to verify that the If and the LoadB have the same control
      // See G1BarrierSetC2::pre_barrier()
      if (x->is_If()) {
        IfNode *iff = x->as_If();
        if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
          CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
          if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
              && cmp->in(1)->is_Load()) {
            LoadNode* load = cmp->in(1)->as_Load();
            if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                && load->in(2)->in(3)->is_Con()
                && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {

              Node* if_ctrl = iff->in(0);
              Node* load_ctrl = load->in(0);

              if (if_ctrl != load_ctrl) {
                // Skip possible CProj->NeverBranch in infinite loops
                if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                    && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
                  if_ctrl = if_ctrl->in(0)->in(0);
                }
              }
              assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
            }
          }
        }
      }
    }
  }
}
#endif

bool G1BarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
  if (opcode == Op_StoreP) {
    Node* adr = n->in(MemNode::Address);
    const Type* adr_type = gvn->type(adr);
    // Pointer stores in G1 barriers look like unsafe accesses.
    // Ignore such stores so that non-escaping allocations can still be
    // scalar replaced.
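    // The shape recognized below is, roughly, a StoreP whose address is
    //   AddP(base = LoadP(AddP(ThreadLocal, queue_buffer_offset)), index),
    // i.e. a store into the thread-local SATB or dirty card queue buffer.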
    if (adr_type->isa_rawptr() && adr->is_AddP()) {
      Node* base = conn_graph->get_addp_base(adr);
      if (base->Opcode() == Op_LoadP &&
          base->in(MemNode::Address)->is_AddP()) {
        adr = base->in(MemNode::Address);
        Node* tls = conn_graph->get_addp_base(adr);
        if (tls->Opcode() == Op_ThreadLocal) {
          int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
          if (offs == buf_offset) {
            return true; // G1 pre barrier previous oop value store.
          }
          if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
            return true; // G1 post barrier card address store.
          }
        }
      }
    }
  }
  return false;
}