/*
 * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"

ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _shenandoah_barriers(new (comp_arena) GrowableArray<ShenandoahWriteBarrierNode*>(comp_arena, 8, 0, NULL)) {
}

int ShenandoahBarrierSetC2State::shenandoah_barriers_count() const {
  return _shenandoah_barriers->length();
}

ShenandoahWriteBarrierNode* ShenandoahBarrierSetC2State::shenandoah_barrier(int idx) const {
  return _shenandoah_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_shenandoah_barrier(ShenandoahWriteBarrierNode* n) {
  assert(!_shenandoah_barriers->contains(n), "duplicate entry in barrier list");
  _shenandoah_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_shenandoah_barrier(ShenandoahWriteBarrierNode* n) {
  if (_shenandoah_barriers->contains(n)) {
    _shenandoah_barriers->remove(n);
  }
}

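// Within this file, __ abbreviates the GraphKit pointer (and, further down,
// an IdealKit), so the barrier emission code reads like plain kit calls.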
#define __ kit->

Node* ShenandoahBarrierSetC2::shenandoah_read_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahReadBarrier) {
    obj = shenandoah_read_barrier_impl(kit, obj, false, true, true);
  }
  return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahStoreValEnqueueBarrier) {
    obj = shenandoah_write_barrier(kit, obj);
    obj = shenandoah_enqueue_barrier(kit, obj);
  }
  if (ShenandoahStoreValReadBarrier) {
    obj = shenandoah_read_barrier_impl(kit, obj, true, false, false);
  }
  return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_read_barrier_acmp(GraphKit* kit, Node* obj) {
  return shenandoah_read_barrier_impl(kit, obj, true, true, false);
}

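// Emits a read barrier for obj unless it is statically known to be null or
// needs_barrier() proves that no barrier is required. use_ctrl pins the
// barrier on the current control input, use_mem selects the current memory
// state for the Brooks-pointer slice (versus immutable memory), and
// allow_fromspace is passed through to needs_barrier() and the barrier node.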
Node* ShenandoahBarrierSetC2::shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const {
  const Type* obj_type = obj->bottom_type();
  if (obj_type->higher_equal(TypePtr::NULL_PTR)) {
    return obj;
  }
  const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
  Node* mem = use_mem ? __ memory(adr_type) : __ immutable_memory();

  if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, mem, allow_fromspace)) {
    // We can prove no barrier is needed (e.g. the value is known null or
    // already points into to-space).
    return obj;
  }

  if (obj_type->meet(TypePtr::NULL_PTR) == obj_type->remove_speculative()) {
    // We don't know if it's null or not. Need null-check.
    enum { _not_null_path = 1, _null_path, PATH_LIMIT };
    RegionNode* region = new RegionNode(PATH_LIMIT);
    Node*       phi    = new PhiNode(region, obj_type);
    Node* null_ctrl = __ top();
    Node* not_null_obj = __ null_check_oop(obj, &null_ctrl);

    region->init_req(_null_path, null_ctrl);
    phi   ->init_req(_null_path, __ zerocon(T_OBJECT));

    Node* ctrl = use_ctrl ? __ control() : NULL;
    ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, not_null_obj, allow_fromspace);
    Node* n = __ gvn().transform(rb);

    region->init_req(_not_null_path, __ control());
    phi   ->init_req(_not_null_path, n);

    __ set_control(__ gvn().transform(region));
    __ record_for_igvn(region);
    return __ gvn().transform(phi);
  } else {
    // We know it is not null. Simple barrier is sufficient.
    Node* ctrl = use_ctrl ? __ control() : NULL;
    ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, obj, allow_fromspace);
    Node* n = __ gvn().transform(rb);
    __ record_for_igvn(n);
    return n;
  }
}

 133 
 134 Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const {
 135   ShenandoahWriteBarrierNode* wb = new ShenandoahWriteBarrierNode(kit->C, kit->control(), kit->memory(adr_type), obj);
 136   Node* n = __ gvn().transform(wb);
 137   if (n == wb) { // New barrier needs memory projection.
 138     Node* proj = __ gvn().transform(new ShenandoahWBMemProjNode(n));
 139     __ set_memory(proj, adr_type);
 140   }
 141   return n;
 142 }
 143 
 144 Node* ShenandoahBarrierSetC2::shenandoah_write_barrier(GraphKit* kit, Node* obj) const {
 145 
 146   if (ShenandoahWriteBarrier) {
 147     obj = shenandoah_write_barrier_impl(kit, obj);
 148   }
 149   return obj;
 150 }
 151 
 152 Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const {
 153   if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, NULL, true)) {
 154     return obj;
 155   }
 156   const Type* obj_type = obj->bottom_type();
 157   const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
 158   Node* n = shenandoah_write_barrier_helper(kit, obj, adr_type);
 159   __ record_for_igvn(n);
 160   return n;
 161 }
 162 
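// Emits code that updates the connection matrix: it sets the byte recording
// that the region containing adr may now hold a reference into the region
// containing val. Skipped entirely when the matrix is unused or val is
// statically known to be null.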
void ShenandoahBarrierSetC2::shenandoah_update_matrix(GraphKit* kit, Node* adr, Node* val) const {
  if (!UseShenandoahMatrix) {
    return;
  }

  assert(val != NULL, "checked before");
  if (adr == NULL) {
    return; // Nothing to do
  }
  if (val->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
    // Nothing to do.
    return;
  }

  ShenandoahConnectionMatrix* matrix = ShenandoahHeap::heap()->connection_matrix();

  enum { _set_path = 1, _already_set_path, _val_null_path, PATH_LIMIT };
  RegionNode* region = new RegionNode(PATH_LIMIT);
  Node* prev_mem = __ memory(Compile::AliasIdxRaw);
  Node* memphi   = PhiNode::make(region, prev_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
  Node* null_ctrl = __ top();
  Node* not_null_val = __ null_check_oop(val, &null_ctrl);

  // Null path: nothing to do.
  region->init_req(_val_null_path, null_ctrl);
  memphi->init_req(_val_null_path, prev_mem);

  // Not null path. Update the matrix.

  // This uses a fast calculation for the matrix address. For a description,
  // see src/share/vm/gc/shenandoah/shenandoahConnectionMatrix.inline.hpp,
  // ShenandoahConnectionMatrix::compute_address(const void* from, const void* to).
  address heap_base = ShenandoahHeap::heap()->base();
  jint stride = matrix->stride_jint();
  jint rs = ShenandoahHeapRegion::region_size_bytes_shift_jint();

  guarantee(stride < ShenandoahHeapRegion::region_size_bytes_jint(), "sanity");
  guarantee(is_aligned(heap_base, ShenandoahHeapRegion::region_size_bytes()), "sanity");

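  // magic_con folds the matrix base address and the heap-base bias of both
  // region indices into a single constant, so the matrix slot address below
  // can be computed as (adr >> rs) + ((val >> rs) * stride) + magic_con
  // without subtracting heap_base from either pointer at runtime.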
  Node* magic_con = __ MakeConX((jlong) matrix->matrix_addr() - ((jlong) heap_base >> rs) * (stride + 1));

  // Compute addr part
  Node* adr_idx = __ gvn().transform(new CastP2XNode(__ control(), adr));
  adr_idx = __ gvn().transform(new URShiftXNode(adr_idx, __ intcon(rs)));

  // Compute new_val part
  Node* val_idx = __ gvn().transform(new CastP2XNode(__ control(), not_null_val));
  val_idx = __ gvn().transform(new URShiftXNode(val_idx, __ intcon(rs)));
  val_idx = __ gvn().transform(new MulXNode(val_idx, __ MakeConX(stride)));

  // Add everything up
  adr_idx = __ gvn().transform(new AddXNode(adr_idx, val_idx));
  adr_idx = __ gvn().transform(new CastX2PNode(adr_idx));
  Node* matrix_adr = __ gvn().transform(new AddPNode(__ top(), adr_idx, magic_con));

  // Load current value
  const TypePtr* adr_type = TypeRawPtr::BOTTOM;
  Node* current = __ gvn().transform(LoadNode::make(__ gvn(), __ control(), __ memory(Compile::AliasIdxRaw),
                                                    matrix_adr, adr_type, TypeInt::INT, T_BYTE, MemNode::unordered));

  // Check if already set
  Node* cmp_set = __ gvn().transform(new CmpINode(current, __ intcon(0)));
  Node* cmp_set_bool = __ gvn().transform(new BoolNode(cmp_set, BoolTest::eq));
  IfNode* cmp_iff = __ create_and_map_if(__ control(), cmp_set_bool, PROB_MIN, COUNT_UNKNOWN);

  Node* if_not_set = __ gvn().transform(new IfTrueNode(cmp_iff));
  Node* if_set = __ gvn().transform(new IfFalseNode(cmp_iff));

  // Already set, exit
  __ set_control(if_set);
  region->init_req(_already_set_path, __ control());
  memphi->init_req(_already_set_path, prev_mem);

  // Not set: do the store, and finish up
  __ set_control(if_not_set);
  Node* store = __ gvn().transform(StoreNode::make(__ gvn(), __ control(), __ memory(Compile::AliasIdxRaw),
                                                   matrix_adr, adr_type, __ intcon(1), T_BYTE, MemNode::unordered));
  region->init_req(_set_path, __ control());
  memphi->init_req(_set_path, store);

  // Merge control flows and memory.
  __ set_control(__ gvn().transform(region));
  __ record_for_igvn(region);
  __ set_memory(__ gvn().transform(memphi), Compile::AliasIdxRaw);
}

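// Decides whether the SATB pre-barrier for a store at adr can be elided: if
// the scan below proves that the field still holds its initial zero value
// from the enclosing allocation, the previous value is null and there is
// nothing to log. This is analogous to the reasoning G1 uses to elide its
// pre-barrier under ReduceInitialCardMarks.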
bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
                                                         BasicType bt, uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = __ memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {
    if (mem->is_Store()) {
      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so stop searching.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to not be null here from earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL so that no previous store
        // has been moved up to directly write a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

#undef __
#define __ ideal.

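// Emits the SATB pre-barrier: when concurrent marking is active, the previous
// value of the field (loaded here when do_load is true, or supplied as
// pre_val) is enqueued into the thread-local SATB buffer; when the buffer is
// full, the slow path calls ShenandoahRuntime::write_ref_field_pre_entry.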
void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
                                                    bool do_load,
                                                    Node* obj,
                                                    Node* adr,
                                                    uint alias_idx,
                                                    Node* val,
                                                    const TypeOopPtr* val_type,
                                                    Node* pre_val,
                                                    BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (ReduceInitialCardMarks
        && satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }
  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_ctrl = NULL;
  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");

  // Offsets into the thread
  const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
  const int index_offset   = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking;
  Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
  Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
  marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
  assert(ShenandoahWriteBarrierNode::is_gc_state_load(ld), "Should match the shape");

  // if (marking)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading SATBMarkQueue::_index with wrong size.");
    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node* log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc* tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (marking)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);

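  // Record the store address as an extra input on the pre-barrier runtime
  // call just emitted (reached by walking back from the merged control),
  // making the guarded address visible to later optimization passes.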
  if (ShenandoahSATBBarrier && adr != NULL) {
    Node* c = kit->control();
    Node* call = c->in(1)->in(1)->in(1)->in(0);
    assert(call->is_shenandoah_wb_pre_call(), "shenandoah_wb_pre call expected");
    call->add_req(adr);
  }
}

 451 
 452 void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
 453                                                           bool do_load,
 454                                                           Node* obj,
 455                                                           Node* adr,
 456                                                           uint alias_idx,
 457                                                           Node* val,
 458                                                           const TypeOopPtr* val_type,
 459                                                           Node* pre_val,
 460                                                           BasicType bt) const {
 461 
 462   IdealKit ideal(kit);
 463 
 464   // Some sanity checks
 465   // Note: val is unused in this routine.
 466 
 467   if (val != NULL) {
 468     shenandoah_update_matrix(kit, adr, val);
 469     ideal.sync_kit(kit);
 470   }
 471 
 472   kit->sync_kit(ideal);
 473   if (ShenandoahSATBBarrier) {
 474     satb_write_barrier_pre(kit, do_load, obj, adr, alias_idx, val, val_type, pre_val, bt);
 475   }
 476   ideal.sync_kit(kit);
 477 
 478   kit->final_sync(ideal);
 479 }
 480 
 481 Node* ShenandoahBarrierSetC2::shenandoah_enqueue_barrier(GraphKit* kit, Node* pre_val) const {
 482   return kit->gvn().transform(new ShenandoahEnqueueBarrierNode(pre_val));
 483 }
 484 
 485 // Helper that guards and inserts a pre-barrier.
 486 void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
 487                                                 Node* pre_val, bool need_mem_bar) const {
 488   // We could be accessing the referent field of a reference object. If so, when G1
 489   // is enabled, we need to log the value in the referent field in an SATB buffer.
 490   // This routine performs some compile time filters and generates suitable
 491   // runtime filters that guard the pre-barrier code.
 492   // Also add memory barrier for non volatile load from the referent field
 493   // to prevent commoning of loads across safepoint.
 494 
 495   // Some compile time checks.
 496 
 497   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
 498   const TypeX* otype = offset->find_intptr_t_type();
 499   if (otype != NULL && otype->is_con() &&
 500       otype->get_con() != java_lang_ref_Reference::referent_offset) {
 501     // Constant offset but not the reference_offset so just return
 502     return;
 503   }
 504 
 505   // We only need to generate the runtime guards for instances.
 506   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
 507   if (btype != NULL) {
 508     if (btype->isa_aryptr()) {
 509       // Array type so nothing to do
 510       return;
 511     }
 512 
 513     const TypeInstPtr* itype = btype->isa_instptr();
 514     if (itype != NULL) {
 515       // Can the klass of base_oop be statically determined to be
 516       // _not_ a sub-class of Reference and _not_ Object?
 517       ciKlass* klass = itype->klass();
 518       if ( klass->is_loaded() &&
 519           !klass->is_subtype_of(kit->env()->Reference_klass()) &&
 520           !kit->env()->Object_klass()->is_subtype_of(klass)) {
 521         return;
 522       }
 523     }
 524   }
 525 
 526   // The compile time filters did not reject base_oop/offset so
 527   // we need to generate the following runtime filters
 528   //
 529   // if (offset == java_lang_ref_Reference::_reference_offset) {
 530   //   if (instance_of(base, java.lang.ref.Reference)) {
 531   //     pre_barrier(_, pre_val, ...);
 532   //   }
 533   // }
 534 
 535   float likely   = PROB_LIKELY(  0.999);
 536   float unlikely = PROB_UNLIKELY(0.999);
 537 
 538   IdealKit ideal(kit);
 539 
 540   Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
 541 
 542   __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
 543       // Update graphKit memory and control from IdealKit.
 544       kit->sync_kit(ideal);
 545 
 546       Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
 547       Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);
 548 
 549       // Update IdealKit memory and control from graphKit.
 550       __ sync_kit(kit);
 551 
 552       Node* one = __ ConI(1);
 553       // is_instof == 0 if base_oop == NULL
 554       __ if_then(is_instof, BoolTest::eq, one, unlikely); {
 555 
 556         // Update graphKit from IdeakKit.
 557         kit->sync_kit(ideal);
 558 
 559         // Use the pre-barrier to record the value in the referent field
 560         satb_write_barrier_pre(kit, false /* do_load */,
 561                                NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
 562                                pre_val /* pre_val */,
 563                                T_OBJECT);
 564         if (need_mem_bar) {
 565           // Add memory barrier to prevent commoning reads from this field
 566           // across safepoint since GC can change its value.
 567           kit->insert_mem_bar(Op_MemBarCPUOrder);
 568         }
 569         // Update IdealKit from graphKit.
 570         __ sync_kit(kit);
 571 
 572       } __ end_if(); // _ref_type != ref_none
 573   } __ end_if(); // offset == referent_offset
 574 
 575   // Final sync IdealKit and GraphKit.
 576   kit->final_sync(ideal);
 577 }
 578 
 579 #undef __
 580 
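// Call signature for ShenandoahRuntime::write_ref_field_pre_entry: two
// non-null pointer arguments (the previous field value and the current
// thread), with no return value.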
const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original object
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_write_barrier_Type() {
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original object
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

Node* ShenandoahBarrierSetC2::store_at(C2Access& access, C2AccessValue& val) const {
  // TODO: Implement using proper barriers.
  return BarrierSetC2::store_at(access, val);
}

Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();
  GraphKit* kit = access.kit();

  const TypePtr* adr_type = access.addr().type();
  Node* adr = access.addr().node();

  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;

  if (!access.is_oop() || (!on_heap && !anonymous)) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  uint adr_idx = kit->C->get_alias_index(adr_type);
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
  Node* value = val.node();
  value = shenandoah_storeval_barrier(kit, value);
  val.set_node(value);
  shenandoah_write_barrier_pre(kit, true /* do_load */, access.base(), adr, adr_idx, val.node(),
                               static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
  return BarrierSetC2::store_at_resolved(access, val);
}

Node* ShenandoahBarrierSetC2::load_at(C2Access& access, const Type* val_type) const {
  // TODO: Implement using proper barriers.
  return BarrierSetC2::load_at(access, val_type);
}

Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  GraphKit* kit = access.kit();

  Node* adr = access.addr().node();
  Node* obj = access.base();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap;

  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : kit->top();
  Node* load = BarrierSetC2::load_at_resolved(access, val_type);

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if SATB is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // We also need to add a memory barrier to prevent commoning reads
  // from this field across safepoints, since GC can change its value.
  bool need_read_barrier = ShenandoahKeepAliveBarrier &&
    (on_heap && (on_weak || (unknown && offset != kit->top() && obj != kit->top())));

  if (!access.is_oop() || !need_read_barrier) {
    return load;
  }

  if (on_weak) {
    // Use the pre-barrier to record the value in the referent field
    satb_write_barrier_pre(kit, false /* do_load */,
                           NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                           load /* pre_val */, T_OBJECT);
    // Add memory barrier to prevent commoning reads from this field
    // across safepoint since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier when need_cpu_mem_bar
    // is set: in that case the barriers are emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}

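// For oop CAS, the new value gets the storeval barrier, and expected_val is
// logged as pre_val: a successful CAS overwrites exactly expected_val, so
// that is the reference SATB marking must not lose.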
Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                             Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_storeval_barrier(kit, val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);
  }
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at(C2AtomicAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* val_type) const {
  // TODO: Implement using proper barriers.
  return BarrierSetC2::atomic_cmpxchg_val_at(access, expected_val, new_val, val_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                              Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_storeval_barrier(kit, val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);
  }
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* val_type) const {
  // TODO: Implement using proper barriers.
  return BarrierSetC2::atomic_cmpxchg_bool_at(access, expected_val, new_val, val_type);
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_storeval_barrier(kit, val);
  }
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
  if (access.is_oop()) {
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 result /* pre_val */, T_OBJECT);
  }
  return result;
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  // TODO: Implement using proper barriers.
  return BarrierSetC2::atomic_xchg_at(access, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  // TODO: Implement using proper barriers.
  return BarrierSetC2::atomic_add_at(access, new_val, value_type);
}

void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
  // TODO: Implement using proper barriers.
  BarrierSetC2::clone(kit, src, dst, size, is_array);
}

Node* ShenandoahBarrierSetC2::resolve_for_read(GraphKit* kit, Node* n) const {
  return shenandoah_read_barrier(kit, n);
}

Node* ShenandoahBarrierSetC2::resolve_for_write(GraphKit* kit, Node* n) const {
  return shenandoah_write_barrier(kit, n);
}

Node* ShenandoahBarrierSetC2::cmpoop_cmp(GraphKit* kit, Node* a, Node* b) const {
  if (ShenandoahAcmpBarrier && (ShenandoahVerifyOptoBarriers || ShenandoahWBAcmpBarrier)) {
    a = kit->access_resolve_for_write(a);
    b = kit->access_resolve_for_write(b);
  }
  return kit->gvn().transform(new CmpPNode(b, a));
}

/**
 * In Shenandoah, we need barriers on acmp (and similar instructions that compare two
 * oops) to avoid false negatives. If it compares a from-space and a to-space
 * copy of the same object, a regular acmp would return false, even though both
 * refer to the same object. The acmp barrier compares the two operands and, when
 * they are *not equal*, applies a read-barrier to both and compares them again.
 * If the first comparison failed only because the operands were different copies
 * of one object, that object must already have been evacuated (and therefore does
 * not require a write-barrier).
 */
void ShenandoahBarrierSetC2::cmpoop_if(GraphKit* kit, Node* tst, float true_prob, float cnt,
                                       Node*& taken_branch, Node*& untaken_branch,
                                       Node*& taken_memory, Node*& untaken_memory) const {
  IfNode* iff = kit->create_and_map_if(kit->control(), tst, true_prob, cnt);
  assert(iff->_prob > 0.0f, "Optimizer made bad probability in parser");
  taken_branch   = new IfTrueNode(iff);
  untaken_branch = new IfFalseNode(iff);

  taken_branch = kit->gvn().transform(taken_branch);
  untaken_branch = kit->gvn().transform(untaken_branch);

  assert(taken_memory == NULL && untaken_memory == NULL, "unexpected memory inputs");
  if (!ShenandoahAcmpBarrier || ShenandoahVerifyOptoBarriers || ShenandoahWBAcmpBarrier) {
    return;
  }
  if (taken_branch->is_top() || untaken_branch->is_top()) {
    // one of the branches is known to be untaken
    return;
  }
  assert(taken_branch->is_IfProj() && untaken_branch->is_IfProj(), "if projections only");
  assert(taken_branch->in(0) == untaken_branch->in(0), "should come from same if");
  BoolNode* bol = iff->in(1)->as_Bool();
  Node* cmp = bol->in(1);
  assert(cmp->Opcode() == Op_CmpP, "only oops comparisons");
  Node* a = cmp->in(1);
  Node* b = cmp->in(2);
  const Type* a_type = kit->gvn().type(a);
  const Type* b_type = kit->gvn().type(b);
  if (a_type->higher_equal(TypePtr::NULL_PTR) || b_type->higher_equal(TypePtr::NULL_PTR)) {
    // We know one of the arguments is null. No need for barriers.
    return;
  }

  const TypePtr* a_adr_type = ShenandoahBarrierNode::brooks_pointer_type(a_type);
  const TypePtr* b_adr_type = ShenandoahBarrierNode::brooks_pointer_type(b_type);
  if ((! ShenandoahBarrierNode::needs_barrier(&kit->gvn(), NULL, a, kit->memory(a_adr_type), false)) &&
      (! ShenandoahBarrierNode::needs_barrier(&kit->gvn(), NULL, b, kit->memory(b_adr_type), false))) {
    // We know both args are in to-space already. No acmp barrier needed.
    return;
  }

  Node* equal_path = iff->proj_out(true);
  Node* not_equal_path = iff->proj_out(false);

  if (bol->_test._test == BoolTest::ne) {
    swap(equal_path, not_equal_path);
  }

  Node* init_equal_path = equal_path;
  Node* init_not_equal_path = not_equal_path;

  uint alias_a = kit->C->get_alias_index(a_adr_type);
  uint alias_b = kit->C->get_alias_index(b_adr_type);

  Node* equal_memory = NULL;
  Node* not_equal_memory = NULL;

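  // Merge the two "equal" outcomes (the fast-path comparison and the re-check
  // after read barriers) in a region, with a phi merging the memory states
  // produced along the way.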
  RegionNode* region = new RegionNode(3);
  region->init_req(1, equal_path);
  PhiNode* mem_phi = NULL;
  if (alias_a == alias_b) {
    mem_phi = PhiNode::make(region, kit->memory(alias_a), Type::MEMORY, kit->C->get_adr_type(alias_a));
  } else {
    Node* mem = kit->reset_memory();
    mem_phi = PhiNode::make(region, mem, Type::MEMORY, TypePtr::BOTTOM);
    kit->set_all_memory(mem);
  }

  kit->set_control(not_equal_path);

  Node* mb = NULL;
  if (alias_a == alias_b) {
    Node* mem = kit->reset_memory();
    mb = MemBarNode::make(kit->C, Op_MemBarAcquire, alias_a);
    mb->init_req(TypeFunc::Control, kit->control());
    mb->init_req(TypeFunc::Memory, mem);
    Node* membar = kit->gvn().transform(mb);
    kit->set_control(kit->gvn().transform(new ProjNode(membar, TypeFunc::Control)));
    Node* newmem = kit->gvn().transform(new ProjNode(membar, TypeFunc::Memory));
    kit->set_all_memory(mem);
    kit->set_memory(newmem, alias_a);
  } else {
    mb = kit->insert_mem_bar(Op_MemBarAcquire);
  }

  ShenandoahBarrierSetC2* bs = ShenandoahBarrierSetC2::bsc2();
  a = bs->shenandoah_read_barrier_acmp(kit, a);
  b = bs->shenandoah_read_barrier_acmp(kit, b);

  Node* cmp2 = kit->gvn().transform(new CmpPNode(a, b));
  Node* bol2 = bol->clone();
  bol2->set_req(1, cmp2);
  bol2 = kit->gvn().transform(bol2);
  Node* iff2 = iff->clone();
  iff2->set_req(0, kit->control());
  iff2->set_req(1, bol2);
  kit->gvn().set_type(iff2, kit->gvn().type(iff));
  Node* equal_path2 = equal_path->clone();
  equal_path2->set_req(0, iff2);
  equal_path2 = kit->gvn().transform(equal_path2);
  Node* not_equal_path2 = not_equal_path->clone();
  not_equal_path2->set_req(0, iff2);
  not_equal_path2 = kit->gvn().transform(not_equal_path2);

  region->init_req(2, equal_path2);
  not_equal_memory = kit->reset_memory();
  not_equal_path = not_equal_path2;

  kit->set_all_memory(not_equal_memory);

  if (alias_a == alias_b) {
    mem_phi->init_req(2, kit->memory(alias_a));
    kit->set_memory(mem_phi, alias_a);
  } else {
    mem_phi->init_req(2, kit->reset_memory());
  }

  kit->record_for_igvn(mem_phi);
  kit->gvn().set_type(mem_phi, Type::MEMORY);

  if (alias_a == alias_b) {
    equal_memory = kit->reset_memory();
  } else {
    equal_memory = mem_phi;
  }

  assert(kit->map()->memory() == NULL, "no live memory state");
  equal_path = kit->gvn().transform(region);

  if (taken_branch == init_equal_path) {
    assert(untaken_branch == init_not_equal_path, "inconsistent");
    taken_branch = equal_path;
    untaken_branch = not_equal_path;
    taken_memory = equal_memory;
    untaken_memory = not_equal_memory;
  } else {
    assert(taken_branch == init_not_equal_path, "inconsistent");
    assert(untaken_branch == init_equal_path, "inconsistent");
    taken_branch = not_equal_path;
    untaken_branch = equal_path;
    taken_memory = not_equal_memory;
    untaken_memory = equal_memory;
  }
}

 936 
 937 // Support for GC barriers emitted during parsing
 938 bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
 939   if (node->Opcode() != Op_CallLeaf) {
 940     return false;
 941   }
 942   CallLeafNode *call = node->as_CallLeaf();
 943   if (call->_name == NULL) {
 944     return false;
 945   }
 946 
 947   return strcmp(call->_name, "shenandoah_clone_barrier") == 0 ||
 948          strcmp(call->_name, "shenandoah_cas_obj") == 0 ||
 949          strcmp(call->_name, "shenandoah_wb_pre") == 0;
 950 }
 951 
 952 Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
 953   // Currently not needed.
 954   return c;
 955 }
 956 
 957 bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(BasicType type) const {
 958   bool is_oop = type == T_OBJECT || type == T_ARRAY;
 959   return is_oop && UseShenandoahMatrix;
 960 }
 961 
 962 // Support for macro expanded GC barriers
 963 void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
 964   if (node->Opcode() == Op_ShenandoahWriteBarrier) {
 965     state()->add_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
 966   }
 967 }
 968 
 969 void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
 970   if (node->Opcode() == Op_ShenandoahWriteBarrier) {
 971     state()->remove_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
 972   }
 973 }
 974 
 975 void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
 976   Compile::current()->shenandoah_eliminate_matrix_update(node, &macro->igvn());
 977 }
 978 
 979 void ShenandoahBarrierSetC2::enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const {
 980 }
 981 
 982 void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful) const {
 983   for (int i = state()->shenandoah_barriers_count()-1; i >= 0; i--) {
 984     ShenandoahWriteBarrierNode* n = state()->shenandoah_barrier(i);
 985     if (!useful.member(n)) {
 986       state()->remove_shenandoah_barrier(n);
 987     }
 988   }
 989 
 990 }
 991 
 992 void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}
 993 
 994 void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
 995   return new(comp_arena) ShenandoahBarrierSetC2State(comp_arena);
 996 }
 997 
 998 ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
 999   return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
1000 }
1001 
1002 // If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
1003 // expanded later, then now is the time to do so.
1004 bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const { return false; }
1005 void ShenandoahBarrierSetC2::verify_gc_barriers(bool post_parse) const {
1006 #ifdef ASSERT
1007   if (ShenandoahVerifyOptoBarriers && !post_parse) {
1008     ShenandoahBarrierNode::verify(Compile::current()->root());
1009   }
1010 #endif
1011 }