/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"

ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _shenandoah_barriers(new (comp_arena) GrowableArray<ShenandoahWriteBarrierNode*>(comp_arena, 8, 0, NULL)) {
}

int ShenandoahBarrierSetC2State::shenandoah_barriers_count() const {
  return _shenandoah_barriers->length();
}

ShenandoahWriteBarrierNode* ShenandoahBarrierSetC2State::shenandoah_barrier(int idx) const {
  return _shenandoah_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_shenandoah_barrier(ShenandoahWriteBarrierNode* n) {
  assert(!_shenandoah_barriers->contains(n), "duplicate entry in barrier list");
  _shenandoah_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_shenandoah_barrier(ShenandoahWriteBarrierNode* n) {
  if (_shenandoah_barriers->contains(n)) {
    _shenandoah_barriers->remove(n);
  }
}

#define __ kit->

Node* ShenandoahBarrierSetC2::shenandoah_read_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahReadBarrier) {
    obj = shenandoah_read_barrier_impl(kit, obj, false, true, true);
  }
  return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahStoreValEnqueueBarrier) {
    obj = shenandoah_write_barrier(kit, obj);
    obj = shenandoah_enqueue_barrier(kit, obj);
  }
  if (ShenandoahStoreValReadBarrier) {
    obj = shenandoah_read_barrier_impl(kit, obj, true, false, false);
  }
  return obj;
}
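
// A sketch of the value chain built above when ShenandoahStoreValEnqueueBarrier
// is set (not emitted verbatim; node names as used in this file):
//
//   obj' = ShenandoahEnqueueBarrier(ShenandoahWriteBarrier(ctrl, mem, obj))
//
// i.e. the stored value is first resolved to its to-space copy, and that copy
// is then enqueued for concurrent marking.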

Node* ShenandoahBarrierSetC2::shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const {
  const Type* obj_type = obj->bottom_type();
  if (obj_type->higher_equal(TypePtr::NULL_PTR)) {
    return obj;
  }
  const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
  Node* mem = use_mem ? __ memory(adr_type) : __ immutable_memory();

  if (!ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, mem, allow_fromspace)) {
    // The barrier is provably not needed for this object.
    return obj;
  }

  if (obj_type->meet(TypePtr::NULL_PTR) == obj_type->remove_speculative()) {

    // We don't know if it's null or not. Need null-check.
    enum { _not_null_path = 1, _null_path, PATH_LIMIT };
    RegionNode* region = new RegionNode(PATH_LIMIT);
    Node*       phi    = new PhiNode(region, obj_type);
    Node* null_ctrl = __ top();
    Node* not_null_obj = __ null_check_oop(obj, &null_ctrl);

    region->init_req(_null_path, null_ctrl);
    phi   ->init_req(_null_path, __ zerocon(T_OBJECT));

    Node* ctrl = use_ctrl ? __ control() : NULL;
    ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, not_null_obj, allow_fromspace);
    Node* n = __ gvn().transform(rb);

    region->init_req(_not_null_path, __ control());
    phi   ->init_req(_not_null_path, n);

    __ set_control(__ gvn().transform(region));
    __ record_for_igvn(region);
    return __ gvn().transform(phi);

  } else {
    // We know it is not null. Simple barrier is sufficient.
    Node* ctrl = use_ctrl ? __ control() : NULL;
    ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, obj, allow_fromspace);
    Node* n = __ gvn().transform(rb);
    __ record_for_igvn(n);
    return n;
  }
}
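
// Shape of the IR emitted above, as a sketch: a maybe-null object gets a
// null-check diamond around the barrier; a provably non-null object gets the
// bare barrier.
//
//   if (obj == NULL) {
//     result = NULL;                                   // null path
//   } else {
//     result = ShenandoahReadBarrier(ctrl, mem, obj);  // resolve via brooks pointer
//   }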

Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const {
  ShenandoahWriteBarrierNode* wb = new ShenandoahWriteBarrierNode(kit->C, kit->control(), kit->memory(adr_type), obj);
  Node* n = __ gvn().transform(wb);
  if (n == wb) { // New barrier needs memory projection.
    Node* proj = __ gvn().transform(new ShenandoahWBMemProjNode(n));
    __ set_memory(proj, adr_type);
  }
  return n;
}
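
// A freshly created write barrier defines a new memory state for the
// brooks-pointer slice; the ShenandoahWBMemProjNode created above publishes
// that state. Resulting shape, as a sketch:
//
//   ShenandoahWriteBarrier(ctrl, mem, obj) --+--> (value uses)
//                                            +--> ShenandoahWBMemProj --> memory(adr_type)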

Node* ShenandoahBarrierSetC2::shenandoah_write_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahWriteBarrier) {
    obj = shenandoah_write_barrier_impl(kit, obj);
  }
  return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const {
  if (!ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, NULL, true)) {
    return obj;
  }
  const Type* obj_type = obj->bottom_type();
  const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
  Node* n = shenandoah_write_barrier_helper(kit, obj, adr_type);
  __ record_for_igvn(n);
  return n;
}

bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
                                                         BasicType bt, uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = __ memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to be non-null here by the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL, so that no previous
        // store has been moved up to write a reference directly.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}
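
// In short, the loop above walks the memory graph upward from the access (at
// most 50 steps), skipping stores that are provably independent, and answers
// true only if it reaches the initialization of the allocation that 'base'
// came from without that initialization capturing an oop store at our offset.
// A sketch:
//
//   mem = memory(adr_idx);
//   repeat (up to 50 times) {
//     if (mem is a provably independent store)     mem = mem->in(MemNode::Memory);
//     else if (mem projects from our Initialize)   return "no captured oop store at offset";
//     else                                         break;   // inscrutable memory state
//   }
//   return false;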

#undef __
#define __ ideal.

void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
                                                    bool do_load,
                                                    Node* obj,
                                                    Node* adr,
                                                    uint alias_idx,
                                                    Node* val,
                                                    const TypeOopPtr* val_type,
                                                    Node* pre_val,
                                                    BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (ReduceInitialCardMarks
        && satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  // Offsets into the thread
  const int index_offset   = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking;
  Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
  Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
  marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
  assert(ShenandoahWriteBarrierNode::is_gc_state_load(ld), "Should match the shape");

  // if (marking != 0)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading SATBMarkQueue::_index with wrong size.");
    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node* log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (marking != 0)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);

  if (ShenandoahSATBBarrier && adr != NULL) {
    Node* c = kit->control();
    Node* call = c->in(1)->in(1)->in(1)->in(0);
    assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
    call->add_req(adr);
  }
}
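
// The IR emitted above corresponds to the following runtime pseudocode (a
// sketch; the field names mirror the thread-local offsets used above):
//
//   if (thread->gc_state & ShenandoahHeap::MARKING) {
//     if (do_load)  pre_val = *adr;
//     if (pre_val != NULL) {
//       if (thread->satb_index != 0) {             // fast path: buffer has room
//         thread->satb_index -= sizeof(intptr_t);
//         *(oop*)(thread->satb_buffer + thread->satb_index) = pre_val;
//       } else {                                   // slow path: buffer is full
//         ShenandoahRuntime::write_ref_field_pre_entry(pre_val, thread);
//       }
//     }
//   }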

bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry);
}

bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform *phase, Node* n) {
  if (n->Opcode() != Op_If) {
    return false;
  }

  Node* bol = n->in(1);
  assert(bol->is_Bool(), "");
  Node* cmpx = bol->in(1);
  if (bol->as_Bool()->_test._test == BoolTest::ne &&
      cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
      is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
      cmpx->in(1)->in(2)->is_Con() &&
      cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) {
    return true;
  }

  return false;
}
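
// The test recognized above has this shape, as a sketch:
//
//   If(Bool(CmpI(AndI(<gc_state load>, #MARKING), #0), ne))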

bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
  if (!n->is_Load()) return false;
  const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
  return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
         && n->in(2)->in(3)->is_Con()
         && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
}
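
// Expected node shape, as a sketch: a load of the current thread's gc_state
// byte, i.e. LoadB(..., AddP(<base>, ThreadLocal, #gc_state_offset)).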

void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
                                                          bool do_load,
                                                          Node* obj,
                                                          Node* adr,
                                                          uint alias_idx,
                                                          Node* val,
                                                          const TypeOopPtr* val_type,
                                                          Node* pre_val,
                                                          BasicType bt) const {
  if (ShenandoahSATBBarrier) {
    IdealKit ideal(kit);
    kit->sync_kit(ideal);

    satb_write_barrier_pre(kit, do_load, obj, adr, alias_idx, val, val_type, pre_val, bt);

    ideal.sync_kit(kit);
    kit->final_sync(ideal);
  }
}

Node* ShenandoahBarrierSetC2::shenandoah_enqueue_barrier(GraphKit* kit, Node* pre_val) const {
  return kit->gvn().transform(new ShenandoahEnqueueBarrierNode(pre_val));
}

// Helper that guards and inserts a pre-barrier.
void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                                Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so,
  // when SATB barriers are enabled, we need to log the value in the referent
  // field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for non-volatile loads from the referent field,
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::referent_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset, but not the referent offset, so just return.
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
      // Update graphKit memory and control from IdealKit.
      kit->sync_kit(ideal);

      Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
      Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

      // Update IdealKit memory and control from graphKit.
      __ sync_kit(kit);

      Node* one = __ ConI(1);
      // is_instof == 0 if base_oop == NULL
      __ if_then(is_instof, BoolTest::eq, one, unlikely); {

        // Update graphKit from IdealKit.
        kit->sync_kit(ideal);

        // Use the pre-barrier to record the value in the referent field
        satb_write_barrier_pre(kit, false /* do_load */,
                               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                               pre_val /* pre_val */,
                               T_OBJECT);
        if (need_mem_bar) {
          // Add memory barrier to prevent commoning reads from this field
          // across safepoint since GC can change its value.
          kit->insert_mem_bar(Op_MemBarCPUOrder);
        }
        // Update IdealKit from graphKit.
        __ sync_kit(kit);

      } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // src object
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_write_barrier_Type() {
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original object
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}
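
// For reference, the domain built by write_ref_field_pre_entry_Type() is meant
// to line up with the runtime entry it is called through (see
// ShenandoahRuntime), presumably:
//
//   void write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread);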

Node* ShenandoahBarrierSetC2::store_at(C2Access& access, C2AccessValue& val) const {
  // TODO: Implement using proper barriers.
  return BarrierSetC2::store_at(access, val);
}

Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();
  GraphKit* kit = access.kit();

  const TypePtr* adr_type = access.addr().type();
  Node* adr = access.addr().node();

  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;

  if (!access.is_oop() || (!on_heap && !anonymous)) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  uint adr_idx = kit->C->get_alias_index(adr_type);
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
  Node* value = val.node();
  value = shenandoah_storeval_barrier(kit, value);
  val.set_node(value);
  shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
                               static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
  return BarrierSetC2::store_at_resolved(access, val);
}

Node* ShenandoahBarrierSetC2::load_at(C2Access& access, const Type* val_type) const {
  // TODO: Implement using proper barriers.
  return BarrierSetC2::load_at(access, val_type);
}

Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  GraphKit* kit = access.kit();

  Node* adr = access.addr().node();
  Node* obj = access.base();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap;

  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : kit->top();
  Node* load = BarrierSetC2::load_at_resolved(access, val_type);

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if SATB is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // We also need to add a memory barrier to prevent commoning reads
  // from this field across safepoints, since GC can change its value.
  bool need_read_barrier = ShenandoahKeepAliveBarrier &&
    (on_heap && (on_weak || (unknown && offset != kit->top() && obj != kit->top())));

  if (!access.is_oop() || !need_read_barrier) {
    return load;
  }

  if (on_weak) {
    // Use the pre-barrier to record the value in the referent field
    satb_write_barrier_pre(kit, false /* do_load */,
                           NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                           load /* pre_val */, T_OBJECT);
    // Add a memory barrier to prevent commoning reads from this field
    // across safepoints, since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}
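
// Summary of the two guarded cases above, as a sketch:
//  - on_weak: the access is statically known to be Reference.referent, so the
//    SATB pre-barrier is emitted unconditionally, followed by a CPU membar.
//  - unknown: the field is only known at runtime (Unsafe/reflection), so
//    insert_pre_barrier() first emits runtime filters on the offset and the
//    receiver type, and only then the same pre-barrier.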

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                             Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_storeval_barrier(kit, val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);
  }
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at(C2AtomicAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* val_type) const {
  // TODO: Implement using proper barriers.
  return BarrierSetC2::atomic_cmpxchg_val_at(access, expected_val, new_val, val_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                              Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_storeval_barrier(kit, val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);
  }
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* val_type) const {
  // TODO: Implement using proper barriers.
  return BarrierSetC2::atomic_cmpxchg_bool_at(access, expected_val, new_val, val_type);
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_storeval_barrier(kit, val);
  }
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
  if (access.is_oop()) {
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 result /* pre_val */, T_OBJECT);
  }
  return result;
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  // TODO: Implement using proper barriers.
  return BarrierSetC2::atomic_xchg_at(access, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  // TODO: Implement using proper barriers.
  return BarrierSetC2::atomic_add_at(access, new_val, value_type);
}

void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
  // TODO: Implement using proper barriers.
  BarrierSetC2::clone(kit, src, dst, size, is_array);
}

Node* ShenandoahBarrierSetC2::resolve_for_read(GraphKit* kit, Node* n) const {
  return shenandoah_read_barrier(kit, n);
}

Node* ShenandoahBarrierSetC2::resolve_for_write(GraphKit* kit, Node* n) const {
  return shenandoah_write_barrier(kit, n);
}

// Support for GC barriers emitted during parsing
bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
    return false;
  }
  CallLeafNode *call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "shenandoah_clone_barrier") == 0 ||
         strcmp(call->_name, "shenandoah_cas_obj") == 0 ||
         strcmp(call->_name, "shenandoah_wb_pre") == 0;
}

Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
  // Currently not needed.
  return c;
}

bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(BasicType type) const {
  return false;
}

// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahWriteBarrier) {
    state()->add_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahWriteBarrier) {
    state()->remove_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const {
  if (is_shenandoah_wb_pre_call(n)) {
    shenandoah_eliminate_wb_pre(n, &macro->igvn());
  }
}

void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
  assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
  Node* c = call->as_Call()->proj_out(TypeFunc::Control);
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
  assert(iff->is_If(), "expect test");
  if (!is_shenandoah_marking_if(igvn, iff)) {
    c = c->unique_ctrl_out();
    assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
    iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
    assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
  }
  Node* cmpx = iff->in(1)->in(1);
  igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
  igvn->rehash_node_delayed(call);
  call->del_req(call->req()-1);
}
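
// Rather than unlinking the pre-barrier subgraph by hand, the code above
// constant-folds the marking test (the CmpI is replaced with an EQ constant)
// and drops the trailing address argument added in satb_write_barrier_pre();
// IGVN then folds the dead barrier code away. The control shape being walked,
// as a sketch:
//
//   call --> Proj(Control) --> Region (index != 0 merge)
//        --> Region (pre_val != NULL merge) [--> Region] --> projections of If(marking)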

void ShenandoahBarrierSetC2::enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const {
}

void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful) const {
  for (int i = state()->shenandoah_barriers_count()-1; i >= 0; i--) {
    ShenandoahWriteBarrierNode* n = state()->shenandoah_barrier(i);
    if (!useful.member(n)) {
      state()->remove_shenandoah_barrier(n);
    }
  }
}

void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}

void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new (comp_arena) ShenandoahBarrierSetC2State(comp_arena);
}

ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
// expanded later, then now is the time to do so.
bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const { return false; }

void ShenandoahBarrierSetC2::verify_gc_barriers(bool post_parse) const {
#ifdef ASSERT
  if (ShenandoahVerifyOptoBarriers && !post_parse) {
    ShenandoahBarrierNode::verify(Compile::current()->root());
  }
#endif
}

Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN *phase, Node* n, bool can_reshape) const {
  if (is_shenandoah_wb_pre_call(n)) {
    uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
    if (n->req() > cnt) {
      Node* addp = n->in(cnt);
      if (has_only_shenandoah_wb_pre_uses(addp)) {
        n->del_req(cnt);
        if (can_reshape) {
          phase->is_IterGVN()->_worklist.push(addp);
        }
        return n;
      }
    }
  }
  return NULL;
}

bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
  if (!UseShenandoahGC) {
    return false;
  }
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!is_shenandoah_wb_pre_call(u)) {
      return false;
    }
  }
  return n->outcnt() > 0;
}