1 /*
   2  * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  27 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  28 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  29 #include "gc/shenandoah/shenandoahForwarding.hpp"
  30 #include "gc/shenandoah/shenandoahHeap.hpp"
  31 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  32 #include "gc/shenandoah/shenandoahRuntime.hpp"
  33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/block.hpp"
  36 #include "opto/callnode.hpp"
  37 #include "opto/castnode.hpp"
  38 #include "opto/movenode.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/runtime.hpp"
  42 #include "opto/subnode.hpp"
  43 
  44 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  45   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  46   if ((state->enqueue_barriers_count() +
  47        state->load_reference_barriers_count()) > 0) {
  48     bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
  49     C->clear_major_progress();
  50     PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
  51     if (C->failing()) return false;
  52     PhaseIdealLoop::verify(igvn);
  53     DEBUG_ONLY(verify_raw_mem(C->root());)
  54     if (attempt_more_loopopts) {
  55       C->set_major_progress();
  56       int cnt = 0;
  57       if (!C->optimize_loops(cnt, igvn, LoopOptsShenandoahPostExpand)) {
  58         return false;
  59       }
  60       C->clear_major_progress();
  61     }
  62   }
  63   return true;
  64 }
  65 
  66 bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) {
  67   if (!UseShenandoahGC) {
  68     return false;
  69   }
  70   assert(iff->is_If(), "bad input");
  71   if (iff->Opcode() != Op_If) {
  72     return false;
  73   }
  74   Node* bol = iff->in(1);
  75   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  76     return false;
  77   }
  78   Node* cmp = bol->in(1);
  79   if (cmp->Opcode() != Op_CmpI) {
  80     return false;
  81   }
  82   Node* in1 = cmp->in(1);
  83   Node* in2 = cmp->in(2);
  84   if (in2->find_int_con(-1) != 0) {
  85     return false;
  86   }
  87   if (in1->Opcode() != Op_AndI) {
  88     return false;
  89   }
  90   in2 = in1->in(2);
  91   if (in2->find_int_con(-1) != mask) {
  92     return false;
  93   }
  94   in1 = in1->in(1);
  95 
  96   return is_gc_state_load(in1);
  97 }
  98 
  99 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
 100   return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
 101 }
 102 
 103 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
 104   if (!UseShenandoahGC) {
 105     return false;
 106   }
 107   if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
 108     return false;
 109   }
 110   Node* addp = n->in(MemNode::Address);
 111   if (!addp->is_AddP()) {
 112     return false;
 113   }
 114   Node* base = addp->in(AddPNode::Address);
 115   Node* off = addp->in(AddPNode::Offset);
 116   if (base->Opcode() != Op_ThreadLocal) {
 117     return false;
 118   }
 119   if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
 120     return false;
 121   }
 122   return true;
 123 }
 124 
 125 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
 126   assert(phase->is_dominator(stop, start), "bad inputs");
 127   ResourceMark rm;
 128   Unique_Node_List wq;
 129   wq.push(start);
 130   for (uint next = 0; next < wq.size(); next++) {
 131     Node *m = wq.at(next);
 132     if (m == stop) {
 133       continue;
 134     }
 135     if (m->is_SafePoint() && !m->is_CallLeaf()) {
 136       return true;
 137     }
 138     if (m->is_Region()) {
 139       for (uint i = 1; i < m->req(); i++) {
 140         wq.push(m->in(i));
 141       }
 142     } else {
 143       wq.push(m->in(0));
 144     }
 145   }
 146   return false;
 147 }
 148 
 149 bool ShenandoahBarrierC2Support::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) {
 150   assert(is_gc_state_load(n), "inconsistent");
 151   Node* addp = n->in(MemNode::Address);
 152   Node* dominator = NULL;
 153   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 154     Node* u = addp->fast_out(i);
 155     assert(is_gc_state_load(u), "inconsistent");
 156     if (u != n && phase->is_dominator(u->in(0), n->in(0))) {
 157       if (dominator == NULL) {
 158         dominator = u;
 159       } else {
 160         if (phase->dom_depth(u->in(0)) < phase->dom_depth(dominator->in(0))) {
 161           dominator = u;
 162         }
 163       }
 164     }
 165   }
 166   if (dominator == NULL || has_safepoint_between(n->in(0), dominator->in(0), phase)) {
 167     return false;
 168   }
 169   phase->igvn().replace_node(n, dominator);
 170 
 171   return true;
 172 }
 173 
 174 #ifdef ASSERT
 175 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
 176   assert(phis.size() == 0, "");
 177 
 178   while (true) {
 179     if (in->bottom_type() == TypePtr::NULL_PTR) {
 180       if (trace) {tty->print_cr("NULL");}
 181     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
 182       if (trace) {tty->print_cr("Non oop");}
 183     } else if (in->bottom_type()->make_ptr()->make_oopptr() == TypeInstPtr::MIRROR) {
 184       if (trace) {tty->print_cr("Java mirror");}
 185     } else {
 186       if (in->is_ConstraintCast()) {
 187         in = in->in(1);
 188         continue;
 189       } else if (in->is_AddP()) {
 190         assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
 191         in = in->in(AddPNode::Address);
 192         continue;
 193       } else if (in->is_Con()) {
 194         if (trace) {
 195           tty->print("Found constant");
 196           in->dump();
 197         }
 198       } else if (in->Opcode() == Op_Parm) {
 199         if (trace) {
 200           tty->print("Found argument");
 201         }
 202       } else if (in->Opcode() == Op_CreateEx) {
 203         if (trace) {
 204           tty->print("Found create-exception");
 205         }
 206       } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
 207         if (trace) {
 208           tty->print("Found raw LoadP (OSR argument?)");
 209         }
 210       } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 211         if (t == ShenandoahOopStore) {
 212           uint i = 0;
 213           for (; i < phis.size(); i++) {
 214             Node* n = phis.node_at(i);
 215             if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
 216               break;
 217             }
 218           }
 219           if (i == phis.size()) {
 220             return false;
 221           }
 222         }
 223         barriers_used.push(in);
 224         if (trace) {tty->print("Found barrier"); in->dump();}
 225       } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
 226         if (t != ShenandoahOopStore) {
 227           in = in->in(1);
 228           continue;
 229         }
 230         if (trace) {tty->print("Found enqueue barrier"); in->dump();}
 231         phis.push(in, in->req());
 232         in = in->in(1);
 233         continue;
 234       } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
 235         if (trace) {
 236           tty->print("Found alloc");
 237           in->in(0)->dump();
 238         }
 239       } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
 240         if (trace) {
 241           tty->print("Found Java call");
 242         }
 243       } else if (in->is_Phi()) {
 244         if (!visited.test_set(in->_idx)) {
 245           if (trace) {tty->print("Pushed phi:"); in->dump();}
 246           phis.push(in, 2);
 247           in = in->in(1);
 248           continue;
 249         }
 250         if (trace) {tty->print("Already seen phi:"); in->dump();}
 251       } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
 252         if (!visited.test_set(in->_idx)) {
 253           if (trace) {tty->print("Pushed cmovep:"); in->dump();}
 254           phis.push(in, CMoveNode::IfTrue);
 255           in = in->in(CMoveNode::IfFalse);
 256           continue;
 257         }
 258         if (trace) {tty->print("Already seen cmovep:"); in->dump();}
 259       } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
 260         in = in->in(1);
 261         continue;
 262       } else {
 263         return false;
 264       }
 265     }
 266     bool cont = false;
 267     while (phis.is_nonempty()) {
 268       uint idx = phis.index();
 269       Node* phi = phis.node();
 270       if (idx >= phi->req()) {
 271         if (trace) {tty->print("Popped phi:"); phi->dump();}
 272         phis.pop();
 273         continue;
 274       }
 275       if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
 276       in = phi->in(idx);
 277       phis.set_index(idx+1);
 278       cont = true;
 279       break;
 280     }
 281     if (!cont) {
 282       break;
 283     }
 284   }
 285   return true;
 286 }
 287 
 288 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
 289   if (n1 != NULL) {
 290     n1->dump(+10);
 291   }
 292   if (n2 != NULL) {
 293     n2->dump(+10);
 294   }
 295   fatal("%s", msg);
 296 }
 297 
 298 void ShenandoahBarrierC2Support::verify(RootNode* root) {
 299   ResourceMark rm;
 300   Unique_Node_List wq;
 301   GrowableArray<Node*> barriers;
 302   Unique_Node_List barriers_used;
 303   Node_Stack phis(0);
 304   VectorSet visited(Thread::current()->resource_area());
 305   const bool trace = false;
 306   const bool verify_no_useless_barrier = false;
 307 
 308   wq.push(root);
 309   for (uint next = 0; next < wq.size(); next++) {
 310     Node *n = wq.at(next);
 311     if (n->is_Load()) {
 312       const bool trace = false;
 313       if (trace) {tty->print("Verifying"); n->dump();}
 314       if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
 315         if (trace) {tty->print_cr("Load range/klass");}
 316       } else {
 317         const TypePtr* adr_type = n->as_Load()->adr_type();
 318 
 319         if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
 320           if (trace) {tty->print_cr("Mark load");}
 321         } else if (adr_type->isa_instptr() &&
 322                    adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
 323                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
 324           if (trace) {tty->print_cr("Reference.get()");}
 325         } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 326           report_verify_failure("Shenandoah verification: Load should have barriers", n);
 327         }
 328       }
 329     } else if (n->is_Store()) {
 330       const bool trace = false;
 331 
 332       if (trace) {tty->print("Verifying"); n->dump();}
 333       if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
 334         Node* adr = n->in(MemNode::Address);
 335         bool verify = true;
 336 
 337         if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
 338           adr = adr->in(AddPNode::Address);
 339           if (adr->is_AddP()) {
 340             assert(adr->in(AddPNode::Base)->is_top(), "");
 341             adr = adr->in(AddPNode::Address);
 342             if (adr->Opcode() == Op_LoadP &&
 343                 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
 344                 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 345                 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
 346               if (trace) {tty->print_cr("SATB prebarrier");}
 347               verify = false;
 348             }
 349           }
 350         }
 351 
 352         if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 353           report_verify_failure("Shenandoah verification: Store should have barriers", n);
 354         }
 355       }
 356       if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 357         report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
 358       }
 359     } else if (n->Opcode() == Op_CmpP) {
 360       const bool trace = false;
 361 
 362       Node* in1 = n->in(1);
 363       Node* in2 = n->in(2);
 364       if (in1->bottom_type()->isa_oopptr()) {
 365         if (trace) {tty->print("Verifying"); n->dump();}
 366 
 367         bool mark_inputs = false;
 368         if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
 369             (in1->is_Con() || in2->is_Con())) {
 370           if (trace) {tty->print_cr("Comparison against a constant");}
 371           mark_inputs = true;
 372         } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
 373                    (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
 374           if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
 375           mark_inputs = true;
 376         } else {
 377           assert(in2->bottom_type()->isa_oopptr(), "");
 378 
 379           if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
 380               !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
 381             report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
 382           }
 383         }
 384         if (verify_no_useless_barrier &&
 385             mark_inputs &&
 386             (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
 387              !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
 388           phis.clear();
 389           visited.Reset();
 390         }
 391       }
 392     } else if (n->is_LoadStore()) {
 393       if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
 394           !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 395         report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
 396       }
 397 
 398       if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 399         report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
 400       }
 401     } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
 402       CallNode* call = n->as_Call();
 403 
 404       static struct {
 405         const char* name;
 406         struct {
 407           int pos;
 408           verify_type t;
 409         } args[6];
 410       } calls[] = {
 411         "aescrypt_encryptBlock",
 412         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 413           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 414         "aescrypt_decryptBlock",
 415         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 416           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 417         "multiplyToLen",
 418         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
 419           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 420         "squareToLen",
 421         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
 422           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 423         "montgomery_multiply",
 424         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 425           { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 426         "montgomery_square",
 427         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
 428           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 429         "mulAdd",
 430         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 431           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 432         "vectorizedMismatch",
 433         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 434           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 435         "updateBytesCRC32",
 436         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 437           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 438         "updateBytesAdler32",
 439         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 440           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 441         "updateBytesCRC32C",
 442         { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
 443           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 444         "counterMode_AESCrypt",
 445         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 446           { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
 447         "cipherBlockChaining_encryptAESCrypt",
 448         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 449           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 450         "cipherBlockChaining_decryptAESCrypt",
 451         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 452           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 453         "shenandoah_clone_barrier",
 454         { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 455           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 456         "ghash_processBlocks",
 457         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 458           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 459         "sha1_implCompress",
 460         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 461           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 462         "sha256_implCompress",
 463         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 464           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 465         "sha512_implCompress",
 466         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 467           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 468         "sha1_implCompressMB",
 469         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 470           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 471         "sha256_implCompressMB",
 472         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 473           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 474         "sha512_implCompressMB",
 475         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 476           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 477         "encodeBlock",
 478         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 479           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 480       };
 481 
 482       if (call->is_call_to_arraycopystub()) {
 483         Node* dest = NULL;
 484         const TypeTuple* args = n->as_Call()->_tf->domain();
 485         for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 486           if (args->field_at(i)->isa_ptr()) {
 487             j++;
 488             if (j == 2) {
 489               dest = n->in(i);
 490               break;
 491             }
 492           }
 493         }
 494         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
 495             !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
 496           report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
 497         }
 498       } else if (strlen(call->_name) > 5 &&
 499                  !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
 500         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
 501           report_verify_failure("Shenandoah verification: _fill should have barriers", n);
 502         }
 503       } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
 504         // skip
 505       } else {
 506         const int calls_len = sizeof(calls) / sizeof(calls[0]);
 507         int i = 0;
 508         for (; i < calls_len; i++) {
 509           if (!strcmp(calls[i].name, call->_name)) {
 510             break;
 511           }
 512         }
 513         if (i != calls_len) {
 514           const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
 515           for (uint j = 0; j < args_len; j++) {
 516             int pos = calls[i].args[j].pos;
 517             if (pos == -1) {
 518               break;
 519             }
 520             if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
 521               report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 522             }
 523           }
 524           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 525             if (call->in(j)->bottom_type()->make_ptr() &&
 526                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 527               uint k = 0;
 528               for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
 529               if (k == args_len) {
 530                 fatal("arg %d for call %s not covered", j, call->_name);
 531               }
 532             }
 533           }
 534         } else {
 535           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 536             if (call->in(j)->bottom_type()->make_ptr() &&
 537                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 538               fatal("%s not covered", call->_name);
 539             }
 540           }
 541         }
 542       }
 543     } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 544       // skip
 545     } else if (n->is_AddP()
 546                || n->is_Phi()
 547                || n->is_ConstraintCast()
 548                || n->Opcode() == Op_Return
 549                || n->Opcode() == Op_CMoveP
 550                || n->Opcode() == Op_CMoveN
 551                || n->Opcode() == Op_Rethrow
 552                || n->is_MemBar()
 553                || n->Opcode() == Op_Conv2B
 554                || n->Opcode() == Op_SafePoint
 555                || n->is_CallJava()
 556                || n->Opcode() == Op_Unlock
 557                || n->Opcode() == Op_EncodeP
 558                || n->Opcode() == Op_DecodeN) {
 559       // nothing to do
 560     } else {
 561       static struct {
 562         int opcode;
 563         struct {
 564           int pos;
 565           verify_type t;
 566         } inputs[2];
 567       } others[] = {
 568         Op_FastLock,
 569         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 570         Op_Lock,
 571         { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
 572         Op_ArrayCopy,
 573         { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
 574         Op_StrCompressedCopy,
 575         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 576         Op_StrInflatedCopy,
 577         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 578         Op_AryEq,
 579         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 580         Op_StrIndexOf,
 581         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 582         Op_StrComp,
 583         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 584         Op_StrEquals,
 585         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 586         Op_EncodeISOArray,
 587         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 588         Op_HasNegatives,
 589         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
 590         Op_CastP2X,
 591         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 592         Op_StrIndexOfChar,
 593         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 594       };
 595 
 596       const int others_len = sizeof(others) / sizeof(others[0]);
 597       int i = 0;
 598       for (; i < others_len; i++) {
 599         if (others[i].opcode == n->Opcode()) {
 600           break;
 601         }
 602       }
 603       uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
 604       if (i != others_len) {
 605         const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
 606         for (uint j = 0; j < inputs_len; j++) {
 607           int pos = others[i].inputs[j].pos;
 608           if (pos == -1) {
 609             break;
 610           }
 611           if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
 612             report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 613           }
 614         }
 615         for (uint j = 1; j < stop; j++) {
 616           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 617               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 618             uint k = 0;
 619             for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
 620             if (k == inputs_len) {
 621               fatal("arg %d for node %s not covered", j, n->Name());
 622             }
 623           }
 624         }
 625       } else {
 626         for (uint j = 1; j < stop; j++) {
 627           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 628               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 629             fatal("%s not covered", n->Name());
 630           }
 631         }
 632       }
 633     }
 634 
 635     if (n->is_SafePoint()) {
 636       SafePointNode* sfpt = n->as_SafePoint();
 637       if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
 638         for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
 639           if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 640             phis.clear();
 641             visited.Reset();
 642           }
 643         }
 644       }
 645     }
 646   }
 647 
 648   if (verify_no_useless_barrier) {
 649     for (int i = 0; i < barriers.length(); i++) {
 650       Node* n = barriers.at(i);
 651       if (!barriers_used.member(n)) {
 652         tty->print("XXX useless barrier"); n->dump(-2);
 653         ShouldNotReachHere();
 654       }
 655     }
 656   }
 657 }
 658 #endif
 659 
 660 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
 661   // That both nodes have the same control is not sufficient to prove
 662   // domination, verify that there's no path from d to n
 663   ResourceMark rm;
 664   Unique_Node_List wq;
 665   wq.push(d);
 666   for (uint next = 0; next < wq.size(); next++) {
 667     Node *m = wq.at(next);
 668     if (m == n) {
 669       return false;
 670     }
 671     if (m->is_Phi() && m->in(0)->is_Loop()) {
 672       assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
 673     } else {
 674       for (uint i = 0; i < m->req(); i++) {
 675         if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
 676           wq.push(m->in(i));
 677         }
 678       }
 679     }
 680   }
 681   return true;
 682 }
 683 
 684 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
 685   if (d_c != n_c) {
 686     return phase->is_dominator(d_c, n_c);
 687   }
 688   return is_dominator_same_ctrl(d_c, d, n, phase);
 689 }
 690 
 691 Node* next_mem(Node* mem, int alias) {
 692   Node* res = NULL;
 693   if (mem->is_Proj()) {
 694     res = mem->in(0);
 695   } else if (mem->is_SafePoint() || mem->is_MemBar()) {
 696     res = mem->in(TypeFunc::Memory);
 697   } else if (mem->is_Phi()) {
 698     res = mem->in(1);
 699   } else if (mem->is_MergeMem()) {
 700     res = mem->as_MergeMem()->memory_at(alias);
 701   } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
 702     assert(alias = Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
 703     res = mem->in(MemNode::Memory);
 704   } else {
 705 #ifdef ASSERT
 706     mem->dump();
 707 #endif
 708     ShouldNotReachHere();
 709   }
 710   return res;
 711 }
 712 
 713 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
 714   Node* iffproj = NULL;
 715   while (c != dom) {
 716     Node* next = phase->idom(c);
 717     assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
 718     if (c->is_Region()) {
 719       ResourceMark rm;
 720       Unique_Node_List wq;
 721       wq.push(c);
 722       for (uint i = 0; i < wq.size(); i++) {
 723         Node *n = wq.at(i);
 724         if (n == next) {
 725           continue;
 726         }
 727         if (n->is_Region()) {
 728           for (uint j = 1; j < n->req(); j++) {
 729             wq.push(n->in(j));
 730           }
 731         } else {
 732           wq.push(n->in(0));
 733         }
 734       }
 735       for (uint i = 0; i < wq.size(); i++) {
 736         Node *n = wq.at(i);
 737         assert(n->is_CFG(), "");
 738         if (n->is_Multi()) {
 739           for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 740             Node* u = n->fast_out(j);
 741             if (u->is_CFG()) {
 742               if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 743                 return NodeSentinel;
 744               }
 745             }
 746           }
 747         }
 748       }
 749     } else  if (c->is_Proj()) {
 750       if (c->is_IfProj()) {
 751         if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
 752           // continue;
 753         } else {
 754           if (!allow_one_proj) {
 755             return NodeSentinel;
 756           }
 757           if (iffproj == NULL) {
 758             iffproj = c;
 759           } else {
 760             return NodeSentinel;
 761           }
 762         }
 763       } else if (c->Opcode() == Op_JumpProj) {
 764         return NodeSentinel; // unsupported
 765       } else if (c->Opcode() == Op_CatchProj) {
 766         return NodeSentinel; // unsupported
 767       } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
 768         return NodeSentinel; // unsupported
 769       } else {
 770         assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
 771       }
 772     }
 773     c = next;
 774   }
 775   return iffproj;
 776 }
 777 
 778 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
 779   ResourceMark rm;
 780   VectorSet wq(Thread::current()->resource_area());
 781   wq.set(mem->_idx);
 782   mem_ctrl = phase->ctrl_or_self(mem);
 783   while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
 784     mem = next_mem(mem, alias);
 785     if (wq.test_set(mem->_idx)) {
 786       return NULL;
 787     }
 788     mem_ctrl = phase->ctrl_or_self(mem);
 789   }
 790   if (mem->is_MergeMem()) {
 791     mem = mem->as_MergeMem()->memory_at(alias);
 792     mem_ctrl = phase->ctrl_or_self(mem);
 793   }
 794   return mem;
 795 }
 796 
 797 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
 798   Node* mem = NULL;
 799   Node* c = ctrl;
 800   do {
 801     if (c->is_Region()) {
 802       Node* phi_bottom = NULL;
 803       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
 804         Node* u = c->fast_out(i);
 805         if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
 806           if (u->adr_type() == TypePtr::BOTTOM) {
 807             mem = u;
 808           }
 809         }
 810       }
 811     } else {
 812       if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
 813         CallProjections projs;
 814         c->as_Call()->extract_projections(&projs, true, false);
 815         if (projs.fallthrough_memproj != NULL) {
 816           if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
 817             if (projs.catchall_memproj == NULL) {
 818               mem = projs.fallthrough_memproj;
 819             } else {
 820               if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
 821                 mem = projs.fallthrough_memproj;
 822               } else {
 823                 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
 824                 mem = projs.catchall_memproj;
 825               }
 826             }
 827           }
 828         } else {
 829           Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
 830           if (proj != NULL &&
 831               proj->adr_type() == TypePtr::BOTTOM) {
 832             mem = proj;
 833           }
 834         }
 835       } else {
 836         for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
 837           Node* u = c->fast_out(i);
 838           if (u->is_Proj() &&
 839               u->bottom_type() == Type::MEMORY &&
 840               u->adr_type() == TypePtr::BOTTOM) {
 841               assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
 842               assert(mem == NULL, "only one proj");
 843               mem = u;
 844           }
 845         }
 846         assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
 847       }
 848     }
 849     c = phase->idom(c);
 850   } while (mem == NULL);
 851   return mem;
 852 }
 853 
 854 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
 855   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 856     Node* u = n->fast_out(i);
 857     if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
 858       uses.push(u);
 859     }
 860   }
 861 }
 862 
 863 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
 864   OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
 865   Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
 866   phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
 867   Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
 868   phase->register_control(new_le, phase->get_loop(le), le->in(0));
 869   phase->lazy_replace(outer, new_outer);
 870   phase->lazy_replace(le, new_le);
 871   inner->clear_strip_mined();
 872 }
 873 
 874 void ShenandoahBarrierC2Support::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
 875                                                   PhaseIdealLoop* phase) {
 876   IdealLoopTree* loop = phase->get_loop(ctrl);
 877   Node* thread = new ThreadLocalNode();
 878   phase->register_new_node(thread, ctrl);
 879   Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 880   phase->set_ctrl(offset, phase->C->root());
 881   Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
 882   phase->register_new_node(gc_state_addr, ctrl);
 883   uint gc_state_idx = Compile::AliasIdxRaw;
 884   const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
 885   debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
 886 
 887   Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
 888   phase->register_new_node(gc_state, ctrl);
 889   Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED));
 890   phase->register_new_node(heap_stable_and, ctrl);
 891   Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
 892   phase->register_new_node(heap_stable_cmp, ctrl);
 893   Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
 894   phase->register_new_node(heap_stable_test, ctrl);
 895   IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 896   phase->register_control(heap_stable_iff, loop, ctrl);
 897 
 898   heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
 899   phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
 900   ctrl = new IfTrueNode(heap_stable_iff);
 901   phase->register_control(ctrl, loop, heap_stable_iff);
 902 
 903   assert(is_heap_stable_test(heap_stable_iff), "Should match the shape");
 904 }
 905 
 906 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
 907   const Type* val_t = phase->igvn().type(val);
 908   if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
 909     IdealLoopTree* loop = phase->get_loop(ctrl);
 910     Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
 911     phase->register_new_node(null_cmp, ctrl);
 912     Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
 913     phase->register_new_node(null_test, ctrl);
 914     IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
 915     phase->register_control(null_iff, loop, ctrl);
 916     ctrl = new IfTrueNode(null_iff);
 917     phase->register_control(ctrl, loop, null_iff);
 918     null_ctrl = new IfFalseNode(null_iff);
 919     phase->register_control(null_ctrl, loop, null_iff);
 920   }
 921 }
 922 
 923 Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
 924   IdealLoopTree *loop = phase->get_loop(c);
 925   Node* iff = unc_ctrl->in(0);
 926   assert(iff->is_If(), "broken");
 927   Node* new_iff = iff->clone();
 928   new_iff->set_req(0, c);
 929   phase->register_control(new_iff, loop, c);
 930   Node* iffalse = new IfFalseNode(new_iff->as_If());
 931   phase->register_control(iffalse, loop, new_iff);
 932   Node* iftrue = new IfTrueNode(new_iff->as_If());
 933   phase->register_control(iftrue, loop, new_iff);
 934   c = iftrue;
 935   const Type *t = phase->igvn().type(val);
 936   assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
 937   Node* uncasted_val = val->in(1);
 938   val = new CastPPNode(uncasted_val, t);
 939   val->init_req(0, c);
 940   phase->register_new_node(val, c);
 941   return val;
 942 }
 943 
 944 void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
 945                                                 Unique_Node_List& uses, PhaseIdealLoop* phase) {
 946   IfNode* iff = unc_ctrl->in(0)->as_If();
 947   Node* proj = iff->proj_out(0);
 948   assert(proj != unc_ctrl, "bad projection");
 949   Node* use = proj->unique_ctrl_out();
 950 
 951   assert(use == unc || use->is_Region(), "what else?");
 952 
 953   uses.clear();
 954   if (use == unc) {
 955     phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
 956     for (uint i = 1; i < unc->req(); i++) {
 957       Node* n = unc->in(i);
 958       if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
 959         uses.push(n);
 960       }
 961     }
 962   } else {
 963     assert(use->is_Region(), "what else?");
 964     uint idx = 1;
 965     for (; use->in(idx) != proj; idx++);
 966     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
 967       Node* u = use->fast_out(i);
 968       if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
 969         uses.push(u->in(idx));
 970       }
 971     }
 972   }
 973   for(uint next = 0; next < uses.size(); next++ ) {
 974     Node *n = uses.at(next);
 975     assert(phase->get_ctrl(n) == proj, "bad control");
 976     phase->set_ctrl_and_loop(n, new_unc_ctrl);
 977     if (n->in(0) == proj) {
 978       phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
 979     }
 980     for (uint i = 0; i < n->req(); i++) {
 981       Node* m = n->in(i);
 982       if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
 983         uses.push(m);
 984       }
 985     }
 986   }
 987 
 988   phase->igvn().rehash_node_delayed(use);
 989   int nb = use->replace_edge(proj, new_unc_ctrl);
 990   assert(nb == 1, "only use expected");
 991 }
 992 
 993 void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
 994   IdealLoopTree *loop = phase->get_loop(ctrl);
 995   Node* raw_rbtrue = new CastP2XNode(ctrl, val);
 996   phase->register_new_node(raw_rbtrue, ctrl);
 997   Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
 998   phase->register_new_node(cset_offset, ctrl);
 999   Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
1000   phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
1001   Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
1002   phase->register_new_node(in_cset_fast_test_adr, ctrl);
1003   uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
1004   const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
1005   debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
1006   Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
1007   phase->register_new_node(in_cset_fast_test_load, ctrl);
1008   Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
1009   phase->register_new_node(in_cset_fast_test_cmp, ctrl);
1010   Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
1011   phase->register_new_node(in_cset_fast_test_test, ctrl);
1012   IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
1013   phase->register_control(in_cset_fast_test_iff, loop, ctrl);
1014 
1015   not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
1016   phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);
1017 
1018   ctrl = new IfFalseNode(in_cset_fast_test_iff);
1019   phase->register_control(ctrl, loop, in_cset_fast_test_iff);
1020 }
1021 
1022 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) {
1023   IdealLoopTree*loop = phase->get_loop(ctrl);
1024   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
1025 
1026   // The slow path stub consumes and produces raw memory in addition
1027   // to the existing memory edges
1028   Node* base = find_bottom_mem(ctrl, phase);
1029   MergeMemNode* mm = MergeMemNode::make(base);
1030   mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1031   phase->register_new_node(mm, ctrl);
1032 
1033   address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
1034           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) :
1035           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier);
1036 
1037   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(),
1038                                 target,
1039                                 "shenandoah_load_reference_barrier", TypeRawPtr::BOTTOM);
1040   call->init_req(TypeFunc::Control, ctrl);
1041   call->init_req(TypeFunc::I_O, phase->C->top());
1042   call->init_req(TypeFunc::Memory, mm);
1043   call->init_req(TypeFunc::FramePtr, phase->C->top());
1044   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1045   call->init_req(TypeFunc::Parms, val);
1046   call->init_req(TypeFunc::Parms+1, load_addr);
1047   phase->register_control(call, loop, ctrl);
1048   ctrl = new ProjNode(call, TypeFunc::Control);
1049   phase->register_control(ctrl, loop, call);
1050   result_mem = new ProjNode(call, TypeFunc::Memory);
1051   phase->register_new_node(result_mem, call);
1052   val = new ProjNode(call, TypeFunc::Parms);
1053   phase->register_new_node(val, call);
1054   val = new CheckCastPPNode(ctrl, val, obj_type);
1055   phase->register_new_node(val, ctrl);
1056 }
1057 
1058 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
1059   Node* ctrl = phase->get_ctrl(barrier);
1060   Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
1061 
1062   // Update the control of all nodes that should be after the
1063   // barrier control flow
1064   uses.clear();
1065   // Every node that is control dependent on the barrier's input
1066   // control will be after the expanded barrier. The raw memory (if
1067   // its memory is control dependent on the barrier's input control)
1068   // must stay above the barrier.
1069   uses_to_ignore.clear();
1070   if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1071     uses_to_ignore.push(init_raw_mem);
1072   }
1073   for (uint next = 0; next < uses_to_ignore.size(); next++) {
1074     Node *n = uses_to_ignore.at(next);
1075     for (uint i = 0; i < n->req(); i++) {
1076       Node* in = n->in(i);
1077       if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
1078         uses_to_ignore.push(in);
1079       }
1080     }
1081   }
1082   for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
1083     Node* u = ctrl->fast_out(i);
1084     if (u->_idx < last &&
1085         u != barrier &&
1086         !uses_to_ignore.member(u) &&
1087         (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
1088         (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
1089       Node* old_c = phase->ctrl_or_self(u);
1090       Node* c = old_c;
1091       if (c != ctrl ||
1092           is_dominator_same_ctrl(old_c, barrier, u, phase) ||
1093           ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
1094         phase->igvn().rehash_node_delayed(u);
1095         int nb = u->replace_edge(ctrl, region);
1096         if (u->is_CFG()) {
1097           if (phase->idom(u) == ctrl) {
1098             phase->set_idom(u, region, phase->dom_depth(region));
1099           }
1100         } else if (phase->get_ctrl(u) == ctrl) {
1101           assert(u != init_raw_mem, "should leave input raw mem above the barrier");
1102           uses.push(u);
1103         }
1104         assert(nb == 1, "more than 1 ctrl input?");
1105         --i, imax -= nb;
1106       }
1107     }
1108   }
1109 }
1110 
1111 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1112   Node* region = NULL;
1113   while (c != ctrl) {
1114     if (c->is_Region()) {
1115       region = c;
1116     }
1117     c = phase->idom(c);
1118   }
1119   assert(region != NULL, "");
1120   Node* phi = new PhiNode(region, n->bottom_type());
1121   for (uint j = 1; j < region->req(); j++) {
1122     Node* in = region->in(j);
1123     if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1124       phi->init_req(j, n);
1125     } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1126       phi->init_req(j, n_clone);
1127     } else {
1128       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1129     }
1130   }
1131   phase->register_new_node(phi, region);
1132   return phi;
1133 }
1134 
1135 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1136   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1137 
1138   Unique_Node_List uses;
1139   for (int i = 0; i < state->enqueue_barriers_count(); i++) {
1140     Node* barrier = state->enqueue_barrier(i);
1141     Node* ctrl = phase->get_ctrl(barrier);
1142     IdealLoopTree* loop = phase->get_loop(ctrl);
1143     if (loop->_head->is_OuterStripMinedLoop()) {
1144       // Expanding a barrier here will break loop strip mining
1145       // verification. Transform the loop so the loop nest doesn't
1146       // appear as strip mined.
1147       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1148       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1149     }
1150   }
1151 
1152   Node_Stack stack(0);
1153   Node_List clones;
1154   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1155     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1156     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1157       continue;
1158     }
1159 
1160     Node* ctrl = phase->get_ctrl(lrb);
1161     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1162 
1163     CallStaticJavaNode* unc = NULL;
1164     Node* unc_ctrl = NULL;
1165     Node* uncasted_val = val;
1166 
1167     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1168       Node* u = lrb->fast_out(i);
1169       if (u->Opcode() == Op_CastPP &&
1170           u->in(0) != NULL &&
1171           phase->is_dominator(u->in(0), ctrl)) {
1172         const Type* u_t = phase->igvn().type(u);
1173 
1174         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1175             u->in(0)->Opcode() == Op_IfTrue &&
1176             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1177             u->in(0)->in(0)->is_If() &&
1178             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1179             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1180             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1181             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1182             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1183           IdealLoopTree* loop = phase->get_loop(ctrl);
1184           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1185 
1186           if (!unc_loop->is_member(loop)) {
1187             continue;
1188           }
1189 
1190           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1191           assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1192           if (branch == NodeSentinel) {
1193             continue;
1194           }
1195 
1196           phase->igvn().replace_input_of(u, 1, val);
1197           phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
1198           phase->set_ctrl(u, u->in(0));
1199           phase->set_ctrl(lrb, u->in(0));
1200           unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1201           unc_ctrl = u->in(0);
1202           val = u;
1203 
1204           for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
1205             Node* u = val->fast_out(j);
1206             if (u == lrb) continue;
1207             phase->igvn().rehash_node_delayed(u);
1208             int nb = u->replace_edge(val, lrb);
1209             --j; jmax -= nb;
1210           }
1211 
1212           RegionNode* r = new RegionNode(3);
1213           IfNode* iff = unc_ctrl->in(0)->as_If();
1214 
1215           Node* ctrl_use = unc_ctrl->unique_ctrl_out();
1216           Node* unc_ctrl_clone = unc_ctrl->clone();
1217           phase->register_control(unc_ctrl_clone, loop, iff);
1218           Node* c = unc_ctrl_clone;
1219           Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
1220           r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
1221 
1222           phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
1223           phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
1224           phase->lazy_replace(c, unc_ctrl);
1225           c = NULL;;
1226           phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
1227           phase->set_ctrl(val, unc_ctrl_clone);
1228 
1229           IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
1230           fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
1231           Node* iff_proj = iff->proj_out(0);
1232           r->init_req(2, iff_proj);
1233           phase->register_control(r, phase->ltree_root(), iff);
1234 
1235           Node* new_bol = new_iff->in(1)->clone();
1236           Node* new_cmp = new_bol->in(1)->clone();
1237           assert(new_cmp->Opcode() == Op_CmpP, "broken");
1238           assert(new_cmp->in(1) == val->in(1), "broken");
1239           new_bol->set_req(1, new_cmp);
1240           new_cmp->set_req(1, lrb);
1241           phase->register_new_node(new_bol, new_iff->in(0));
1242           phase->register_new_node(new_cmp, new_iff->in(0));
1243           phase->igvn().replace_input_of(new_iff, 1, new_bol);
1244           phase->igvn().replace_input_of(new_cast, 1, lrb);
1245 
1246           for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1247             Node* u = lrb->fast_out(i);
1248             if (u == new_cast || u == new_cmp) {
1249               continue;
1250             }
1251             phase->igvn().rehash_node_delayed(u);
1252             int nb = u->replace_edge(lrb, new_cast);
1253             assert(nb > 0, "no update?");
1254             --i; imax -= nb;
1255           }
1256 
1257           for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1258             Node* u = val->fast_out(i);
1259             if (u == lrb) {
1260               continue;
1261             }
1262             phase->igvn().rehash_node_delayed(u);
1263             int nb = u->replace_edge(val, new_cast);
1264             assert(nb > 0, "no update?");
1265             --i; imax -= nb;
1266           }
1267 
1268           ctrl = unc_ctrl_clone;
1269           phase->set_ctrl_and_loop(lrb, ctrl);
1270           break;
1271         }
1272       }
1273     }
1274     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1275       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1276       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1277         // The rethrow call may have too many projections to be
1278         // properly handled here. Given there's no reason for a
1279         // barrier to depend on the call, move it above the call.
1280         if (phase->get_ctrl(val) == ctrl) {
1281           assert(val->Opcode() == Op_DecodeN, "unexpected node");
1282           assert(phase->is_dominator(phase->get_ctrl(val->in(1)), call->in(0)), "Load is too low");
1283           phase->set_ctrl(val, call->in(0));
1284         }
1285         phase->set_ctrl(lrb, call->in(0));
1286         continue;
1287       }
1288       CallProjections projs;
1289       call->extract_projections(&projs, false, false);
1290 
1291       Node* lrb_clone = lrb->clone();
1292       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1293       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1294 
1295       stack.push(lrb, 0);
1296       clones.push(lrb_clone);
1297 
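           // Depth-first walk over the barrier's data uses: nodes whose control
           // sits between the call and its fall-through projection are cloned for
           // the catchall path; uses dominated by the catchall projection are
           // redirected to the clones, and uses that merge both paths get phis
           // from create_phis_on_call_return().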
1298       do {
1299         assert(stack.size() == clones.size(), "");
1300         Node* n = stack.node();
1301 #ifdef ASSERT
1302         if (n->is_Load()) {
1303           Node* mem = n->in(MemNode::Memory);
1304           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1305             Node* u = mem->fast_out(j);
1306             assert((!u->is_Store() && !u->is_LoadStore()) || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1307           }
1308         }
1309 #endif
1310         uint idx = stack.index();
1311         Node* n_clone = clones.at(clones.size()-1);
1312         if (idx < n->outcnt()) {
1313           Node* u = n->raw_out(idx);
1314           Node* c = phase->ctrl_or_self(u);
1315           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1316             stack.set_index(idx+1);
1317             assert(!u->is_CFG(), "");
1318             stack.push(u, 0);
1319             Node* u_clone = u->clone();
1320             int nb = u_clone->replace_edge(n, n_clone);
1321             assert(nb > 0, "should have replaced some uses");
1322             phase->register_new_node(u_clone, projs.catchall_catchproj);
1323             clones.push(u_clone);
1324             phase->set_ctrl(u, projs.fallthrough_catchproj);
1325           } else {
1326             bool replaced = false;
1327             if (u->is_Phi()) {
1328               for (uint k = 1; k < u->req(); k++) {
1329                 if (u->in(k) == n) {
1330                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1331                     phase->igvn().replace_input_of(u, k, n_clone);
1332                     replaced = true;
1333                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1334                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1335                     replaced = true;
1336                   }
1337                 }
1338               }
1339             } else {
1340               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1341                 phase->igvn().rehash_node_delayed(u);
1342                 int nb = u->replace_edge(n, n_clone);
1343                 assert(nb > 0, "should have replaced some uses");
1344                 replaced = true;
1345               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1346                 phase->igvn().rehash_node_delayed(u);
1347                 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1348                 assert(nb > 0, "should have replaced some uses");
1349                 replaced = true;
1350               }
1351             }
1352             if (!replaced) {
1353               stack.set_index(idx+1);
1354             }
1355           }
1356         } else {
1357           stack.pop();
1358           clones.pop();
1359         }
1360       } while (stack.size() > 0);
1361       assert(stack.size() == 0 && clones.size() == 0, "");
1362     }
1363   }
1364 
1365   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1366     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1367     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1368       continue;
1369     }
1370     Node* ctrl = phase->get_ctrl(lrb);
1371     IdealLoopTree* loop = phase->get_loop(ctrl);
1372     if (loop->_head->is_OuterStripMinedLoop()) {
1373       // Expanding a barrier here will break loop strip mining
1374       // verification. Transform the loop so the loop nest doesn't
1375       // appear as strip mined.
1376       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1377       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1378     }
1379   }
1380 
1381   // Expand load-reference-barriers
1382   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1383   Unique_Node_List uses_to_ignore;
1384   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1385     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1386     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1387       phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1388       continue;
1389     }
1390     uint last = phase->C->unique();
1391     Node* ctrl = phase->get_ctrl(lrb);
1392     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1393 
1394 
1395     Node* orig_ctrl = ctrl;
1396 
1397     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1398     Node* init_raw_mem = raw_mem;
1399     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1400 
1401     IdealLoopTree *loop = phase->get_loop(ctrl);
1402     CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
1403     Node* unc_ctrl = NULL;
1404     if (unc != NULL) {
1405       if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
1406         unc = NULL;
1407       } else {
1408         unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
1409       }
1410     }
1411 
1412     Node* uncasted_val = val;
1413     if (unc != NULL) {
1414       uncasted_val = val->in(1);
1415     }
1416 
1417     Node* heap_stable_ctrl = NULL;
1418     Node* null_ctrl = NULL;
1419 
1420     assert(val->bottom_type()->make_oopptr(), "need oop");
1421     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1422 
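         // Expand the barrier into a multi-way region. Roughly:
         //
         //   if (heap stable)         result = val;            // _heap_stable
         //   else if (val == NULL)    result = val;            // _null_path (when needed)
         //   else if (!in_cset(val))  result = val;            // _not_cset
         //   else                     result = LRB stub call;  // _evac_path
         //
         // val_phi merges the resulting oop and raw_mem_phi merges the raw
         // memory state across the paths.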
1423     enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT };
1424     Node* region = new RegionNode(PATH_LIMIT);
1425     Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
1426     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1427 
1428     // Stable path.
1429     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1430     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1431 
1432     // Heap stable case
1433     region->init_req(_heap_stable, heap_stable_ctrl);
1434     val_phi->init_req(_heap_stable, uncasted_val);
1435     raw_mem_phi->init_req(_heap_stable, raw_mem);
1436 
1437     Node* reg2_ctrl = NULL;
1438     // Null case
1439     test_null(ctrl, val, null_ctrl, phase);
1440     if (null_ctrl != NULL) {
1441       reg2_ctrl = null_ctrl->in(0);
1442       region->init_req(_null_path, null_ctrl);
1443       val_phi->init_req(_null_path, uncasted_val);
1444       raw_mem_phi->init_req(_null_path, raw_mem);
1445     } else {
1446       region->del_req(_null_path);
1447       val_phi->del_req(_null_path);
1448       raw_mem_phi->del_req(_null_path);
1449     }
1450 
1451     // Test for in-cset.
1452     // Wires !in_cset(obj) to slot 2 of region and phis
1453     Node* not_cset_ctrl = NULL;
1454     in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
1455     if (not_cset_ctrl != NULL) {
1456       if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
1457       region->init_req(_not_cset, not_cset_ctrl);
1458       val_phi->init_req(_not_cset, uncasted_val);
1459       raw_mem_phi->init_req(_not_cset, raw_mem);
1460     }
1461 
1462     // Resolve object when orig-value is in cset.
1463     // Make the unconditional resolve for fwdptr.
1464     Node* new_val = uncasted_val;
1465     if (unc_ctrl != NULL) {
1466       // Clone the null check in this branch to allow implicit null check
1467       new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
1468       fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
1469 
1470       IfNode* iff = unc_ctrl->in(0)->as_If();
1471       phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
1472     }
1473 
1474     // Call lrb-stub and wire up that path in the _evac_path slot
1475     Node* result_mem = NULL;
1476 
1477     Node* fwd = new_val;
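         // With ShenandoahSelfFixing, recover the address the value was loaded
         // from and hand it to the stub (presumably so the stub can write the
         // healed oop back to that location); otherwise pass a null address.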
1478     Node* addr;
1479     if (ShenandoahSelfFixing) {
1480       VectorSet visited(Thread::current()->resource_area());
1481       addr = get_load_addr(phase, visited, lrb);
1482     } else {
1483       addr = phase->igvn().zerocon(T_OBJECT);
1484     }
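         // If the address is an AddP chain, re-base it on a CheckCastPP of the
         // base pinned at ctrl, so the rebuilt address computation cannot float
         // above this point.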
1485     if (addr->Opcode() == Op_AddP) {
1486       Node* orig_base = addr->in(AddPNode::Base);
1487       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), true);
1488       phase->register_new_node(base, ctrl);
1489     if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1490         // Field access
1491         addr = addr->clone();
1492         addr->set_req(AddPNode::Base, base);
1493         addr->set_req(AddPNode::Address, base);
1494         phase->register_new_node(addr, ctrl);
1495       } else {
1496         Node* addr2 = addr->in(AddPNode::Address);
1497         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1498               addr2->in(AddPNode::Base) == orig_base) {
1499           addr2 = addr2->clone();
1500           addr2->set_req(AddPNode::Base, base);
1501           addr2->set_req(AddPNode::Address, base);
1502           phase->register_new_node(addr2, ctrl);
1503           addr = addr->clone();
1504           addr->set_req(AddPNode::Base, base);
1505           addr->set_req(AddPNode::Address, addr2);
1506           phase->register_new_node(addr, ctrl);
1507         }
1508       }
1509     }
1510     call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, false, phase);
1511     region->init_req(_evac_path, ctrl);
1512     val_phi->init_req(_evac_path, fwd);
1513     raw_mem_phi->init_req(_evac_path, result_mem);
1514 
1515     phase->register_control(region, loop, heap_stable_iff);
1516     Node* out_val = val_phi;
1517     phase->register_new_node(val_phi, region);
1518     phase->register_new_node(raw_mem_phi, region);
1519 
1520     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1521 
1522     ctrl = orig_ctrl;
1523 
1524     if (unc != NULL) {
1525       for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1526         Node* u = val->fast_out(i);
1527         Node* c = phase->ctrl_or_self(u);
1528         if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
1529           phase->igvn().rehash_node_delayed(u);
1530           int nb = u->replace_edge(val, out_val);
1531           --i; imax -= nb;
1532         }
1533       }
1534       if (val->outcnt() == 0) {
1535         phase->igvn()._worklist.push(val);
1536       }
1537     }
1538     phase->igvn().replace_node(lrb, out_val);
1539 
1540     follow_barrier_uses(out_val, ctrl, uses, phase);
1541 
1542     for (uint next = 0; next < uses.size(); next++) {
1543       Node *n = uses.at(next);
1544       assert(phase->get_ctrl(n) == ctrl, "bad control");
1545       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1546       phase->set_ctrl(n, region);
1547       follow_barrier_uses(n, ctrl, uses, phase);
1548     }
1549 
1550     // The slow path call produces memory: hook the raw memory phi
1551     // of the expanded load reference barrier into the rest of the graph,
1552     // which may require adding memory phis at every post-dominated
1553     // region and at enclosing loop heads. Use the memory state
1554     // collected in _memory_nodes to fix the memory graph, updating
1555     // that state as we go.
1556     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1557   }
1558   // Done expanding load-reference-barriers.
1559   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1560 
1561   for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
1562     Node* barrier = state->enqueue_barrier(i);
1563     Node* pre_val = barrier->in(1);
1564 
1565     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1566       ShouldNotReachHere();
1567       continue;
1568     }
1569 
1570     Node* ctrl = phase->get_ctrl(barrier);
1571 
1572     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1573       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1574       ctrl = ctrl->in(0)->in(0);
1575       phase->set_ctrl(barrier, ctrl);
1576     } else if (ctrl->is_CallRuntime()) {
1577       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1578       ctrl = ctrl->in(0);
1579       phase->set_ctrl(barrier, ctrl);
1580     }
1581 
1582     Node* init_ctrl = ctrl;
1583     IdealLoopTree* loop = phase->get_loop(ctrl);
1584     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1585     Node* init_raw_mem = raw_mem;
1586     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1587     Node* heap_stable_ctrl = NULL;
1588     Node* null_ctrl = NULL;
1589     uint last = phase->C->unique();
1590 
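         // Expand the SATB enqueue (pre-)barrier. The expanded shape is roughly:
         //
         //   if (heap stable)            { /* skip */ }
         //   else if (pre_val == NULL)   { /* skip */ }
         //   else if (queue index != 0)  { index -= sizeof(intptr_t);
         //                                 buffer[index] = pre_val; }                      // fast path
         //   else                        { write_ref_field_pre_entry(pre_val, thread); }   // slow path
         //
         // region/phi merge control and raw memory for the stable/unstable split,
         // region2/phi2 for the null/fast/slow paths under the unstable case.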
1591     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1592     Node* region = new RegionNode(PATH_LIMIT);
1593     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1594 
1595     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1596     Node* region2 = new RegionNode(PATH_LIMIT2);
1597     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1598 
1599     // Stable path.
1600     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1601     region->init_req(_heap_stable, heap_stable_ctrl);
1602     phi->init_req(_heap_stable, raw_mem);
1603 
1604     // Null path
1605     Node* reg2_ctrl = NULL;
1606     test_null(ctrl, pre_val, null_ctrl, phase);
1607     if (null_ctrl != NULL) {
1608       reg2_ctrl = null_ctrl->in(0);
1609       region2->init_req(_null_path, null_ctrl);
1610       phi2->init_req(_null_path, raw_mem);
1611     } else {
1612       region2->del_req(_null_path);
1613       phi2->del_req(_null_path);
1614     }
1615 
1616     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1617     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1618     Node* thread = new ThreadLocalNode();
1619     phase->register_new_node(thread, ctrl);
1620     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1621     phase->register_new_node(buffer_adr, ctrl);
1622     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1623     phase->register_new_node(index_adr, ctrl);
1624 
1625     BasicType index_bt = TypeX_X->basic_type();
1626     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
1627     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1628     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1629     phase->register_new_node(index, ctrl);
1630     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1631     phase->register_new_node(index_cmp, ctrl);
1632     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1633     phase->register_new_node(index_test, ctrl);
1634     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1635     if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1636     phase->register_control(queue_full_iff, loop, ctrl);
1637     Node* not_full = new IfTrueNode(queue_full_iff);
1638     phase->register_control(not_full, loop, queue_full_iff);
1639     Node* full = new IfFalseNode(queue_full_iff);
1640     phase->register_control(full, loop, queue_full_iff);
1641 
1642     ctrl = not_full;
1643 
1644     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1645     phase->register_new_node(next_index, ctrl);
1646 
1647     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1648     phase->register_new_node(buffer, ctrl);
1649     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1650     phase->register_new_node(log_addr, ctrl);
1651     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1652     phase->register_new_node(log_store, ctrl);
1653     // update the index
1654     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1655     phase->register_new_node(index_update, ctrl);
1656 
1657     // Fast-path case
1658     region2->init_req(_fast_path, ctrl);
1659     phi2->init_req(_fast_path, index_update);
1660 
1661     ctrl = full;
1662 
1663     Node* base = find_bottom_mem(ctrl, phase);
1664 
1665     MergeMemNode* mm = MergeMemNode::make(base);
1666     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1667     phase->register_new_node(mm, ctrl);
1668 
1669     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1670     call->init_req(TypeFunc::Control, ctrl);
1671     call->init_req(TypeFunc::I_O, phase->C->top());
1672     call->init_req(TypeFunc::Memory, mm);
1673     call->init_req(TypeFunc::FramePtr, phase->C->top());
1674     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1675     call->init_req(TypeFunc::Parms, pre_val);
1676     call->init_req(TypeFunc::Parms+1, thread);
1677     phase->register_control(call, loop, ctrl);
1678 
1679     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1680     phase->register_control(ctrl_proj, loop, call);
1681     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1682     phase->register_new_node(mem_proj, call);
1683 
1684     // Slow-path case
1685     region2->init_req(_slow_path, ctrl_proj);
1686     phi2->init_req(_slow_path, mem_proj);
1687 
1688     phase->register_control(region2, loop, reg2_ctrl);
1689     phase->register_new_node(phi2, region2);
1690 
1691     region->init_req(_heap_unstable, region2);
1692     phi->init_req(_heap_unstable, phi2);
1693 
1694     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1695     phase->register_new_node(phi, region);
1696 
1697     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1698     for (uint next = 0; next < uses.size(); next++) {
1699       Node *n = uses.at(next);
1700       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1701       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1702       phase->set_ctrl(n, region);
1703       follow_barrier_uses(n, init_ctrl, uses, phase);
1704     }
1705     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1706 
1707     phase->igvn().replace_node(barrier, pre_val);
1708   }
1709   assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1710 
1711 }
1712 
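     // Walk back through value-preserving nodes (casts, projections, phis,
     // CMoves, other barriers) to recover the address the given value was
     // loaded from. Returns NULL when a cycle is hit, the load address when it
     // is unambiguous, and a null constant when no single address can be
     // determined (constants, call results, atomics that stored another value).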
1713 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1714   if (visited.test_set(in->_idx)) {
1715     return NULL;
1716   }
1717   switch (in->Opcode()) {
1718     case Op_Proj:
1719       return get_load_addr(phase, visited, in->in(0));
1720     case Op_CastPP:
1721     case Op_CheckCastPP:
1722     case Op_DecodeN:
1723     case Op_EncodeP:
1724       return get_load_addr(phase, visited, in->in(1));
1725     case Op_LoadN:
1726     case Op_LoadP:
1727       return in->in(MemNode::Address);
1728     case Op_CompareAndExchangeN:
1729     case Op_CompareAndExchangeP:
1730     case Op_GetAndSetN:
1731     case Op_GetAndSetP:
1732     case Op_ShenandoahCompareAndExchangeP:
1733     case Op_ShenandoahCompareAndExchangeN:
1734       // Those instructions would just have stored a different
1735       // value into the field. No use to attempt to fix it at this point.
1736       return phase->igvn().zerocon(T_OBJECT);
1737     case Op_CMoveP:
1738     case Op_CMoveN: {
1739       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1740       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1741       // Handle unambiguous cases: single address reported on both branches.
1742       if (t != NULL && f == NULL) return t;
1743       if (t == NULL && f != NULL) return f;
1744       if (t != NULL && t == f)    return t;
1745       // Ambiguity.
1746       return phase->igvn().zerocon(T_OBJECT);
1747     }
1748     case Op_Phi: {
1749       Node* addr = NULL;
1750       for (uint i = 1; i < in->req(); i++) {
1751         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1752         if (addr == NULL) {
1753           addr = addr1;
1754         }
1755         if (addr != addr1) {
1756           return phase->igvn().zerocon(T_OBJECT);
1757         }
1758       }
1759       return addr;
1760     }
1761     case Op_ShenandoahLoadReferenceBarrier:
1762       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1763     case Op_ShenandoahEnqueueBarrier:
1764       return get_load_addr(phase, visited, in->in(1));
1765     case Op_CallDynamicJava:
1766     case Op_CallLeaf:
1767     case Op_CallStaticJava:
1768     case Op_ConN:
1769     case Op_ConP:
1770     case Op_Parm:
1771       return phase->igvn().zerocon(T_OBJECT);
1772     default:
1773 #ifdef ASSERT
1774       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1775 #endif
1776       return phase->igvn().zerocon(T_OBJECT);
1777   }
1778 
1779 }
1780 
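     // If the gc state load feeding this heap stable test is not available at
     // the loop entry, clone the load/and/cmp/bool chain above the loop head so
     // the test becomes loop invariant and can be used for unswitching.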
1781 void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1782   IdealLoopTree *loop = phase->get_loop(iff);
1783   Node* loop_head = loop->_head;
1784   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1785 
1786   Node* bol = iff->in(1);
1787   Node* cmp = bol->in(1);
1788   Node* andi = cmp->in(1);
1789   Node* load = andi->in(1);
1790 
1791   assert(is_gc_state_load(load), "broken");
1792   if (!phase->is_dominator(load->in(0), entry_c)) {
1793     Node* mem_ctrl = NULL;
1794     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1795     load = load->clone();
1796     load->set_req(MemNode::Memory, mem);
1797     load->set_req(0, entry_c);
1798     phase->register_new_node(load, entry_c);
1799     andi = andi->clone();
1800     andi->set_req(1, load);
1801     phase->register_new_node(andi, entry_c);
1802     cmp = cmp->clone();
1803     cmp->set_req(1, andi);
1804     phase->register_new_node(cmp, entry_c);
1805     bol = bol->clone();
1806     bol->set_req(1, cmp);
1807     phase->register_new_node(bol, entry_c);
1808 
1809     Node* old_bol = iff->in(1);
1810     phase->igvn().replace_input_of(iff, 1, bol);
1811   }
1812 }
1813 
1814 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1815   if (!n->is_If() || n->is_CountedLoopEnd()) {
1816     return false;
1817   }
1818   Node* region = n->in(0);
1819 
1820   if (!region->is_Region()) {
1821     return false;
1822   }
1823   Node* dom = phase->idom(region);
1824   if (!dom->is_If()) {
1825     return false;
1826   }
1827 
1828   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1829     return false;
1830   }
1831 
1832   IfNode* dom_if = dom->as_If();
1833   Node* proj_true = dom_if->proj_out(1);
1834   Node* proj_false = dom_if->proj_out(0);
1835 
1836   for (uint i = 1; i < region->req(); i++) {
1837     if (phase->is_dominator(proj_true, region->in(i))) {
1838       continue;
1839     }
1840     if (phase->is_dominator(proj_false, region->in(i))) {
1841       continue;
1842     }
1843     return false;
1844   }
1845 
1846   return true;
1847 }
1848 
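     // If this heap stable test is immediately dominated by an identical test
     // (every input of its region is dominated by one of the dominating if's
     // projections), replace its condition with a phi of constants derived from
     // the dominating test's outcome and let split-if remove the second test.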
1849 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1850   assert(is_heap_stable_test(n), "no other tests");
1851   if (identical_backtoback_ifs(n, phase)) {
1852     Node* n_ctrl = n->in(0);
1853     if (phase->can_split_if(n_ctrl)) {
1854       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1855       if (is_heap_stable_test(n)) {
1856         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1857         assert(is_gc_state_load(gc_state_load), "broken");
1858         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1859         assert(is_gc_state_load(dom_gc_state_load), "broken");
1860         if (gc_state_load != dom_gc_state_load) {
1861           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1862         }
1863       }
1864       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1865       Node* proj_true = dom_if->proj_out(1);
1866       Node* proj_false = dom_if->proj_out(0);
1867       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1868       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1869 
1870       for (uint i = 1; i < n_ctrl->req(); i++) {
1871         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1872           bolphi->init_req(i, con_true);
1873         } else {
1874           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1875           bolphi->init_req(i, con_false);
1876         }
1877       }
1878       phase->register_new_node(bolphi, n_ctrl);
1879       phase->igvn().replace_input_of(n, 1, bolphi);
1880       phase->do_split_if(n);
1881     }
1882   }
1883 }
1884 
1885 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1886   // Find first invariant test that doesn't exit the loop
1887   LoopNode *head = loop->_head->as_Loop();
1888   IfNode* unswitch_iff = NULL;
1889   Node* n = head->in(LoopNode::LoopBackControl);
1890   int loop_has_sfpts = -1;
1891   while (n != head) {
1892     Node* n_dom = phase->idom(n);
1893     if (n->is_Region()) {
1894       if (n_dom->is_If()) {
1895         IfNode* iff = n_dom->as_If();
1896         if (iff->in(1)->is_Bool()) {
1897           BoolNode* bol = iff->in(1)->as_Bool();
1898           if (bol->in(1)->is_Cmp()) {
1899             // If condition is invariant and not a loop exit,
1900             // then found reason to unswitch.
1901             if (is_heap_stable_test(iff) &&
1902                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1903               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1904               if (loop_has_sfpts == -1) {
1905                 for (uint i = 0; i < loop->_body.size(); i++) {
1906                   Node *m = loop->_body[i];
1907                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1908                     loop_has_sfpts = 1;
1909                     break;
1910                   }
1911                 }
1912                 if (loop_has_sfpts == -1) {
1913                   loop_has_sfpts = 0;
1914                 }
1915               }
1916               if (!loop_has_sfpts) {
1917                 unswitch_iff = iff;
1918               }
1919             }
1920           }
1921         }
1922       }
1923     }
1924     n = n_dom;
1925   }
1926   return unswitch_iff;
1927 }
1928 
1929 
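     // Post-expansion cleanup: collect gc state loads and heap stable tests in
     // a single graph walk, try to common the loads, merge back-to-back heap
     // stable tests, and, when no major progress is pending, unswitch innermost
     // loops on a loop-invariant heap stable test where profitable.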
1930 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1931   Node_List heap_stable_tests;
1932   Node_List gc_state_loads;
1933   stack.push(phase->C->start(), 0);
1934   do {
1935     Node* n = stack.node();
1936     uint i = stack.index();
1937 
1938     if (i < n->outcnt()) {
1939       Node* u = n->raw_out(i);
1940       stack.set_index(i+1);
1941       if (!visited.test_set(u->_idx)) {
1942         stack.push(u, 0);
1943       }
1944     } else {
1945       stack.pop();
1946       if (ShenandoahCommonGCStateLoads && is_gc_state_load(n)) {
1947         gc_state_loads.push(n);
1948       }
1949       if (n->is_If() && is_heap_stable_test(n)) {
1950         heap_stable_tests.push(n);
1951       }
1952     }
1953   } while (stack.size() > 0);
1954 
1955   bool progress;
1956   do {
1957     progress = false;
1958     for (uint i = 0; i < gc_state_loads.size(); i++) {
1959       Node* n = gc_state_loads.at(i);
1960       if (n->outcnt() != 0) {
1961         progress |= try_common_gc_state_load(n, phase);
1962       }
1963     }
1964   } while (progress);
1965 
1966   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1967     Node* n = heap_stable_tests.at(i);
1968     assert(is_heap_stable_test(n), "only evacuation test");
1969     merge_back_to_back_tests(n, phase);
1970   }
1971 
1972   if (!phase->C->major_progress()) {
1973     VectorSet seen(Thread::current()->resource_area());
1974     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1975       Node* n = heap_stable_tests.at(i);
1976       IdealLoopTree* loop = phase->get_loop(n);
1977       if (loop != phase->ltree_root() &&
1978           loop->_child == NULL &&
1979           !loop->_irreducible) {
1980         LoopNode* head = loop->_head->as_Loop();
1981         if ((!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1982             !seen.test_set(head->_idx)) {
1983           IfNode* iff = find_unswitching_candidate(loop, phase);
1984           if (iff != NULL) {
1985             Node* bol = iff->in(1);
1986             if (head->is_strip_mined()) {
1987               head->verify_strip_mined(0);
1988             }
1989             move_heap_stable_test_out_of_loop(iff, phase);
1990             if (loop->policy_unswitching(phase)) {
1991               if (head->is_strip_mined()) {
1992                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
1993                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
1994               }
1995               phase->do_unswitching(loop, old_new);
1996             } else {
1997               // Not proceeding with unswitching. Move load back in
1998               // the loop.
1999               phase->igvn().replace_input_of(iff, 1, bol);
2000             }
2001           }
2002         }
2003       }
2004     }
2005   }
2006 }
2007 
2008 #ifdef ASSERT
2009 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
2010   const bool trace = false;
2011   ResourceMark rm;
2012   Unique_Node_List nodes;
2013   Unique_Node_List controls;
2014   Unique_Node_List memories;
2015 
2016   nodes.push(root);
2017   for (uint next = 0; next < nodes.size(); next++) {
2018     Node *n  = nodes.at(next);
2019     if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
2020       controls.push(n);
2021       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
2022       for (uint next2 = 0; next2 < controls.size(); next2++) {
2023         Node *m = controls.at(next2);
2024         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2025           Node* u = m->fast_out(i);
2026           if (u->is_CFG() && !u->is_Root() &&
2027               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
2028               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
2029             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
2030             controls.push(u);
2031           }
2032         }
2033       }
2034       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
2035       for (uint next2 = 0; next2 < memories.size(); next2++) {
2036         Node *m = memories.at(next2);
2037         assert(m->bottom_type() == Type::MEMORY, "");
2038         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2039           Node* u = m->fast_out(i);
2040           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
2041             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2042             memories.push(u);
2043           } else if (u->is_LoadStore()) {
2044             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
2045             memories.push(u->find_out_with(Op_SCMemProj));
2046           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
2047             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2048             memories.push(u);
2049           } else if (u->is_Phi()) {
2050             assert(u->bottom_type() == Type::MEMORY, "");
2051             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
2052               assert(controls.member(u->in(0)), "");
2053               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2054               memories.push(u);
2055             }
2056           } else if (u->is_SafePoint() || u->is_MemBar()) {
2057             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2058               Node* uu = u->fast_out(j);
2059               if (uu->bottom_type() == Type::MEMORY) {
2060                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
2061                 memories.push(uu);
2062               }
2063             }
2064           }
2065         }
2066       }
2067       for (uint next2 = 0; next2 < controls.size(); next2++) {
2068         Node *m = controls.at(next2);
2069         if (m->is_Region()) {
2070           bool all_in = true;
2071           for (uint i = 1; i < m->req(); i++) {
2072             if (!controls.member(m->in(i))) {
2073               all_in = false;
2074               break;
2075             }
2076           }
2077           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
2078           bool found_phi = false;
2079           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
2080             Node* u = m->fast_out(j);
2081             if (u->is_Phi() && memories.member(u)) {
2082               found_phi = true;
2083               for (uint i = 1; i < u->req() && found_phi; i++) {
2084                 Node* k = u->in(i);
2085                 if (memories.member(k) != controls.member(m->in(i))) {
2086                   found_phi = false;
2087                 }
2088               }
2089             }
2090           }
2091           assert(found_phi || all_in, "");
2092         }
2093       }
2094       controls.clear();
2095       memories.clear();
2096     }
2097     for (uint i = 0; i < n->len(); ++i) {
2098       Node *m = n->in(i);
2099       if (m != NULL) {
2100         nodes.push(m);
2101       }
2102     }
2103   }
2104 }
2105 #endif
2106 
2107 ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
2108   ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
2109 }
2110 
2111 const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
2112   if (in(1) == NULL || in(1)->is_top()) {
2113     return Type::TOP;
2114   }
2115   const Type* t = in(1)->bottom_type();
2116   if (t == TypePtr::NULL_PTR) {
2117     return t;
2118   }
2119   return t->is_oopptr();
2120 }
2121 
2122 const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
2123   if (in(1) == NULL) {
2124     return Type::TOP;
2125   }
2126   const Type* t = phase->type(in(1));
2127   if (t == Type::TOP) {
2128     return Type::TOP;
2129   }
2130   if (t == TypePtr::NULL_PTR) {
2131     return t;
2132   }
2133   return t->is_oopptr();
2134 }
2135 
2136 int ShenandoahEnqueueBarrierNode::needed(Node* n) {
2137   if (n == NULL ||
2138       n->is_Allocate() ||
2139       n->Opcode() == Op_ShenandoahEnqueueBarrier ||
2140       n->bottom_type() == TypePtr::NULL_PTR ||
2141       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2142     return NotNeeded;
2143   }
2144   if (n->is_Phi() ||
2145       n->is_CMove()) {
2146     return MaybeNeeded;
2147   }
2148   return Needed;
2149 }
2150 
2151 Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
2152   for (;;) {
2153     if (n == NULL) {
2154       return n;
2155     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2156       return n;
2157     } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2158       return n;
2159     } else if (n->is_ConstraintCast() ||
2160                n->Opcode() == Op_DecodeN ||
2161                n->Opcode() == Op_EncodeP) {
2162       n = n->in(1);
2163     } else if (n->is_Proj()) {
2164       n = n->in(0);
2165     } else {
2166       return n;
2167     }
2168   }
2169   ShouldNotReachHere();
2170   return NULL;
2171 }
2172 
2173 Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
2174   PhaseIterGVN* igvn = phase->is_IterGVN();
2175 
2176   Node* n = next(in(1));
2177 
2178   int cont = needed(n);
2179 
2180   if (cont == NotNeeded) {
2181     return in(1);
2182   } else if (cont == MaybeNeeded) {
2183     if (igvn == NULL) {
2184       phase->record_for_igvn(this);
2185       return this;
2186     } else {
2187       ResourceMark rm;
2188       Unique_Node_List wq;
2189       uint wq_i = 0;
2190 
2191       for (;;) {
2192         if (n->is_Phi()) {
2193           for (uint i = 1; i < n->req(); i++) {
2194             Node* m = n->in(i);
2195             if (m != NULL) {
2196               wq.push(m);
2197             }
2198           }
2199         } else {
2200           assert(n->is_CMove(), "nothing else here");
2201           Node* m = n->in(CMoveNode::IfFalse);
2202           wq.push(m);
2203           m = n->in(CMoveNode::IfTrue);
2204           wq.push(m);
2205         }
2206         Node* orig_n = NULL;
2207         do {
2208           if (wq_i >= wq.size()) {
2209             return in(1);
2210           }
2211           n = wq.at(wq_i);
2212           wq_i++;
2213           orig_n = n;
2214           n = next(n);
2215           cont = needed(n);
2216           if (cont == Needed) {
2217             return this;
2218           }
2219         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2220       }
2221     }
2222   }
2223 
2224   return this;
2225 }
2226 
2227 #ifdef ASSERT
2228 static bool has_never_branch(Node* root) {
2229   for (uint i = 1; i < root->req(); i++) {
2230     Node* in = root->in(i);
2231     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2232       return true;
2233     }
2234   }
2235   return false;
2236 }
2237 #endif
2238 
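     // Build a mapping from CFG nodes to the raw memory state live at them:
     // first walk the memory graph and record, for each control, the last
     // memory def it contains; then propagate those states over the CFG in
     // reverse post order, creating or reusing memory phis at regions whose
     // predecessors disagree.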
2239 void MemoryGraphFixer::collect_memory_nodes() {
2240   Node_Stack stack(0);
2241   VectorSet visited(Thread::current()->resource_area());
2242   Node_List regions;
2243 
2244   // Walk the raw memory graph and create a mapping from CFG node to
2245   // memory node. Exclude phis for now.
2246   stack.push(_phase->C->root(), 1);
2247   do {
2248     Node* n = stack.node();
2249     int opc = n->Opcode();
2250     uint i = stack.index();
2251     if (i < n->req()) {
2252       Node* mem = NULL;
2253       if (opc == Op_Root) {
2254         Node* in = n->in(i);
2255         int in_opc = in->Opcode();
2256         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2257           mem = in->in(TypeFunc::Memory);
2258         } else if (in_opc == Op_Halt) {
2259           if (!in->in(0)->is_Region()) {
2260             Node* proj = in->in(0);
2261             assert(proj->is_Proj(), "");
2262             Node* in = proj->in(0);
2263             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2264             if (in->is_CallStaticJava()) {
2265               mem = in->in(TypeFunc::Memory);
2266             } else if (in->Opcode() == Op_Catch) {
2267               Node* call = in->in(0)->in(0);
2268               assert(call->is_Call(), "");
2269               mem = call->in(TypeFunc::Memory);
2270             } else if (in->Opcode() == Op_NeverBranch) {
2271               ResourceMark rm;
2272               Unique_Node_List wq;
2273               wq.push(in);
2274               wq.push(in->as_Multi()->proj_out(0));
2275               for (uint j = 1; j < wq.size(); j++) {
2276                 Node* c = wq.at(j);
2277                 assert(!c->is_Root(), "shouldn't leave loop");
2278                 if (c->is_SafePoint()) {
2279                   assert(mem == NULL, "only one safepoint");
2280                   mem = c->in(TypeFunc::Memory);
2281                 }
2282                 for (DUIterator_Fast kmax, k = c->fast_outs(kmax); k < kmax; k++) {
2283                   Node* u = c->fast_out(k);
2284                   if (u->is_CFG()) {
2285                     wq.push(u);
2286                   }
2287                 }
2288               }
2289               assert(mem != NULL, "should have found safepoint");
2290             }
2291           }
2292         } else {
2293 #ifdef ASSERT
2294           n->dump();
2295           in->dump();
2296 #endif
2297           ShouldNotReachHere();
2298         }
2299       } else {
2300         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2301         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2302         mem = n->in(i);
2303       }
2304       i++;
2305       stack.set_index(i);
2306       if (mem == NULL) {
2307         continue;
2308       }
2309       for (;;) {
2310         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2311           break;
2312         }
2313         if (mem->is_Phi()) {
2314           stack.push(mem, 2);
2315           mem = mem->in(1);
2316         } else if (mem->is_Proj()) {
2317           stack.push(mem, mem->req());
2318           mem = mem->in(0);
2319         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2320           mem = mem->in(TypeFunc::Memory);
2321         } else if (mem->is_MergeMem()) {
2322           MergeMemNode* mm = mem->as_MergeMem();
2323           mem = mm->memory_at(_alias);
2324         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2325           assert(_alias == Compile::AliasIdxRaw, "");
2326           stack.push(mem, mem->req());
2327           mem = mem->in(MemNode::Memory);
2328         } else {
2329 #ifdef ASSERT
2330           mem->dump();
2331 #endif
2332           ShouldNotReachHere();
2333         }
2334       }
2335     } else {
2336       if (n->is_Phi()) {
2337         // Nothing
2338       } else if (!n->is_Root()) {
2339         Node* c = get_ctrl(n);
2340         _memory_nodes.map(c->_idx, n);
2341       }
2342       stack.pop();
2343     }
2344   } while (stack.is_nonempty());
2345 
2346   // Iterate over CFG nodes in rpo and propagate memory state to
2347   // compute memory state at regions, creating new phis if needed.
2348   Node_List rpo_list;
2349   visited.Clear();
2350   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2351   Node* root = rpo_list.pop();
2352   assert(root == _phase->C->root(), "");
2353 
2354   const bool trace = false;
2355 #ifdef ASSERT
2356   if (trace) {
2357     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2358       Node* c = rpo_list.at(i);
2359       if (_memory_nodes[c->_idx] != NULL) {
2360         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2361       }
2362     }
2363   }
2364 #endif
2365   uint last = _phase->C->unique();
2366 
2367 #ifdef ASSERT
2368   uint8_t max_depth = 0;
2369   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2370     IdealLoopTree* lpt = iter.current();
2371     max_depth = MAX2(max_depth, lpt->_nest);
2372   }
2373 #endif
2374 
2375   bool progress = true;
2376   int iteration = 0;
2377   Node_List dead_phis;
2378   while (progress) {
2379     progress = false;
2380     iteration++;
2381     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2382     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2383     IdealLoopTree* last_updated_ilt = NULL;
2384     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2385       Node* c = rpo_list.at(i);
2386 
2387       Node* prev_mem = _memory_nodes[c->_idx];
2388       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2389         Node* prev_region = regions[c->_idx];
2390         Node* unique = NULL;
2391         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2392           Node* m = _memory_nodes[c->in(j)->_idx];
2393           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2394           if (m != NULL) {
2395             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2396             assert((c->is_Loop() && j == LoopNode::LoopBackControl) || _phase->C->has_irreducible_loop(), "");
2397               // continue
2398             } else if (unique == NULL) {
2399               unique = m;
2400             } else if (m == unique) {
2401               // continue
2402             } else {
2403               unique = NodeSentinel;
2404             }
2405           }
2406         }
2407         assert(unique != NULL, "empty phi???");
2408         if (unique != NodeSentinel) {
2409           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2410             dead_phis.push(prev_region);
2411           }
2412           regions.map(c->_idx, unique);
2413         } else {
2414           Node* phi = NULL;
2415           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2416             phi = prev_region;
2417             for (uint k = 1; k < c->req(); k++) {
2418               Node* m = _memory_nodes[c->in(k)->_idx];
2419               assert(m != NULL, "expect memory state");
2420               phi->set_req(k, m);
2421             }
2422           } else {
2423             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2424               Node* u = c->fast_out(j);
2425               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2426                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2427                 phi = u;
2428                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2429                   Node* m = _memory_nodes[c->in(k)->_idx];
2430                   assert(m != NULL, "expect memory state");
2431                   if (u->in(k) != m) {
2432                     phi = NULL;
2433                   }
2434                 }
2435               }
2436             }
2437             if (phi == NULL) {
2438               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2439               for (uint k = 1; k < c->req(); k++) {
2440                 Node* m = _memory_nodes[c->in(k)->_idx];
2441                 assert(m != NULL, "expect memory state");
2442                 phi->init_req(k, m);
2443               }
2444             }
2445           }
2446           assert(phi != NULL, "");
2447           regions.map(c->_idx, phi);
2448         }
2449         Node* current_region = regions[c->_idx];
2450         if (current_region != prev_region) {
2451           progress = true;
2452           if (prev_region == prev_mem) {
2453             _memory_nodes.map(c->_idx, current_region);
2454           }
2455         }
2456       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2457         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2458         assert(m != NULL, "expect memory state");
2459         if (m != prev_mem) {
2460           _memory_nodes.map(c->_idx, m);
2461           progress = true;
2462         }
2463       }
2464 #ifdef ASSERT
2465       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2466 #endif
2467     }
2468   }
2469 
2470   // Replace existing phi with computed memory state for that region
2471   // if different (could be a new phi or a dominating memory node if
2472   // that phi was found to be useless).
2473   while (dead_phis.size() > 0) {
2474     Node* n = dead_phis.pop();
2475     n->replace_by(_phase->C->top());
2476     n->destruct();
2477   }
2478   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2479     Node* c = rpo_list.at(i);
2480     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2481       Node* n = regions[c->_idx];
2482       if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2483         _phase->register_new_node(n, c);
2484       }
2485     }
2486   }
2487   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2488     Node* c = rpo_list.at(i);
2489     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2490       Node* n = regions[c->_idx];
2491       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2492         Node* u = c->fast_out(i);
2493         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2494             u != n) {
2495           if (u->adr_type() == TypePtr::BOTTOM) {
2496             fix_memory_uses(u, n, n, c);
2497           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2498             _phase->lazy_replace(u, n);
2499             --i; --imax;
2500           }
2501         }
2502       }
2503     }
2504   }
2505 }
2506 
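     // Control for a memory node: for memory projections of calls with an
     // exception edge, use the matching catch projection (fall-through or
     // catch-all) rather than the call itself.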
2507 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2508   Node* c = _phase->get_ctrl(n);
2509   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2510     assert(c == n->in(0), "");
2511     CallNode* call = c->as_Call();
2512     CallProjections projs;
2513     call->extract_projections(&projs, true, false);
2514     if (projs.catchall_memproj != NULL) {
2515       if (projs.fallthrough_memproj == n) {
2516         c = projs.fallthrough_catchproj;
2517       } else {
2518         assert(projs.catchall_memproj == n, "");
2519         c = projs.catchall_catchproj;
2520       }
2521     }
2522   }
2523   return c;
2524 }
2525 
2526 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2527   if (_phase->has_ctrl(n))
2528     return get_ctrl(n);
2529   else {
2530     assert (n->is_CFG(), "must be a CFG node");
2531     return n;
2532   }
2533 }
2534 
2535 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2536   return m != NULL && get_ctrl(m) == c;
2537 }
2538 
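     // Find the memory state for this alias that is live at ctrl: walk up the
     // dominator tree until a memory node whose control matches is found; if a
     // consumer n is supplied, also step back past memory defs at that control
     // that do not precede n.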
2539 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2540   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
2541   Node* mem = _memory_nodes[ctrl->_idx];
2542   Node* c = ctrl;
2543   while (!mem_is_valid(mem, c) &&
2544          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2545     c = _phase->idom(c);
2546     mem = _memory_nodes[c->_idx];
2547   }
2548   if (n != NULL && mem_is_valid(mem, c)) {
2549     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2550       mem = next_mem(mem, _alias);
2551     }
2552     if (mem->is_MergeMem()) {
2553       mem = mem->as_MergeMem()->memory_at(_alias);
2554     }
2555     if (!mem_is_valid(mem, c)) {
2556       do {
2557         c = _phase->idom(c);
2558         mem = _memory_nodes[c->_idx];
2559       } while (!mem_is_valid(mem, c) &&
2560                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2561     }
2562   }
2563   assert(mem->bottom_type() == Type::MEMORY, "");
2564   return mem;
2565 }
2566 
2567 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2568   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2569     Node* use = region->fast_out(i);
2570     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2571         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2572       return true;
2573     }
2574   }
2575   return false;
2576 }
2577 
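     // Splice the new memory state new_mem, produced at new_ctrl, into the raw
     // memory graph: either rewire the next memory def below mem_for_ctrl to
     // consume it, or, when the state at ctrl was the last on this slice, walk
     // the CFG below new_ctrl and create/extend memory phis at regions that now
     // see different states on different paths.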
2578 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2579   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2580   const bool trace = false;
2581   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2582   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2583   GrowableArray<Node*> phis;
2584   if (mem_for_ctrl != mem) {
2585     Node* old = mem_for_ctrl;
2586     Node* prev = NULL;
2587     while (old != mem) {
2588       prev = old;
2589       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2590         assert(_alias == Compile::AliasIdxRaw, "");
2591         old = old->in(MemNode::Memory);
2592       } else if (old->Opcode() == Op_SCMemProj) {
2593         assert(_alias == Compile::AliasIdxRaw, "");
2594         old = old->in(0);
2595       } else {
2596         ShouldNotReachHere();
2597       }
2598     }
2599     assert(prev != NULL, "");
2600     if (new_ctrl != ctrl) {
2601       _memory_nodes.map(ctrl->_idx, mem);
2602       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2603     }
2604     uint input = (uint)MemNode::Memory;
2605     _phase->igvn().replace_input_of(prev, input, new_mem);
2606   } else {
2607     uses.clear();
2608     _memory_nodes.map(new_ctrl->_idx, new_mem);
2609     uses.push(new_ctrl);
2610     for (uint next = 0; next < uses.size(); next++) {
2611       Node *n = uses.at(next);
2612       assert(n->is_CFG(), "");
2613       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2614       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2615         Node* u = n->fast_out(i);
2616         if (!u->is_Root() && u->is_CFG() && u != n) {
2617           Node* m = _memory_nodes[u->_idx];
2618           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2619               !has_mem_phi(u) &&
2620               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2621             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2622             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2623 
2624             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2625               bool push = true;
2626               bool create_phi = true;
2627               if (_phase->is_dominator(new_ctrl, u)) {
2628                 create_phi = false;
2629               } else if (!_phase->C->has_irreducible_loop()) {
2630                 IdealLoopTree* loop = _phase->get_loop(ctrl);
2631                 bool do_check = true;
2632                 IdealLoopTree* l = loop;
2633                 create_phi = false;
2634                 while (l != _phase->ltree_root()) {
2635                   Node* head = l->_head;
2636                   if (head->in(0) == NULL) {
2637                     head = _phase->get_ctrl(head);
2638                   }
2639                   if (_phase->is_dominator(head, u) && _phase->is_dominator(_phase->idom(u), head)) {
2640                     create_phi = true;
2641                     do_check = false;
2642                     break;
2643                   }
2644                   l = l->_parent;
2645                 }
2646 
2647                 if (do_check) {
2648                   assert(!create_phi, "");
2649                   IdealLoopTree* u_loop = _phase->get_loop(u);
2650                   if (u_loop != _phase->ltree_root() && u_loop->is_member(loop)) {
2651                     Node* c = ctrl;
2652                     while (!_phase->is_dominator(c, u_loop->tail())) {
2653                       c = _phase->idom(c);
2654                     }
2655                     if (!_phase->is_dominator(c, u)) {
2656                       do_check = false;
2657                     }
2658                   }
2659                 }
2660 
2661                 if (do_check && _phase->is_dominator(_phase->idom(u), new_ctrl)) {
2662                   create_phi = true;
2663                 }
2664               }
2665               if (create_phi) {
2666                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2667                 _phase->register_new_node(phi, u);
2668                 phis.push(phi);
2669                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2670                 if (!mem_is_valid(m, u)) {
2671                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2672                   _memory_nodes.map(u->_idx, phi);
2673                 } else {
2674                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2675                   for (;;) {
2676                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2677                     Node* next = NULL;
2678                     if (m->is_Proj()) {
2679                       next = m->in(0);
2680                     } else {
2681                       assert(m->is_Mem() || m->is_LoadStore(), "");
2682                       assert(_alias == Compile::AliasIdxRaw, "");
2683                       next = m->in(MemNode::Memory);
2684                     }
2685                     if (_phase->get_ctrl(next) != u) {
2686                       break;
2687                     }
2688                     if (next->is_MergeMem()) {
2689                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2690                       break;
2691                     }
2692                     if (next->is_Phi()) {
2693                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2694                       break;
2695                     }
2696                     m = next;
2697                   }
2698 
2699                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2700                   assert(m->is_Mem() || m->is_LoadStore(), "");
2701                   uint input = (uint)MemNode::Memory;
2702                   _phase->igvn().replace_input_of(m, input, phi);
2703                   push = false;
2704                 }
2705               } else {
2706                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2707               }
2708               if (push) {
2709                 uses.push(u);
2710               }
2711             }
2712           } else if (!mem_is_valid(m, u) &&
2713                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2714             uses.push(u);
2715           }
2716         }
2717       }
2718     }
2719     for (int i = 0; i < phis.length(); i++) {
2720       Node* n = phis.at(i);
2721       Node* r = n->in(0);
2722       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2723       for (uint j = 1; j < n->req(); j++) {
2724         Node* m = find_mem(r->in(j), NULL);
2725         _phase->igvn().replace_input_of(n, j, m);
2726         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2727       }
2728     }
2729   }
2730   uint last = _phase->C->unique();
2731   MergeMemNode* mm = NULL;
2732   int alias = _alias;
2733   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2734   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2735     Node* u = mem->out(i);
2736     if (u->_idx < last) {
2737       if (u->is_Mem()) {
2738         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2739           Node* m = find_mem(_phase->get_ctrl(u), u);
2740           if (m != mem) {
2741             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2742             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2743             --i;
2744           }
2745         }
2746       } else if (u->is_MergeMem()) {
2747         MergeMemNode* u_mm = u->as_MergeMem();
2748         if (u_mm->memory_at(alias) == mem) {
2749           MergeMemNode* newmm = NULL;
2750           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2751             Node* uu = u->fast_out(j);
2752             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2753             if (uu->is_Phi()) {
2754               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2755               Node* region = uu->in(0);
2756               int nb = 0;
2757               for (uint k = 1; k < uu->req(); k++) {
2758                 if (uu->in(k) == u) {
2759                   Node* m = find_mem(region->in(k), NULL);
2760                   if (m != mem) {
2761                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2762                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2763                     if (newmm != u) {
2764                       _phase->igvn().replace_input_of(uu, k, newmm);
2765                       nb++;
2766                       --jmax;
2767                     }
2768                   }
2769                 }
2770               }
2771               if (nb > 0) {
2772                 --j;
2773               }
2774             } else {
2775               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2776               if (m != mem) {
2777                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2778                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2779                 if (newmm != u) {
2780                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2781                   --j, --jmax;
2782                 }
2783               }
2784             }
2785           }
2786         }
2787       } else if (u->is_Phi()) {
2788         assert(u->bottom_type() == Type::MEMORY, "what else?");
2789         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2790           Node* region = u->in(0);
2791           bool replaced = false;
2792           for (uint j = 1; j < u->req(); j++) {
2793             if (u->in(j) == mem) {
2794               Node* m = find_mem(region->in(j), NULL);
2795               Node* nnew = m;
2796               if (m != mem) {
2797                 if (u->adr_type() == TypePtr::BOTTOM) {
2798                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2799                   nnew = mm;
2800                 }
2801                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2802                 _phase->igvn().replace_input_of(u, j, nnew);
2803                 replaced = true;
2804               }
2805             }
2806           }
2807           if (replaced) {
2808             --i;
2809           }
2810         }
2811       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2812                  u->adr_type() == NULL) {
2813         assert(u->adr_type() != NULL ||
2814                u->Opcode() == Op_Rethrow ||
2815                u->Opcode() == Op_Return ||
2816                u->Opcode() == Op_SafePoint ||
2817                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2818                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2819                u->Opcode() == Op_CallLeaf, "");
2820         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2821         if (m != mem) {
2822           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2823           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2824           --i;
2825         }
2826       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2827         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2828         if (m != mem) {
2829           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2830           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2831           --i;
2832         }
2833       } else if (u->adr_type() != TypePtr::BOTTOM &&
2834                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2835         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2836         assert(m != mem, "");
2837         // u is on the wrong slice...
2838         assert(u->is_ClearArray(), "");
2839         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2840         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2841         --i;
2842       }
2843     }
2844   }
2845 #ifdef ASSERT
2846   assert(new_mem->outcnt() > 0, "");
2847   for (int i = 0; i < phis.length(); i++) {
2848     Node* n = phis.at(i);
2849     assert(n->outcnt() > 0, "new phi must have uses now");
2850   }
2851 #endif
2852 }
2853 
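     // Build a MergeMem based on mem, with this fixer's alias slice replaced by
     // rep_proj, and register it at rep_ctrl.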
2854 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2855   MergeMemNode* mm = MergeMemNode::make(mem);
2856   mm->set_memory_at(_alias, rep_proj);
2857   _phase->register_new_node(mm, rep_ctrl);
2858   return mm;
2859 }
2860 
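     // Make the MergeMem u use rep_proj for this fixer's alias slice: when u has a
     // single use it is updated in place, otherwise a fresh MergeMem is built (see
     // the comment below about the DUIterator).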
2861 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2862   MergeMemNode* newmm = NULL;
2863   MergeMemNode* u_mm = u->as_MergeMem();
2864   Node* c = _phase->get_ctrl(u);
2865   if (_phase->is_dominator(c, rep_ctrl)) {
2866     c = rep_ctrl;
2867   } else {
2868     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2869   }
2870   if (u->outcnt() == 1) {
2871     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2872       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2873       --i;
2874     } else {
2875       _phase->igvn().rehash_node_delayed(u);
2876       u_mm->set_memory_at(_alias, rep_proj);
2877     }
2878     newmm = u_mm;
2879     _phase->set_ctrl_and_loop(u, c);
2880   } else {
2881     // Can't simply clone u and then change one of its inputs, because that
2882     // would add and then remove an edge, which messes with the
2883     // DUIterator.
2884     newmm = MergeMemNode::make(u_mm->base_memory());
2885     for (uint j = 0; j < u->req(); j++) {
2886       if (j < newmm->req()) {
2887         if (j == (uint)_alias) {
2888           newmm->set_req(j, rep_proj);
2889         } else if (newmm->in(j) != u->in(j)) {
2890           newmm->set_req(j, u->in(j));
2891         }
2892       } else if (j == (uint)_alias) {
2893         newmm->add_req(rep_proj);
2894       } else {
2895         newmm->add_req(u->in(j));
2896       }
2897     }
2898     if ((uint)_alias >= u->req()) {
2899       newmm->set_memory_at(_alias, rep_proj);
2900     }
2901     _phase->register_new_node(newmm, c);
2902   }
2903   return newmm;
2904 }
2905 
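     // A memory Phi is worth processing if it is on this fixer's alias class, or if
     // it is the bottom (all slices) memory Phi and its region carries no separate
     // Phi for that class.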
2906 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2907   if (phi->adr_type() == TypePtr::BOTTOM) {
2908     Node* region = phi->in(0);
2909     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2910       Node* uu = region->fast_out(j);
2911       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2912         return false;
2913       }
2914     }
2915     return true;
2916   }
2917   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2918 }
2919 
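     // Redirect the uses of memory state mem that are dominated by rep_ctrl to
     // rep_proj, wrapping it in a MergeMem where the use consumes the whole memory
     // state. The replacement node itself is left untouched.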
2920 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2921   uint last = _phase->C->unique();
2922   MergeMemNode* mm = NULL;
2923   assert(mem->bottom_type() == Type::MEMORY, "");
2924   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2925     Node* u = mem->out(i);
2926     if (u != replacement && u->_idx < last) {
2927       if (u->is_MergeMem()) {
2928         MergeMemNode* u_mm = u->as_MergeMem();
2929         if (u_mm->memory_at(_alias) == mem) {
2930           MergeMemNode* newmm = NULL;
2931           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2932             Node* uu = u->fast_out(j);
2933             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2934             if (uu->is_Phi()) {
2935               if (should_process_phi(uu)) {
2936                 Node* region = uu->in(0);
2937                 int nb = 0;
2938                 for (uint k = 1; k < uu->req(); k++) {
2939                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2940                     if (newmm == NULL) {
2941                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2942                     }
2943                     if (newmm != u) {
2944                       _phase->igvn().replace_input_of(uu, k, newmm);
2945                       nb++;
2946                       --jmax;
2947                     }
2948                   }
2949                 }
2950                 if (nb > 0) {
2951                   --j;
2952                 }
2953               }
2954             } else {
2955               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2956                 if (newmm == NULL) {
2957                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2958                 }
2959                 if (newmm != u) {
2960                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2961                   --j, --jmax;
2962                 }
2963               }
2964             }
2965           }
2966         }
2967       } else if (u->is_Phi()) {
2968         assert(u->bottom_type() == Type::MEMORY, "what else?");
2969         Node* region = u->in(0);
2970         if (should_process_phi(u)) {
2971           bool replaced = false;
2972           for (uint j = 1; j < u->req(); j++) {
2973             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2974               Node* nnew = rep_proj;
2975               if (u->adr_type() == TypePtr::BOTTOM) {
2976                 if (mm == NULL) {
2977                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2978                 }
2979                 nnew = mm;
2980               }
2981               _phase->igvn().replace_input_of(u, j, nnew);
2982               replaced = true;
2983             }
2984           }
2985           if (replaced) {
2986             --i;
2987           }
2988 
2989         }
2990       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2991                  u->adr_type() == NULL) {
2992         assert(u->adr_type() != NULL ||
2993                u->Opcode() == Op_Rethrow ||
2994                u->Opcode() == Op_Return ||
2995                u->Opcode() == Op_SafePoint ||
2996                u->Opcode() == Op_StoreIConditional ||
2997                u->Opcode() == Op_StoreLConditional ||
2998                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2999                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
3000                u->Opcode() == Op_CallLeaf, "%s", u->Name());
3001         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3002           if (mm == NULL) {
3003             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
3004           }
3005           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
3006           --i;
3007         }
3008       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
3009         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3010           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
3011           --i;
3012         }
3013       }
3014     }
3015   }
3016 }
3017 
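     // Creating a load reference barrier registers it with the C2 barrier set
     // state so it is found again at expansion time.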
3018 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj)
3019 : Node(ctrl, obj) {
3020   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
3021 }
3022 
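     // The barrier has the type of its value input: TOP and the null pointer pass
     // through unchanged, everything else is narrowed to an oop pointer type.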
3023 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
3024   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
3025     return Type::TOP;
3026   }
3027   const Type* t = in(ValueIn)->bottom_type();
3028   if (t == TypePtr::NULL_PTR) {
3029     return t;
3030   }
3031   return t->is_oopptr();
3032 }
3033 
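     // Same typing rule as bottom_type(), applied to the phase's current type of
     // the value input.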
3034 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
3035   // If the input is TOP, the result is TOP.
3036   const Type* t2 = phase->type(in(ValueIn));
3037   if (t2 == Type::TOP) return Type::TOP;
3038 
3039   if (t2 == TypePtr::NULL_PTR) {
3040     return t2;
3041   }
3042 
3043   const Type* type = t2->is_oopptr();
3044   return type;
3045 }
3046 
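     // The barrier is the identity on values that provably never need one, as
     // decided by needs_barrier() below.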
3047 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
3048   Node* value = in(ValueIn);
3049   if (!needs_barrier(phase, value)) {
3050     return value;
3051   }
3052   return this;
3053 }
3054 
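     // Conservatively decide whether value n may need this barrier: allocations,
     // call results, nulls, constants, incoming arguments and values already
     // covered by a barrier do not; loads and address computations do; anything
     // unrecognized keeps the barrier.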
3055 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
3056   Unique_Node_List visited;
3057   return needs_barrier_impl(phase, n, visited);
3058 }
3059 
3060 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
3061   if (n == NULL) return false;
3062   if (visited.member(n)) {
3063     return false; // Been there.
3064   }
3065   visited.push(n);
3066 
3067   if (n->is_Allocate()) {
3068     // tty->print_cr("optimize barrier on alloc");
3069     return false;
3070   }
3071   if (n->is_Call()) {
3072     // tty->print_cr("optimize barrier on call");
3073     return false;
3074   }
3075 
3076   const Type* type = phase->type(n);
3077   if (type == Type::TOP) {
3078     return false;
3079   }
3080   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
3081     // tty->print_cr("optimize barrier on null");
3082     return false;
3083   }
3084   if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
3085     // tty->print_cr("optimize barrier on constant");
3086     return false;
3087   }
3088 
3089   switch (n->Opcode()) {
3090     case Op_AddP:
3091       return true; // TODO: Can refine?
3092     case Op_LoadP:
3093     case Op_ShenandoahCompareAndExchangeN:
3094     case Op_ShenandoahCompareAndExchangeP:
3095     case Op_CompareAndExchangeN:
3096     case Op_CompareAndExchangeP:
3097     case Op_GetAndSetN:
3098     case Op_GetAndSetP:
3099       return true;
3100     case Op_Phi: {
3101       for (uint i = 1; i < n->req(); i++) {
3102         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
3103       }
3104       return false;
3105     }
3106     case Op_CheckCastPP:
3107     case Op_CastPP:
3108       return needs_barrier_impl(phase, n->in(1), visited);
3109     case Op_Proj:
3110       return needs_barrier_impl(phase, n->in(0), visited);
3111     case Op_ShenandoahLoadReferenceBarrier:
3112       // tty->print_cr("optimize barrier on barrier");
3113       return false;
3114     case Op_Parm:
3115       // tty->print_cr("optimize barrier on input arg");
3116       return false;
3117     case Op_DecodeN:
3118     case Op_EncodeP:
3119       return needs_barrier_impl(phase, n->in(1), visited);
3120     case Op_LoadN:
3121       return true;
3122     case Op_CMoveN:
3123     case Op_CMoveP:
3124       return needs_barrier_impl(phase, n->in(2), visited) ||
3125              needs_barrier_impl(phase, n->in(3), visited);
3126     case Op_ShenandoahEnqueueBarrier:
3127       return needs_barrier_impl(phase, n->in(1), visited);
3128     case Op_CreateEx:
3129       return false;
3130     default:
3131       break;
3132   }
3133 #ifdef ASSERT
3134   tty->print("need barrier on?: ");
3135   tty->print_cr("ins:");
3136   n->dump(2);
3137   tty->print_cr("outs:");
3138   n->dump(-2);
3139   ShouldNotReachHere();
3140 #endif
3141   return true;
3142 }
3143 
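     // Determine whether this barrier is actually needed by looking at how its
     // result is used.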
3144 ShenandoahLoadReferenceBarrierNode::Strength ShenandoahLoadReferenceBarrierNode::get_barrier_strength() {
3145   Unique_Node_List visited;
3146   Node_Stack stack(0);
3147   stack.push(this, 0);
3148 
3149   // Look for the strongest requirement: go over the nodes looking for STRONG ones.
3150   // Stop as soon as STRONG is encountered. Otherwise, walk until we run out of
3151   // nodes, in which case the overall strength is NONE.
3152   Strength strength = NONE;
3153   while (strength != STRONG && stack.size() > 0) {
3154     Node* n = stack.node();
3155     if (visited.member(n)) {
3156       stack.pop();
3157       continue;
3158     }
3159     visited.push(n);
3160     bool visit_users = false;
3161     switch (n->Opcode()) {
3162       case Op_CallStaticJava:
3163       case Op_CallDynamicJava:
3164       case Op_CallLeaf:
3165       case Op_CallLeafNoFP:
3166       case Op_CompareAndSwapL:
3167       case Op_CompareAndSwapI:
3168       case Op_CompareAndSwapB:
3169       case Op_CompareAndSwapS:
3170       case Op_CompareAndSwapN:
3171       case Op_CompareAndSwapP:
3172       case Op_CompareAndExchangeL:
3173       case Op_CompareAndExchangeI:
3174       case Op_CompareAndExchangeB:
3175       case Op_CompareAndExchangeS:
3176       case Op_CompareAndExchangeN:
3177       case Op_CompareAndExchangeP:
3178       case Op_WeakCompareAndSwapL:
3179       case Op_WeakCompareAndSwapI:
3180       case Op_WeakCompareAndSwapB:
3181       case Op_WeakCompareAndSwapS:
3182       case Op_WeakCompareAndSwapN:
3183       case Op_WeakCompareAndSwapP:
3184       case Op_ShenandoahCompareAndSwapN:
3185       case Op_ShenandoahCompareAndSwapP:
3186       case Op_ShenandoahWeakCompareAndSwapN:
3187       case Op_ShenandoahWeakCompareAndSwapP:
3188       case Op_ShenandoahCompareAndExchangeN:
3189       case Op_ShenandoahCompareAndExchangeP:
3190       case Op_GetAndSetL:
3191       case Op_GetAndSetI:
3192       case Op_GetAndSetB:
3193       case Op_GetAndSetS:
3194       case Op_GetAndSetP:
3195       case Op_GetAndSetN:
3196       case Op_GetAndAddL:
3197       case Op_GetAndAddI:
3198       case Op_GetAndAddB:
3199       case Op_GetAndAddS:
3200       case Op_ShenandoahEnqueueBarrier:
3201       case Op_FastLock:
3202       case Op_FastUnlock:
3203       case Op_Rethrow:
3204       case Op_Return:
3205       case Op_StoreB:
3206       case Op_StoreC:
3207       case Op_StoreD:
3208       case Op_StoreF:
3209       case Op_StoreL:
3210       case Op_StoreLConditional:
3211       case Op_StoreI:
3212       case Op_StoreIConditional:
3213       case Op_StoreN:
3214       case Op_StoreP:
3215       case Op_StoreVector:
3216       case Op_StrInflatedCopy:
3217       case Op_StrCompressedCopy:
3218       case Op_EncodeP:
3219       case Op_CastP2X:
3220       case Op_SafePoint:
3221       case Op_EncodeISOArray:
3222       case Op_AryEq:
3223       case Op_StrEquals:
3224       case Op_StrComp:
3225       case Op_StrIndexOf:
3226       case Op_StrIndexOfChar:
3227       case Op_HasNegatives:
3228         // Known to require barriers
3229         strength = STRONG;
3230         break;
3231       case Op_CmpP: {
3232         if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) ||
3233             n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
3234           // One of the sides is known to be null: no barrier needed.
3235         } else {
3236           strength = STRONG;
3237         }
3238         break;
3239       }
3240       case Op_LoadB:
3241       case Op_LoadUB:
3242       case Op_LoadUS:
3243       case Op_LoadD:
3244       case Op_LoadF:
3245       case Op_LoadL:
3246       case Op_LoadI:
3247       case Op_LoadS:
3248       case Op_LoadN:
3249       case Op_LoadP:
3250       case Op_LoadVector: {
3251         const TypePtr* adr_type = n->adr_type();
3252         int alias_idx = Compile::current()->get_alias_index(adr_type);
3253         Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
3254         ciField* field = alias_type->field();
3255         bool is_static = field != NULL && field->is_static();
3256         bool is_final = field != NULL && field->is_final();
3257 
3258         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
3259           // Loading the constant does not require barriers: it should be handled
3260           // as part of GC roots already.
3261         } else {
3262           strength = STRONG;
3263         }
3264         break;
3265       }
3266       case Op_Conv2B:
3267       case Op_LoadRange:
3268       case Op_LoadKlass:
3269       case Op_LoadNKlass:
3270         // Do not require barriers
3271         break;
3272       case Op_AddP:
3273       case Op_CheckCastPP:
3274       case Op_CastPP:
3275       case Op_CMoveP:
3276       case Op_Phi:
3277       case Op_ShenandoahLoadReferenceBarrier:
3278         // Whether these need barriers depends on their users
3279         visit_users = true;
3280         break;
3281       default: {
3282 #ifdef ASSERT
3283         fatal("Unknown node in get_barrier_strength: %s", NodeClassNames[n->Opcode()]);
3284 #else
3285         // Default to strong: better to have excess barriers than to miss some.
3286         strength = STRONG;
3287 #endif
3288       }
3289     }
3290 
3291     stack.pop();
3292     if (visit_users) {
3293       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3294         Node* user = n->fast_out(i);
3295         if (user != NULL) {
3296           stack.push(user, 0);
3297         }
3298       }
3299     }
3300   }
3301   return strength;
3302 }
3303 
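     // Recognize the shape where the barrier's value is a CastPP pinned under the
     // non-null projection of a null check whose other branch is an uncommon trap,
     // and return that uncommon trap call (NULL if the shape does not match).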
3304 CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
3305   Node* val = in(ValueIn);
3306 
3307   const Type* val_t = igvn.type(val);
3308 
3309   if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
3310       val->Opcode() == Op_CastPP &&
3311       val->in(0) != NULL &&
3312       val->in(0)->Opcode() == Op_IfTrue &&
3313       val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
3314       val->in(0)->in(0)->is_If() &&
3315       val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
3316       val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
3317       val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
3318       val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
3319       val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
3320     assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
3321     CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
3322     return unc;
3323   }
3324   return NULL;
3325 }