/*
 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/movenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  if ((state->enqueue_barriers_count() +
       state->load_reference_barriers_count()) > 0) {
    bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
    C->clear_major_progress();
    PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
    if (C->failing()) return false;
    PhaseIdealLoop::verify(igvn);
    DEBUG_ONLY(verify_raw_mem(C->root());)
    if (attempt_more_loopopts) {
      C->set_major_progress();
      if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
        return false;
      }
      C->clear_major_progress();
    }
  }
  return true;
}

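// Recognizes the IR shape of a GC-state test, i.e. a conditional of the form
//   if ((gc_state & mask) != 0)
// where gc_state is the thread-local byte recognized by is_gc_state_load()
// (this is the shape emitted by test_heap_stable() below).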
bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) {
  if (!UseShenandoahGC) {
    return false;
  }
  assert(iff->is_If(), "bad input");
  if (iff->Opcode() != Op_If) {
    return false;
  }
  Node* bol = iff->in(1);
  if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
    return false;
  }
  Node* cmp = bol->in(1);
  if (cmp->Opcode() != Op_CmpI) {
    return false;
  }
  Node* in1 = cmp->in(1);
  Node* in2 = cmp->in(2);
  if (in2->find_int_con(-1) != 0) {
    return false;
  }
  if (in1->Opcode() != Op_AndI) {
    return false;
  }
  in2 = in1->in(2);
  if (in2->find_int_con(-1) != mask) {
    return false;
  }
  in1 = in1->in(1);

  return is_gc_state_load(in1);
}

bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}

bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
  if (!UseShenandoahGC) {
    return false;
  }
  if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
    return false;
  }
  Node* addp = n->in(MemNode::Address);
  if (!addp->is_AddP()) {
    return false;
  }
  Node* base = addp->in(AddPNode::Address);
  Node* off = addp->in(AddPNode::Offset);
  if (base->Opcode() != Op_ThreadLocal) {
    return false;
  }
  if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
    return false;
  }
  return true;
}

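// Walks the control inputs backwards from start towards stop (which must
// dominate start) and reports whether a safepoint (other than a leaf call)
// can be reached on any path in between.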
bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
  assert(phase->is_dominator(stop, start), "bad inputs");
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(start);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == stop) {
      continue;
    }
    if (m->is_SafePoint() && !m->is_CallLeaf()) {
      return true;
    }
    if (m->is_Region()) {
      for (uint i = 1; i < m->req(); i++) {
        wq.push(m->in(i));
      }
    } else {
      wq.push(m->in(0));
    }
  }
  return false;
}

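// Tries to common a gc-state load with another load of the same thread-local
// address that dominates it, provided no safepoint (where the gc state could
// change) can be crossed in between.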
bool ShenandoahBarrierC2Support::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) {
  assert(is_gc_state_load(n), "inconsistent");
  Node* addp = n->in(MemNode::Address);
  Node* dominator = NULL;
  for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
    Node* u = addp->fast_out(i);
    assert(is_gc_state_load(u), "inconsistent");
    if (u != n && phase->is_dominator(u->in(0), n->in(0))) {
      if (dominator == NULL) {
        dominator = u;
      } else {
        if (phase->dom_depth(u->in(0)) < phase->dom_depth(dominator->in(0))) {
          dominator = u;
        }
      }
    }
  }
  if (dominator == NULL || has_safepoint_between(n->in(0), dominator->in(0), phase)) {
    return false;
  }
  phase->igvn().replace_node(n, dominator);

  return true;
}

#ifdef ASSERT
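// Debug-only helper for verify(): starting from a value input, walks up
// through casts, phis and similar transparent nodes and checks that every
// path ends at something that does not need a barrier (NULL, constant,
// newly-allocated object, ...) or at a load reference barrier. Returns false
// if an unprotected path is found.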
bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
  assert(phis.size() == 0, "");

  while (true) {
    if (in->bottom_type() == TypePtr::NULL_PTR) {
      if (trace) {tty->print_cr("NULL");}
    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
      if (trace) {tty->print_cr("Non oop");}
    } else if (t == ShenandoahLoad && ShenandoahOptimizeStableFinals &&
               in->bottom_type()->make_ptr()->isa_aryptr() &&
               in->bottom_type()->make_ptr()->is_aryptr()->is_stable()) {
      if (trace) {tty->print_cr("Stable array load");}
    } else {
      if (in->is_ConstraintCast()) {
        in = in->in(1);
        continue;
      } else if (in->is_AddP()) {
        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
        in = in->in(AddPNode::Address);
        continue;
      } else if (in->is_Con()) {
        if (trace) {
          tty->print("Found constant");
          in->dump();
        }
      } else if (in->Opcode() == Op_Parm) {
        if (trace) {
          tty->print("Found argument");
        }
      } else if (in->Opcode() == Op_CreateEx) {
        if (trace) {
          tty->print("Found create-exception");
        }
      } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
        if (trace) {
          tty->print("Found raw LoadP (OSR argument?)");
        }
      } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        if (t == ShenandoahOopStore) {
          uint i = 0;
          for (; i < phis.size(); i++) {
            Node* n = phis.node_at(i);
            if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
              break;
            }
          }
          if (i == phis.size()) {
            return false;
          }
        }
        barriers_used.push(in);
        if (trace) {tty->print("Found barrier"); in->dump();}
      } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
        if (t != ShenandoahOopStore) {
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Found enqueue barrier"); in->dump();}
        phis.push(in, in->req());
        in = in->in(1);
        continue;
      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
        if (trace) {
          tty->print("Found alloc");
          in->in(0)->dump();
        }
      } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
        if (trace) {
          tty->print("Found Java call");
        }
      } else if (in->is_Phi()) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed phi:"); in->dump();}
          phis.push(in, 2);
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Already seen phi:"); in->dump();}
      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
          phis.push(in, CMoveNode::IfTrue);
          in = in->in(CMoveNode::IfFalse);
          continue;
        }
        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
        in = in->in(1);
        continue;
      } else {
        return false;
      }
    }
    bool cont = false;
    while (phis.is_nonempty()) {
      uint idx = phis.index();
      Node* phi = phis.node();
      if (idx >= phi->req()) {
        if (trace) {tty->print("Popped phi:"); phi->dump();}
        phis.pop();
        continue;
      }
      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
      in = phi->in(idx);
      phis.set_index(idx+1);
      cont = true;
      break;
    }
    if (!cont) {
      break;
    }
  }
  return true;
}

void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
  if (n1 != NULL) {
    n1->dump(+10);
  }
  if (n2 != NULL) {
    n2->dump(+10);
  }
  fatal("%s", msg);
}

void ShenandoahBarrierC2Support::verify(RootNode* root) {
  ResourceMark rm;
  Unique_Node_List wq;
  GrowableArray<Node*> barriers;
  Unique_Node_List barriers_used;
  Node_Stack phis(0);
  VectorSet visited(Thread::current()->resource_area());
  const bool trace = false;
  const bool verify_no_useless_barrier = false;

  wq.push(root);
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    if (n->is_Load()) {
      const bool trace = false;
      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
        if (trace) {tty->print_cr("Load range/klass");}
      } else {
        const TypePtr* adr_type = n->as_Load()->adr_type();

        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
          if (trace) {tty->print_cr("Mark load");}
        } else if (adr_type->isa_instptr() &&
                   adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
          if (trace) {tty->print_cr("Reference.get()");}
        } else {
          bool verify = true;
          if (adr_type->isa_instptr()) {
            const TypeInstPtr* tinst = adr_type->is_instptr();
            ciKlass* k = tinst->klass();
            assert(k->is_instance_klass(), "");
            ciInstanceKlass* ik = (ciInstanceKlass*)k;
            int offset = adr_type->offset();

            if ((ik->debug_final_field_at(offset) && ShenandoahOptimizeInstanceFinals) ||
                (ik->debug_stable_field_at(offset) && ShenandoahOptimizeStableFinals)) {
              if (trace) {tty->print_cr("Final/stable");}
              verify = false;
            } else if (k == ciEnv::current()->Class_klass() &&
                       tinst->const_oop() != NULL &&
                       tinst->offset() >= (ik->size_helper() * wordSize)) {
              ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
              ciField* field = k->get_field_by_offset(tinst->offset(), true);
              if ((ShenandoahOptimizeStaticFinals && field->is_final()) ||
                  (ShenandoahOptimizeStableFinals && field->is_stable())) {
                verify = false;
              }
            }
          }

          if (verify && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Load should have barriers", n);
          }
        }
      }
    } else if (n->is_Store()) {
      const bool trace = false;

      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
        Node* adr = n->in(MemNode::Address);
        bool verify = true;

        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
          adr = adr->in(AddPNode::Address);
          if (adr->is_AddP()) {
            assert(adr->in(AddPNode::Base)->is_top(), "");
            adr = adr->in(AddPNode::Address);
            if (adr->Opcode() == Op_LoadP &&
                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
              if (trace) {tty->print_cr("SATB prebarrier");}
              verify = false;
            }
          }
        }

        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Store should have barriers", n);
        }
      }
      if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CmpP) {
      const bool trace = false;

      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (in1->bottom_type()->isa_oopptr()) {
        if (trace) {tty->print("Verifying"); n->dump();}

        bool mark_inputs = false;
        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
            (in1->is_Con() || in2->is_Con())) {
          if (trace) {tty->print_cr("Comparison against a constant");}
          mark_inputs = true;
        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
          mark_inputs = true;
        } else {
          assert(in2->bottom_type()->isa_oopptr(), "");

          if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
              !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
          }
        }
        if (verify_no_useless_barrier &&
            mark_inputs &&
            (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
             !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
          phis.clear();
          visited.Reset();
        }
      }
    } else if (n->is_LoadStore()) {
      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
          !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
      }

      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
      CallNode* call = n->as_Call();

      static struct {
        const char* name;
        struct {
          int pos;
          verify_type t;
        } args[6];
      } calls[] = {
        "aescrypt_encryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "aescrypt_decryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "multiplyToLen",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "squareToLen",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "montgomery_multiply",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "montgomery_square",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "mulAdd",
        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "vectorizedMismatch",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesCRC32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesAdler32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesCRC32C",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "counterMode_AESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
        "cipherBlockChaining_encryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "cipherBlockChaining_decryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "shenandoah_clone_barrier",
        { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "ghash_processBlocks",
        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha1_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha256_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha512_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha1_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha256_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha512_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "encodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
      };

      if (call->is_call_to_arraycopystub()) {
        Node* dest = NULL;
        const TypeTuple* args = n->as_Call()->_tf->domain();
        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
          if (args->field_at(i)->isa_ptr()) {
            j++;
            if (j == 2) {
              dest = n->in(i);
              break;
            }
          }
        }
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
            !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
        }
      } else if (strlen(call->_name) > 5 &&
                 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
        }
      } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
        // skip
      } else {
        const int calls_len = sizeof(calls) / sizeof(calls[0]);
        int i = 0;
        for (; i < calls_len; i++) {
          if (!strcmp(calls[i].name, call->_name)) {
            break;
          }
        }
        if (i != calls_len) {
          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
          for (uint j = 0; j < args_len; j++) {
            int pos = calls[i].args[j].pos;
            if (pos == -1) {
              break;
            }
            if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
            }
          }
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              uint k = 0;
              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
              if (k == args_len) {
                fatal("arg %d for call %s not covered", j, call->_name);
              }
            }
          }
        } else {
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              fatal("%s not covered", call->_name);
            }
          }
        }
      }
    } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      // skip
    } else if (n->is_AddP()
               || n->is_Phi()
               || n->is_ConstraintCast()
               || n->Opcode() == Op_Return
               || n->Opcode() == Op_CMoveP
               || n->Opcode() == Op_CMoveN
               || n->Opcode() == Op_Rethrow
               || n->is_MemBar()
               || n->Opcode() == Op_Conv2B
               || n->Opcode() == Op_SafePoint
               || n->is_CallJava()
               || n->Opcode() == Op_Unlock
               || n->Opcode() == Op_EncodeP
               || n->Opcode() == Op_DecodeN) {
      // nothing to do
    } else {
      static struct {
        int opcode;
        struct {
          int pos;
          verify_type t;
        } inputs[2];
      } others[] = {
        Op_FastLock,
        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_Lock,
        { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
        Op_ArrayCopy,
        { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
        Op_StrCompressedCopy,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_StrInflatedCopy,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_AryEq,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
        Op_StrIndexOf,
        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
        Op_StrComp,
        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
        Op_StrEquals,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
        Op_EncodeISOArray,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_HasNegatives,
        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_CastP2X,
        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_StrIndexOfChar,
        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
      };

      const int others_len = sizeof(others) / sizeof(others[0]);
      int i = 0;
      for (; i < others_len; i++) {
        if (others[i].opcode == n->Opcode()) {
          break;
        }
      }
      uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
      if (i != others_len) {
        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
        for (uint j = 0; j < inputs_len; j++) {
          int pos = others[i].inputs[j].pos;
          if (pos == -1) {
            break;
          }
          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
          }
        }
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            uint k = 0;
            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
            if (k == inputs_len) {
              fatal("arg %d for node %s not covered", j, n->Name());
            }
          }
        }
      } else {
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            fatal("%s not covered", n->Name());
          }
        }
      }
    }

    if (n->is_SafePoint()) {
      SafePointNode* sfpt = n->as_SafePoint();
      if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            phis.clear();
            visited.Reset();
          }
        }
      }
    }
    for( uint i = 0; i < n->len(); ++i ) {
      Node *m = n->in(i);
      if (m == NULL) continue;

      // In most cases, inputs should be known to be non null. If it's
      // not the case, it could be a missing cast_not_null() in an
      // intrinsic or support might be needed in AddPNode::Ideal() to
      // avoid a NULL+offset input.
      if (!(n->is_Phi() ||
            (n->is_SafePoint() && (!n->is_CallRuntime() || !strcmp(n->as_Call()->_name, "shenandoah_wb_pre") || !strcmp(n->as_Call()->_name, "unsafe_arraycopy"))) ||
            n->Opcode() == Op_CmpP ||
            n->Opcode() == Op_CmpN ||
            (n->Opcode() == Op_StoreP && i == StoreNode::ValueIn) ||
            (n->Opcode() == Op_StoreN && i == StoreNode::ValueIn) ||
            n->is_ConstraintCast() ||
            n->Opcode() == Op_Return ||
            n->Opcode() == Op_Conv2B ||
            n->is_AddP() ||
            n->Opcode() == Op_CMoveP ||
            n->Opcode() == Op_CMoveN ||
            n->Opcode() == Op_Rethrow ||
            n->is_MemBar() ||
            n->is_Mem() ||
            n->Opcode() == Op_AryEq ||
            n->Opcode() == Op_SCMemProj ||
            n->Opcode() == Op_EncodeP ||
            n->Opcode() == Op_DecodeN ||
            n->Opcode() == Op_ShenandoahEnqueueBarrier ||
            n->Opcode() == Op_ShenandoahLoadReferenceBarrier)) {
        if (m->bottom_type()->make_oopptr() && m->bottom_type()->make_oopptr()->meet(TypePtr::NULL_PTR) == m->bottom_type()) {
          report_verify_failure("Shenandoah verification: null input", n, m);
        }
      }

      wq.push(m);
    }
  }

  if (verify_no_useless_barrier) {
    for (int i = 0; i < barriers.length(); i++) {
      Node* n = barriers.at(i);
      if (!barriers_used.member(n)) {
        tty->print("XXX useless barrier"); n->dump(-2);
        ShouldNotReachHere();
      }
    }
  }
}
#endif

bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
  // That both nodes have the same control is not sufficient to prove
  // domination; verify that there is no path from d to n.
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(d);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == n) {
      return false;
    }
    if (m->is_Phi() && m->in(0)->is_Loop()) {
      assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
    } else {
      for (uint i = 0; i < m->req(); i++) {
        if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
          wq.push(m->in(i));
        }
      }
    }
  }
  return true;
}

bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
  if (d_c != n_c) {
    return phase->is_dominator(d_c, n_c);
  }
  return is_dominator_same_ctrl(d_c, d, n, phase);
}

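// Steps from a memory node to its memory input, following the edge that is
// relevant for the given alias index (e.g. picking the right slice out of a
// MergeMem).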
Node* next_mem(Node* mem, int alias) {
  Node* res = NULL;
  if (mem->is_Proj()) {
    res = mem->in(0);
  } else if (mem->is_SafePoint() || mem->is_MemBar()) {
    res = mem->in(TypeFunc::Memory);
  } else if (mem->is_Phi()) {
    res = mem->in(1);
  } else if (mem->is_MergeMem()) {
    res = mem->as_MergeMem()->memory_at(alias);
  } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
    res = mem->in(MemNode::Memory);
  } else {
#ifdef ASSERT
    mem->dump();
#endif
    ShouldNotReachHere();
  }
  return res;
}

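// Walks the idom chain from c up to dom and checks that there is no branching
// control flow in between. Returns NULL if the path is branch free, the single
// allowed If projection when allow_one_proj is true and exactly one is found,
// or NodeSentinel if an unsupported shape (extra projections, jumps, catch
// projections, ...) is encountered.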
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
  Node* iffproj = NULL;
  while (c != dom) {
    Node* next = phase->idom(c);
    assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
    if (c->is_Region()) {
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(c);
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        if (n == next) {
          continue;
        }
        if (n->is_Region()) {
          for (uint j = 1; j < n->req(); j++) {
            wq.push(n->in(j));
          }
        } else {
          wq.push(n->in(0));
        }
      }
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        assert(n->is_CFG(), "");
        if (n->is_Multi()) {
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* u = n->fast_out(j);
            if (u->is_CFG()) {
              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
                return NodeSentinel;
              }
            }
          }
        }
      }
    } else if (c->is_Proj()) {
      if (c->is_IfProj()) {
        if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
          // continue;
        } else {
          if (!allow_one_proj) {
            return NodeSentinel;
          }
          if (iffproj == NULL) {
            iffproj = c;
          } else {
            return NodeSentinel;
          }
        }
      } else if (c->Opcode() == Op_JumpProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CatchProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
        return NodeSentinel; // unsupported
      } else {
        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
      }
    }
    c = next;
  }
  return iffproj;
}

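// Starting from mem, follows the memory graph upwards until it reaches a
// memory state whose control strictly dominates ctrl, and returns it (with
// its control in mem_ctrl). Returns NULL if a cycle is hit.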
Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
  ResourceMark rm;
  VectorSet wq(Thread::current()->resource_area());
  wq.set(mem->_idx);
  mem_ctrl = phase->ctrl_or_self(mem);
  while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
    mem = next_mem(mem, alias);
    if (wq.test_set(mem->_idx)) {
      return NULL;
    }
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  if (mem->is_MergeMem()) {
    mem = mem->as_MergeMem()->memory_at(alias);
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  return mem;
}

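// Finds the memory state for the bottom (all-slices) memory alias that is
// live at ctrl, by walking up the dominator tree until a memory phi, call
// memory projection or other memory projection with adr type TypePtr::BOTTOM
// is found.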
Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
  Node* mem = NULL;
  Node* c = ctrl;
  do {
    if (c->is_Region()) {
      Node* phi_bottom = NULL;
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            mem = u;
          }
        }
      }
    } else {
      if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
        CallProjections projs;
        c->as_Call()->extract_projections(&projs, true, false);
        if (projs.fallthrough_memproj != NULL) {
          if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
            if (projs.catchall_memproj == NULL) {
              mem = projs.fallthrough_memproj;
            } else {
              if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
                mem = projs.fallthrough_memproj;
              } else {
                assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
                mem = projs.catchall_memproj;
              }
            }
          }
        } else {
          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
          if (proj != NULL &&
              proj->adr_type() == TypePtr::BOTTOM) {
            mem = proj;
          }
        }
      } else {
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
              assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
              assert(mem == NULL, "only one proj");
              mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
      }
    }
    c = phase->idom(c);
  } while (mem == NULL);
  return mem;
}

void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
      uses.push(u);
    }
  }
}

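// Replaces the outer strip mined loop head and its loop end with plain
// LoopNode/IfNode equivalents and clears the inner loop's strip mined flag,
// so that barrier expansion inside the loop does not break strip mining
// verification.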
static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
  OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
  Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
  phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
  Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
  phase->register_control(new_le, phase->get_loop(le), le->in(0));
  phase->lazy_replace(outer, new_outer);
  phase->lazy_replace(le, new_le);
  inner->clear_strip_mined();
}

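// Emits the heap stability check that guards the barrier slow path,
// conceptually:
//   if ((thread->gc_state & ShenandoahHeap::HAS_FORWARDED) != 0) { ...slow path... }
// On return, ctrl is the control projection taken when the heap is unstable
// and heap_stable_ctrl the projection taken when it is stable.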
void ShenandoahBarrierC2Support::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
                                                  PhaseIdealLoop* phase) {
  IdealLoopTree* loop = phase->get_loop(ctrl);
  Node* thread = new ThreadLocalNode();
  phase->register_new_node(thread, ctrl);
  Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  phase->set_ctrl(offset, phase->C->root());
  Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
  phase->register_new_node(gc_state_addr, ctrl);
  uint gc_state_idx = Compile::AliasIdxRaw;
  const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
  debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));

  Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
  phase->register_new_node(gc_state, ctrl);
  Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED));
  phase->register_new_node(heap_stable_and, ctrl);
  Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
  phase->register_new_node(heap_stable_cmp, ctrl);
  Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
  phase->register_new_node(heap_stable_test, ctrl);
  IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  phase->register_control(heap_stable_iff, loop, ctrl);

  heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
  phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
  ctrl = new IfTrueNode(heap_stable_iff);
  phase->register_control(ctrl, loop, heap_stable_iff);

  assert(is_heap_stable_test(heap_stable_iff), "Should match the shape");
}

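// If val may be NULL according to its type, emits an explicit NULL check and
// splits control: ctrl becomes the not-null path, null_ctrl the NULL path.
// If val is known to be non-NULL, nothing is emitted and null_ctrl is left
// untouched.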
void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
  const Type* val_t = phase->igvn().type(val);
  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
    IdealLoopTree* loop = phase->get_loop(ctrl);
    Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
    phase->register_new_node(null_cmp, ctrl);
    Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
    phase->register_new_node(null_test, ctrl);
    IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    phase->register_control(null_iff, loop, ctrl);
    ctrl = new IfTrueNode(null_iff);
    phase->register_control(ctrl, loop, null_iff);
    null_ctrl = new IfFalseNode(null_iff);
    phase->register_control(null_ctrl, loop, null_iff);
  }
}

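// Clones the null check If that guards an uncommon trap so it can be
// re-issued at control c: the If feeding unc_ctrl is duplicated at c, c is
// advanced to the new true projection, and a fresh CastPP of the uncasted
// value pinned there is returned.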
Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(c);
  Node* iff = unc_ctrl->in(0);
  assert(iff->is_If(), "broken");
  Node* new_iff = iff->clone();
  new_iff->set_req(0, c);
  phase->register_control(new_iff, loop, c);
  Node* iffalse = new IfFalseNode(new_iff->as_If());
  phase->register_control(iffalse, loop, new_iff);
  Node* iftrue = new IfTrueNode(new_iff->as_If());
  phase->register_control(iftrue, loop, new_iff);
  c = iftrue;
  const Type *t = phase->igvn().type(val);
  assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
  Node* uncasted_val = val->in(1);
  val = new CastPPNode(uncasted_val, t);
  val->init_req(0, c);
  phase->register_new_node(val, c);
  return val;
}

void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
                                                Unique_Node_List& uses, PhaseIdealLoop* phase) {
  IfNode* iff = unc_ctrl->in(0)->as_If();
  Node* proj = iff->proj_out(0);
  assert(proj != unc_ctrl, "bad projection");
  Node* use = proj->unique_ctrl_out();

  assert(use == unc || use->is_Region(), "what else?");

  uses.clear();
  if (use == unc) {
    phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
    for (uint i = 1; i < unc->req(); i++) {
      Node* n = unc->in(i);
      if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
        uses.push(n);
      }
    }
  } else {
    assert(use->is_Region(), "what else?");
    uint idx = 1;
    for (; use->in(idx) != proj; idx++);
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* u = use->fast_out(i);
      if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
        uses.push(u->in(idx));
      }
    }
  }
  for(uint next = 0; next < uses.size(); next++ ) {
    Node *n = uses.at(next);
    assert(phase->get_ctrl(n) == proj, "bad control");
    phase->set_ctrl_and_loop(n, new_unc_ctrl);
    if (n->in(0) == proj) {
      phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
    }
    for (uint i = 0; i < n->req(); i++) {
      Node* m = n->in(i);
      if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
        uses.push(m);
      }
    }
  }

  phase->igvn().rehash_node_delayed(use);
  int nb = use->replace_edge(proj, new_unc_ctrl);
  assert(nb == 1, "only use expected");
}

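// Emits the collection set membership check: the object address is shifted
// right by the region size shift to get a region index, which is used to load
// a byte from the in-cset fast-test table, conceptually
//   in_cset = cset_table[obj >> region_size_bytes_shift]
// ctrl becomes the in-cset (slow) path, not_cset_ctrl the path taken when the
// object is not in the collection set.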
void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(ctrl);
  Node* raw_rbtrue = new CastP2XNode(ctrl, val);
  phase->register_new_node(raw_rbtrue, ctrl);
  Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
  phase->register_new_node(cset_offset, ctrl);
  Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
  Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
  phase->register_new_node(in_cset_fast_test_adr, ctrl);
  uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
  const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
  debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
  Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
  phase->register_new_node(in_cset_fast_test_load, ctrl);
  Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
  phase->register_new_node(in_cset_fast_test_cmp, ctrl);
  Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
  phase->register_new_node(in_cset_fast_test_test, ctrl);
  IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  phase->register_control(in_cset_fast_test_iff, loop, ctrl);

  not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
  phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);

  ctrl = new IfFalseNode(in_cset_fast_test_iff);
  phase->register_control(ctrl, loop, in_cset_fast_test_iff);
}

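// Expands the load-reference-barrier slow path as a leaf runtime call to
// ShenandoahRuntime::load_reference_barrier_JRT, wiring up raw memory, and
// returns the (re-cast) result through val, with ctrl and result_mem updated
// to the call's control and memory projections.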
void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node*& result_mem, Node* raw_mem, PhaseIdealLoop* phase) {
  IdealLoopTree* loop = phase->get_loop(ctrl);
  const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr()->cast_to_nonconst();

  // The slow path stub consumes and produces raw memory in addition
  // to the existing memory edges
  Node* base = find_bottom_mem(ctrl, phase);
  MergeMemNode* mm = MergeMemNode::make(base);
  mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
  phase->register_new_node(mm, ctrl);

  Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_write_barrier_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT), "shenandoah_write_barrier", TypeRawPtr::BOTTOM);
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, phase->C->top());
  call->init_req(TypeFunc::Memory, mm);
  call->init_req(TypeFunc::FramePtr, phase->C->top());
  call->init_req(TypeFunc::ReturnAdr, phase->C->top());
  call->init_req(TypeFunc::Parms, val);
  phase->register_control(call, loop, ctrl);
  ctrl = new ProjNode(call, TypeFunc::Control);
  phase->register_control(ctrl, loop, call);
  result_mem = new ProjNode(call, TypeFunc::Memory);
  phase->register_new_node(result_mem, call);
  val = new ProjNode(call, TypeFunc::Parms);
  phase->register_new_node(val, call);
  val = new CheckCastPPNode(ctrl, val, obj_type);
  phase->register_new_node(val, ctrl);
}

void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // its memory is control dependent on the barrier's input control)
  // must stay above the barrier.
  uses_to_ignore.clear();
  if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
    uses_to_ignore.push(init_raw_mem);
  }
  for (uint next = 0; next < uses_to_ignore.size(); next++) {
    Node *n = uses_to_ignore.at(next);
    for (uint i = 0; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
        uses_to_ignore.push(in);
      }
    }
  }
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    if (u->_idx < last &&
        u != barrier &&
        !uses_to_ignore.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      Node* c = old_c;
      if (c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        phase->igvn().rehash_node_delayed(u);
        int nb = u->replace_edge(ctrl, region);
        if (u->is_CFG()) {
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        --i, imax -= nb;
      }
    }
  }
}

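// Recursively builds the phis needed to merge a node n and its clone n_clone
// at the regions below a call: region inputs dominated by the call's
// fallthrough catch projection get n, inputs dominated by the catch-all
// projection get n_clone, and other inputs recurse to a deeper region.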
static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
  Node* region = NULL;
  while (c != ctrl) {
    if (c->is_Region()) {
      region = c;
    }
    c = phase->idom(c);
  }
  assert(region != NULL, "");
  Node* phi = new PhiNode(region, n->bottom_type());
  for (uint j = 1; j < region->req(); j++) {
    Node* in = region->in(j);
    if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
      phi->init_req(j, n);
    } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
      phi->init_req(j, n_clone);
    } else {
      phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
    }
  }
  phase->register_new_node(phi, region);
  return phi;
}

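// Drives barrier expansion: hides strip mined loop shapes that expansion
// would otherwise break, then walks the recorded load reference barriers,
// rewiring implicit null-check uncommon traps (via clone_null_check() /
// fix_null_check()) so each barrier can be pinned and expanded at its final
// control.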
1190 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1191   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1192 
1193   // Collect raw memory state at CFG points in the entire graph and
1194   // record it in memory_nodes. Optimize the raw memory graph in the
1195   // process. Optimizing the memory graph also makes the memory graph
1196   // simpler.
1197   GrowableArray<MemoryGraphFixer*> memory_graph_fixers;
1198 
1199   Unique_Node_List uses;
1200   for (int i = 0; i < state->enqueue_barriers_count(); i++) {
1201     Node* barrier = state->enqueue_barrier(i);
1202     Node* ctrl = phase->get_ctrl(barrier);
1203     IdealLoopTree* loop = phase->get_loop(ctrl);
1204     if (loop->_head->is_OuterStripMinedLoop()) {
1205       // Expanding a barrier here will break loop strip mining
1206       // verification. Transform the loop so the loop nest doesn't
1207       // appear as strip mined.
1208       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1209       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1210     }
1211   }
1212 
1213   Node_Stack stack(0);
1214   Node_List clones;
1215   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1216     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1217     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1218       continue;
1219     }
1220 
1221     Node* ctrl = phase->get_ctrl(lrb);
1222     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1223 
1224     CallStaticJavaNode* unc = NULL;
1225     Node* unc_ctrl = NULL;
1226     Node* uncasted_val = val;
1227 
1228     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1229       Node* u = lrb->fast_out(i);
1230       if (u->Opcode() == Op_CastPP &&
1231           u->in(0) != NULL &&
1232           phase->is_dominator(u->in(0), ctrl)) {
1233         const Type* u_t = phase->igvn().type(u);
1234 
1235         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1236             u->in(0)->Opcode() == Op_IfTrue &&
1237             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1238             u->in(0)->in(0)->is_If() &&
1239             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1240             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1241             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1242             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1243             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1244           IdealLoopTree* loop = phase->get_loop(ctrl);
1245           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1246 
1247           if (!unc_loop->is_member(loop)) {
1248             continue;
1249           }
1250 
1251           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1252           assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1253           if (branch == NodeSentinel) {
1254             continue;
1255           }
1256 
1257           phase->igvn().replace_input_of(u, 1, val);
1258           phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
1259           phase->set_ctrl(u, u->in(0));
1260           phase->set_ctrl(lrb, u->in(0));
1261           unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1262           unc_ctrl = u->in(0);
1263           val = u;
1264 
1265           for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
1266             Node* u = val->fast_out(j);
1267             if (u == lrb) continue;
1268             phase->igvn().rehash_node_delayed(u);
1269             int nb = u->replace_edge(val, lrb);
1270             --j; jmax -= nb;
1271           }
1272 
1273           RegionNode* r = new RegionNode(3);
1274           IfNode* iff = unc_ctrl->in(0)->as_If();
1275 
1276           Node* ctrl_use = unc_ctrl->unique_ctrl_out();
1277           Node* unc_ctrl_clone = unc_ctrl->clone();
1278           phase->register_control(unc_ctrl_clone, loop, iff);
1279           Node* c = unc_ctrl_clone;
1280           Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
1281           r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
1282 
1283           phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
1284           phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
1285           phase->lazy_replace(c, unc_ctrl);
1286           c = NULL;
1287           phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
1288           phase->set_ctrl(val, unc_ctrl_clone);
1289 
1290           IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
1291           fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
1292           Node* iff_proj = iff->proj_out(0);
1293           r->init_req(2, iff_proj);
1294           phase->register_control(r, phase->ltree_root(), iff);
1295 
1296           Node* new_bol = new_iff->in(1)->clone();
1297           Node* new_cmp = new_bol->in(1)->clone();
1298           assert(new_cmp->Opcode() == Op_CmpP, "broken");
1299           assert(new_cmp->in(1) == val->in(1), "broken");
1300           new_bol->set_req(1, new_cmp);
1301           new_cmp->set_req(1, lrb);
1302           phase->register_new_node(new_bol, new_iff->in(0));
1303           phase->register_new_node(new_cmp, new_iff->in(0));
1304           phase->igvn().replace_input_of(new_iff, 1, new_bol);
1305           phase->igvn().replace_input_of(new_cast, 1, lrb);
1306 
1307           for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1308             Node* u = lrb->fast_out(i);
1309             if (u == new_cast || u == new_cmp) {
1310               continue;
1311             }
1312             phase->igvn().rehash_node_delayed(u);
1313             int nb = u->replace_edge(lrb, new_cast);
1314             assert(nb > 0, "no update?");
1315             --i; imax -= nb;
1316           }
1317 
1318           for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1319             Node* u = val->fast_out(i);
1320             if (u == lrb) {
1321               continue;
1322             }
1323             phase->igvn().rehash_node_delayed(u);
1324             int nb = u->replace_edge(val, new_cast);
1325             assert(nb > 0, "no update?");
1326             --i; imax -= nb;
1327           }
1328 
1329           ctrl = unc_ctrl_clone;
1330           phase->set_ctrl_and_loop(lrb, ctrl);
1331           break;
1332         }
1333       }
1334     }
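         // The barrier is pinned on a Java call's projection: clone it (and the
         // nodes that hang off it at the same control) along the catch-all
         // projection so that the normal and the exceptional path each get
         // their own copy, and insert phis where the two paths merge again.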
1335     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1336       CallNode* call = ctrl->in(0)->as_CallJava();
1337       CallProjections projs;
1338       call->extract_projections(&projs, false, false);
1339 
1340       Node* lrb_clone = lrb->clone();
1341       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1342       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1343 
1344       stack.push(lrb, 0);
1345       clones.push(lrb_clone);
1346 
1347       do {
1348         assert(stack.size() == clones.size(), "");
1349         Node* n = stack.node();
1350 #ifdef ASSERT
1351         if (n->is_Load()) {
1352           Node* mem = n->in(MemNode::Memory);
1353           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1354             Node* u = mem->fast_out(j);
1355             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1356           }
1357         }
1358 #endif
1359         uint idx = stack.index();
1360         Node* n_clone = clones.at(clones.size()-1);
1361         if (idx < n->outcnt()) {
1362           Node* u = n->raw_out(idx);
1363           Node* c = phase->ctrl_or_self(u);
1364           if (c == ctrl) {
1365             stack.set_index(idx+1);
1366             assert(!u->is_CFG(), "");
1367             stack.push(u, 0);
1368             Node* u_clone = u->clone();
1369             int nb = u_clone->replace_edge(n, n_clone);
1370             assert(nb > 0, "should have replaced some uses");
1371             phase->register_new_node(u_clone, projs.catchall_catchproj);
1372             clones.push(u_clone);
1373             phase->set_ctrl(u, projs.fallthrough_catchproj);
1374           } else {
1375             bool replaced = false;
1376             if (u->is_Phi()) {
1377               for (uint k = 1; k < u->req(); k++) {
1378                 if (u->in(k) == n) {
1379                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1380                     phase->igvn().replace_input_of(u, k, n_clone);
1381                     replaced = true;
1382                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1383                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1384                     replaced = true;
1385                   }
1386                 }
1387               }
1388             } else {
1389               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1390                 phase->igvn().rehash_node_delayed(u);
1391                 int nb = u->replace_edge(n, n_clone);
1392                 assert(nb > 0, "should have replaced some uses");
1393                 replaced = true;
1394               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1395                 phase->igvn().rehash_node_delayed(u);
1396                 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1397                 assert(nb > 0, "should have replaced some uses");
1398                 replaced = true;
1399               }
1400             }
1401             if (!replaced) {
1402               stack.set_index(idx+1);
1403             }
1404           }
1405         } else {
1406           // assert(n_clone->outcnt() > 0, "");
1407           // assert(n->outcnt() > 0, "");
1408           stack.pop();
1409           clones.pop();
1410         }
1411       } while (stack.size() > 0);
1412       assert(stack.size() == 0 && clones.size() == 0, "");
1413       ctrl = projs.fallthrough_catchproj;
1414     }
1415   }
1416 
1417   // Expand load-reference-barriers
1418   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1419   Unique_Node_List uses_to_ignore;
1420   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1421     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1422     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1423       phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1424       continue;
1425     }
1426     uint last = phase->C->unique();
1427     Node* ctrl = phase->get_ctrl(lrb);
1428     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1429 
1430 
1431     Node* orig_ctrl = ctrl;
1432 
1433     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1434     Node* init_raw_mem = raw_mem;
1435     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1436     // int alias = phase->C->get_alias_index(lrb->adr_type());
1437 
1438     IdealLoopTree *loop = phase->get_loop(ctrl);
1439     CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
1440     Node* unc_ctrl = NULL;
1441     if (unc != NULL) {
1442       if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
1443         unc = NULL;
1444       } else {
1445         unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
1446       }
1447     }
1448 
1449     Node* uncasted_val = val;
1450     if (unc != NULL) {
1451       uncasted_val = val->in(1);
1452     }
1453 
1454     Node* heap_stable_ctrl = NULL;
1455     Node* null_ctrl = NULL;
1456 
1457     assert(val->bottom_type()->make_oopptr(), "need oop");
1458     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1459 
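         // Expand the barrier into a multi-path diamond. Paths into the region:
         // heap stable (no GC in progress), value is null, value not in the
         // collection set, object already forwarded (forwardee read from the
         // mark word), and the evacuation slow path that calls the runtime stub.
         // val_phi merges the resulting oop, raw_mem_phi the raw memory state.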
1460     enum { _heap_stable = 1, _not_cset, _fwded, _evac_path, _null_path, PATH_LIMIT };
1461     Node* region = new RegionNode(PATH_LIMIT);
1462     Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
1463     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1464 
1465     // Stable path.
1466     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1467     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1468 
1469     // Heap stable case
1470     region->init_req(_heap_stable, heap_stable_ctrl);
1471     val_phi->init_req(_heap_stable, uncasted_val);
1472     raw_mem_phi->init_req(_heap_stable, raw_mem);
1473 
1474     Node* reg2_ctrl = NULL;
1475     // Null case
1476     test_null(ctrl, val, null_ctrl, phase);
1477     if (null_ctrl != NULL) {
1478       reg2_ctrl = null_ctrl->in(0);
1479       region->init_req(_null_path, null_ctrl);
1480       val_phi->init_req(_null_path, uncasted_val);
1481       raw_mem_phi->init_req(_null_path, raw_mem);
1482     } else {
1483       region->del_req(_null_path);
1484       val_phi->del_req(_null_path);
1485       raw_mem_phi->del_req(_null_path);
1486     }
1487 
1488     // Test for in-cset.
1489     // Wires !in_cset(obj) to slot 2 of region and phis
1490     Node* not_cset_ctrl = NULL;
1491     in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
1492     if (not_cset_ctrl != NULL) {
1493       if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
1494       region->init_req(_not_cset, not_cset_ctrl);
1495       val_phi->init_req(_not_cset, uncasted_val);
1496       raw_mem_phi->init_req(_not_cset, raw_mem);
1497     }
1498 
1499     // Resolve the object when the original value is in the collection set:
1500     // read the forwarding pointer from the mark word unconditionally.
1501     Node* new_val = uncasted_val;
1502     if (unc_ctrl != NULL) {
1503       // Clone the null check in this branch to allow implicit null check
1504       new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
1505       fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
1506 
1507       IfNode* iff = unc_ctrl->in(0)->as_If();
1508       phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
1509     }
1510     Node* addr = new AddPNode(new_val, new_val, phase->igvn().MakeConX(oopDesc::mark_offset_in_bytes()));
1511     phase->register_new_node(addr, ctrl);
1512     assert(new_val->bottom_type()->isa_oopptr(), "what else?");
1513     Node* markword = new LoadXNode(ctrl, raw_mem, addr, TypeRawPtr::BOTTOM, TypeX_X, MemNode::unordered);
1514     phase->register_new_node(markword, ctrl);
1515 
1516     // Test if the object is forwarded. This is the case if the lowest two bits are set.
1517     Node* masked = new AndXNode(markword, phase->igvn().MakeConX(markOopDesc::lock_mask_in_place));
1518     phase->register_new_node(masked, ctrl);
1519     Node* cmp = new CmpXNode(masked, phase->igvn().MakeConX(markOopDesc::marked_value));
1520     phase->register_new_node(cmp, ctrl);
1521 
1522     // Only branch to the LRB stub if the object is not forwarded; otherwise return the fwd ptr.
1523     Node* bol = new BoolNode(cmp, BoolTest::eq); // Equals 3 means it's forwarded
1524     phase->register_new_node(bol, ctrl);
1525 
1526     IfNode* iff = new IfNode(ctrl, bol, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
1527     phase->register_control(iff, loop, ctrl);
1528     Node* if_fwd = new IfTrueNode(iff);
1529     phase->register_control(if_fwd, loop, iff);
1530     Node* if_not_fwd = new IfFalseNode(iff);
1531     phase->register_control(if_not_fwd, loop, iff);
1532 
1533     // Decode forward pointer.
1534     Node* masked2 = new AndXNode(markword, phase->igvn().MakeConX(~markOopDesc::lock_mask_in_place));
1535     phase->register_new_node(masked2, if_fwd);
1536     Node* fwdraw = new CastX2PNode(masked2);
1537     fwdraw->init_req(0, if_fwd);
1538     phase->register_new_node(fwdraw, if_fwd);
1539     Node* fwd = new CheckCastPPNode(NULL, fwdraw, val->bottom_type());
1540     phase->register_new_node(fwd, if_fwd);
1541 
1542     // Wire up the already-forwarded path in slot 3 (_fwded).
1543     region->init_req(_fwded, if_fwd);
1544     val_phi->init_req(_fwded, fwd);
1545     raw_mem_phi->init_req(_fwded, raw_mem);
1546 
1547     // Call the LRB stub and wire up that path in slot 4 (_evac_path).
1548     Node* result_mem = NULL;
1549     ctrl = if_not_fwd;
1550     fwd = new_val;
1551     call_lrb_stub(ctrl, fwd, result_mem, raw_mem, phase);
1552     region->init_req(_evac_path, ctrl);
1553     val_phi->init_req(_evac_path, fwd);
1554     raw_mem_phi->init_req(_evac_path, result_mem);
1555 
1556     phase->register_control(region, loop, heap_stable_iff);
1557     Node* out_val = val_phi;
1558     phase->register_new_node(val_phi, region);
1559     phase->register_new_node(raw_mem_phi, region);
1560 
1561     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1562 
1563     ctrl = orig_ctrl;
1564 
1565     if (unc != NULL) {
1566       for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1567         Node* u = val->fast_out(i);
1568         Node* c = phase->ctrl_or_self(u);
1569         if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
1570           phase->igvn().rehash_node_delayed(u);
1571           int nb = u->replace_edge(val, out_val);
1572           --i, imax -= nb;
1573         }
1574       }
1575       if (val->outcnt() == 0) {
1576         phase->igvn()._worklist.push(val);
1577       }
1578     }
1579     phase->igvn().replace_node(lrb, out_val);
1580 
1581     follow_barrier_uses(out_val, ctrl, uses, phase);
1582 
1583     for (uint next = 0; next < uses.size(); next++) {
1584       Node *n = uses.at(next);
1585       assert(phase->get_ctrl(n) == ctrl, "bad control");
1586       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1587       phase->set_ctrl(n, region);
1588       follow_barrier_uses(n, ctrl, uses, phase);
1589     }
1590 
1591     // The slow path call produces memory: hook the raw memory phi
1592     // from the expanded load reference barrier with the rest of the graph
1593     // which may require adding memory phis at every post dominated
1594     // region and at enclosing loop heads. Use the memory state
1595     // collected in memory_nodes to fix the memory graph. Update that
1596     // memory state as we go.
1597     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1598   }
1599   // Done expanding load-reference-barriers.
1600   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1601 
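       // Expand enqueue (SATB pre-write) barriers: skip the work when the heap
       // is stable or pre_val is null; otherwise push pre_val into the
       // thread-local SATB queue, and when the queue index is zero (buffer
       // full) call the write_ref_field_pre runtime entry instead.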
1602   for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
1603     Node* barrier = state->enqueue_barrier(i);
1604     Node* pre_val = barrier->in(1);
1605 
1606     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1607       ShouldNotReachHere();
1608       continue;
1609     }
1610 
1611     Node* ctrl = phase->get_ctrl(barrier);
1612 
1613     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1614       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1615       ctrl = ctrl->in(0)->in(0);
1616       phase->set_ctrl(barrier, ctrl);
1617     } else if (ctrl->is_CallRuntime()) {
1618       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1619       ctrl = ctrl->in(0);
1620       phase->set_ctrl(barrier, ctrl);
1621     }
1622 
1623     Node* init_ctrl = ctrl;
1624     IdealLoopTree* loop = phase->get_loop(ctrl);
1625     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1626     Node* init_raw_mem = raw_mem;
1627     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1628     Node* heap_stable_ctrl = NULL;
1629     Node* null_ctrl = NULL;
1630     uint last = phase->C->unique();
1631 
1632     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1633     Node* region = new RegionNode(PATH_LIMIT);
1634     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1635 
1636     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1637     Node* region2 = new RegionNode(PATH_LIMIT2);
1638     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1639 
1640     // Stable path.
1641     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1642     region->init_req(_heap_stable, heap_stable_ctrl);
1643     phi->init_req(_heap_stable, raw_mem);
1644 
1645     // Null path
1646     Node* reg2_ctrl = NULL;
1647     test_null(ctrl, pre_val, null_ctrl, phase);
1648     if (null_ctrl != NULL) {
1649       reg2_ctrl = null_ctrl->in(0);
1650       region2->init_req(_null_path, null_ctrl);
1651       phi2->init_req(_null_path, raw_mem);
1652     } else {
1653       region2->del_req(_null_path);
1654       phi2->del_req(_null_path);
1655     }
1656 
1657     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1658     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1659     Node* thread = new ThreadLocalNode();
1660     phase->register_new_node(thread, ctrl);
1661     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1662     phase->register_new_node(buffer_adr, ctrl);
1663     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1664     phase->register_new_node(index_adr, ctrl);
1665 
1666     BasicType index_bt = TypeX_X->basic_type();
1667     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading SATBMarkQueue::_index with wrong size.");
1668     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1669     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1670     phase->register_new_node(index, ctrl);
1671     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1672     phase->register_new_node(index_cmp, ctrl);
1673     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1674     phase->register_new_node(index_test, ctrl);
1675     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1676     if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1677     phase->register_control(queue_full_iff, loop, ctrl);
1678     Node* not_full = new IfTrueNode(queue_full_iff);
1679     phase->register_control(not_full, loop, queue_full_iff);
1680     Node* full = new IfFalseNode(queue_full_iff);
1681     phase->register_control(full, loop, queue_full_iff);
1682 
1683     ctrl = not_full;
1684 
1685     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1686     phase->register_new_node(next_index, ctrl);
1687 
1688     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1689     phase->register_new_node(buffer, ctrl);
1690     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1691     phase->register_new_node(log_addr, ctrl);
1692     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1693     phase->register_new_node(log_store, ctrl);
1694     // update the index
1695     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1696     phase->register_new_node(index_update, ctrl);
1697 
1698     // Fast-path case
1699     region2->init_req(_fast_path, ctrl);
1700     phi2->init_req(_fast_path, index_update);
1701 
1702     ctrl = full;
1703 
1704     Node* base = find_bottom_mem(ctrl, phase);
1705 
1706     MergeMemNode* mm = MergeMemNode::make(base);
1707     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1708     phase->register_new_node(mm, ctrl);
1709 
1710     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1711     call->init_req(TypeFunc::Control, ctrl);
1712     call->init_req(TypeFunc::I_O, phase->C->top());
1713     call->init_req(TypeFunc::Memory, mm);
1714     call->init_req(TypeFunc::FramePtr, phase->C->top());
1715     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1716     call->init_req(TypeFunc::Parms, pre_val);
1717     call->init_req(TypeFunc::Parms+1, thread);
1718     phase->register_control(call, loop, ctrl);
1719 
1720     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1721     phase->register_control(ctrl_proj, loop, call);
1722     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1723     phase->register_new_node(mem_proj, call);
1724 
1725     // Slow-path case
1726     region2->init_req(_slow_path, ctrl_proj);
1727     phi2->init_req(_slow_path, mem_proj);
1728 
1729     phase->register_control(region2, loop, reg2_ctrl);
1730     phase->register_new_node(phi2, region2);
1731 
1732     region->init_req(_heap_unstable, region2);
1733     phi->init_req(_heap_unstable, phi2);
1734 
1735     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1736     phase->register_new_node(phi, region);
1737 
1738     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1739     for (uint next = 0; next < uses.size(); next++) {
1740       Node *n = uses.at(next);
1741       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1742       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1743       phase->set_ctrl(n, region);
1744       follow_barrier_uses(n, init_ctrl, uses, phase);
1745     }
1746     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1747 
1748     phase->igvn().replace_node(barrier, pre_val);
1749   }
1750   assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1751 
1752 }
1753 
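     // Make the heap stable test loop invariant: if the gc state load is not
     // already available above the loop, clone the load/and/cmp/bool chain at
     // the loop entry (using a dominating raw memory state) and let the test
     // use the clone, so the loop can later be unswitched on it.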
1754 void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1755   IdealLoopTree *loop = phase->get_loop(iff);
1756   Node* loop_head = loop->_head;
1757   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1758 
1759   Node* bol = iff->in(1);
1760   Node* cmp = bol->in(1);
1761   Node* andi = cmp->in(1);
1762   Node* load = andi->in(1);
1763 
1764   assert(is_gc_state_load(load), "broken");
1765   if (!phase->is_dominator(load->in(0), entry_c)) {
1766     Node* mem_ctrl = NULL;
1767     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1768     load = load->clone();
1769     load->set_req(MemNode::Memory, mem);
1770     load->set_req(0, entry_c);
1771     phase->register_new_node(load, entry_c);
1772     andi = andi->clone();
1773     andi->set_req(1, load);
1774     phase->register_new_node(andi, entry_c);
1775     cmp = cmp->clone();
1776     cmp->set_req(1, andi);
1777     phase->register_new_node(cmp, entry_c);
1778     bol = bol->clone();
1779     bol->set_req(1, cmp);
1780     phase->register_new_node(bol, entry_c);
1781 
1782     Node* old_bol = iff->in(1);
1783     phase->igvn().replace_input_of(iff, 1, bol);
1784   }
1785 }
1786 
1787 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1788   if (!n->is_If() || n->is_CountedLoopEnd()) {
1789     return false;
1790   }
1791   Node* region = n->in(0);
1792 
1793   if (!region->is_Region()) {
1794     return false;
1795   }
1796   Node* dom = phase->idom(region);
1797   if (!dom->is_If()) {
1798     return false;
1799   }
1800 
1801   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1802     return false;
1803   }
1804 
1805   IfNode* dom_if = dom->as_If();
1806   Node* proj_true = dom_if->proj_out(1);
1807   Node* proj_false = dom_if->proj_out(0);
1808 
1809   for (uint i = 1; i < region->req(); i++) {
1810     if (phase->is_dominator(proj_true, region->in(i))) {
1811       continue;
1812     }
1813     if (phase->is_dominator(proj_false, region->in(i))) {
1814       continue;
1815     }
1816     return false;
1817   }
1818 
1819   return true;
1820 }
1821 
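     // When a heap stable test is immediately dominated by an identical test
     // (with only a region in between), replace its condition with a phi of
     // constants keyed on which projection of the dominating test reaches each
     // region input, then split the If so the redundant test disappears.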
1822 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1823   assert(is_heap_stable_test(n), "no other tests");
1824   if (identical_backtoback_ifs(n, phase)) {
1825     Node* n_ctrl = n->in(0);
1826     if (phase->can_split_if(n_ctrl)) {
1827       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1828       if (is_heap_stable_test(n)) {
1829         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1830         assert(is_gc_state_load(gc_state_load), "broken");
1831         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1832         assert(is_gc_state_load(dom_gc_state_load), "broken");
1833         if (gc_state_load != dom_gc_state_load) {
1834           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1835         }
1836       }
1837       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1838       Node* proj_true = dom_if->proj_out(1);
1839       Node* proj_false = dom_if->proj_out(0);
1840       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1841       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1842 
1843       for (uint i = 1; i < n_ctrl->req(); i++) {
1844         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1845           bolphi->init_req(i, con_true);
1846         } else {
1847           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1848           bolphi->init_req(i, con_false);
1849         }
1850       }
1851       phase->register_new_node(bolphi, n_ctrl);
1852       phase->igvn().replace_input_of(n, 1, bolphi);
1853       phase->do_split_if(n);
1854     }
1855   }
1856 }
1857 
1858 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1859   // Find first invariant test that doesn't exit the loop
1860   LoopNode *head = loop->_head->as_Loop();
1861   IfNode* unswitch_iff = NULL;
1862   Node* n = head->in(LoopNode::LoopBackControl);
1863   int loop_has_sfpts = -1;
1864   while (n != head) {
1865     Node* n_dom = phase->idom(n);
1866     if (n->is_Region()) {
1867       if (n_dom->is_If()) {
1868         IfNode* iff = n_dom->as_If();
1869         if (iff->in(1)->is_Bool()) {
1870           BoolNode* bol = iff->in(1)->as_Bool();
1871           if (bol->in(1)->is_Cmp()) {
1872             // If the condition is invariant and not a loop exit,
1873             // then we have found a candidate for unswitching.
1874             if (is_heap_stable_test(iff) &&
1875                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1876               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1877               if (loop_has_sfpts == -1) {
1878                 for(uint i = 0; i < loop->_body.size(); i++) {
1879                   Node *m = loop->_body[i];
1880                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1881                     loop_has_sfpts = 1;
1882                     break;
1883                   }
1884                 }
1885                 if (loop_has_sfpts == -1) {
1886                   loop_has_sfpts = 0;
1887                 }
1888               }
1889               if (!loop_has_sfpts) {
1890                 unswitch_iff = iff;
1891               }
1892             }
1893           }
1894         }
1895       }
1896     }
1897     n = n_dom;
1898   }
1899   return unswitch_iff;
1900 }
1901 
1902 
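     // Post-expansion cleanup: collect gc state loads and heap stable tests in
     // one graph walk, common identical gc state loads (when
     // ShenandoahCommonGCStateLoads is enabled), merge back-to-back heap stable
     // tests, and unswitch innermost loops on a loop-invariant heap stable test
     // when the unswitching policy allows it.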
1903 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1904   Node_List heap_stable_tests;
1905   Node_List gc_state_loads;
1906   stack.push(phase->C->start(), 0);
1907   do {
1908     Node* n = stack.node();
1909     uint i = stack.index();
1910 
1911     if (i < n->outcnt()) {
1912       Node* u = n->raw_out(i);
1913       stack.set_index(i+1);
1914       if (!visited.test_set(u->_idx)) {
1915         stack.push(u, 0);
1916       }
1917     } else {
1918       stack.pop();
1919       if (ShenandoahCommonGCStateLoads && is_gc_state_load(n)) {
1920         gc_state_loads.push(n);
1921       }
1922       if (n->is_If() && is_heap_stable_test(n)) {
1923         heap_stable_tests.push(n);
1924       }
1925     }
1926   } while (stack.size() > 0);
1927 
1928   bool progress;
1929   do {
1930     progress = false;
1931     for (uint i = 0; i < gc_state_loads.size(); i++) {
1932       Node* n = gc_state_loads.at(i);
1933       if (n->outcnt() != 0) {
1934         progress |= try_common_gc_state_load(n, phase);
1935       }
1936     }
1937   } while (progress);
1938 
1939   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1940     Node* n = heap_stable_tests.at(i);
1941     assert(is_heap_stable_test(n), "only evacuation test");
1942     merge_back_to_back_tests(n, phase);
1943   }
1944 
1945   if (!phase->C->major_progress()) {
1946     VectorSet seen(Thread::current()->resource_area());
1947     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1948       Node* n = heap_stable_tests.at(i);
1949       IdealLoopTree* loop = phase->get_loop(n);
1950       if (loop != phase->ltree_root() &&
1951           loop->_child == NULL &&
1952           !loop->_irreducible) {
1953         LoopNode* head = loop->_head->as_Loop();
1954         if ((!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1955             !seen.test_set(head->_idx)) {
1956           IfNode* iff = find_unswitching_candidate(loop, phase);
1957           if (iff != NULL) {
1958             Node* bol = iff->in(1);
1959             if (head->is_strip_mined()) {
1960               head->verify_strip_mined(0);
1961             }
1962             move_heap_stable_test_out_of_loop(iff, phase);
1963             if (loop->policy_unswitching(phase)) {
1964               if (head->is_strip_mined()) {
1965                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
1966                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
1967               }
1968               phase->do_unswitching(loop, old_new);
1969             } else {
1970               // Not proceeding with unswitching. Move load back in
1971               // the loop.
1972               phase->igvn().replace_input_of(iff, 1, bol);
1973             }
1974           }
1975         }
1976       }
1977     }
1978   }
1979 }
1980 
1981 #ifdef ASSERT
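     // Debug-only consistency check of the raw memory graph around the
     // pre-barrier runtime calls: every control merge reachable from such a
     // call must either have all of its inputs reachable from the call, or
     // carry a raw memory phi whose inputs match, per region input, whether
     // that input is reachable from the call.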
1982 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
1983   const bool trace = false;
1984   ResourceMark rm;
1985   Unique_Node_List nodes;
1986   Unique_Node_List controls;
1987   Unique_Node_List memories;
1988 
1989   nodes.push(root);
1990   for (uint next = 0; next < nodes.size(); next++) {
1991     Node *n  = nodes.at(next);
1992     if (ShenandoahBarrierSetC2::is_shenandoah_wb_call(n)) {
1993       controls.push(n);
1994       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
1995       for (uint next2 = 0; next2 < controls.size(); next2++) {
1996         Node *m = controls.at(next2);
1997         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
1998           Node* u = m->fast_out(i);
1999           if (u->is_CFG() && !u->is_Root() &&
2000               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
2001               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
2002             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
2003             controls.push(u);
2004           }
2005         }
2006       }
2007       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
2008       for (uint next2 = 0; next2 < memories.size(); next2++) {
2009         Node *m = memories.at(next2);
2010         assert(m->bottom_type() == Type::MEMORY, "");
2011         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2012           Node* u = m->fast_out(i);
2013           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
2014             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2015             memories.push(u);
2016           } else if (u->is_LoadStore()) {
2017             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
2018             memories.push(u->find_out_with(Op_SCMemProj));
2019           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
2020             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2021             memories.push(u);
2022           } else if (u->is_Phi()) {
2023             assert(u->bottom_type() == Type::MEMORY, "");
2024             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
2025               assert(controls.member(u->in(0)), "");
2026               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2027               memories.push(u);
2028             }
2029           } else if (u->is_SafePoint() || u->is_MemBar()) {
2030             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2031               Node* uu = u->fast_out(j);
2032               if (uu->bottom_type() == Type::MEMORY) {
2033                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
2034                 memories.push(uu);
2035               }
2036             }
2037           }
2038         }
2039       }
2040       for (uint next2 = 0; next2 < controls.size(); next2++) {
2041         Node *m = controls.at(next2);
2042         if (m->is_Region()) {
2043           bool all_in = true;
2044           for (uint i = 1; i < m->req(); i++) {
2045             if (!controls.member(m->in(i))) {
2046               all_in = false;
2047               break;
2048             }
2049           }
2050           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
2051           bool found_phi = false;
2052           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
2053             Node* u = m->fast_out(j);
2054             if (u->is_Phi() && memories.member(u)) {
2055               found_phi = true;
2056               for (uint i = 1; i < u->req() && found_phi; i++) {
2057                 Node* k = u->in(i);
2058                 if (memories.member(k) != controls.member(m->in(i))) {
2059                   found_phi = false;
2060                 }
2061               }
2062             }
2063           }
2064           assert(found_phi || all_in, "");
2065         }
2066       }
2067       controls.clear();
2068       memories.clear();
2069     }
2070     for (uint i = 0; i < n->len(); ++i) {
2071       Node *m = n->in(i);
2072       if (m != NULL) {
2073         nodes.push(m);
2074       }
2075     }
2076   }
2077 }
2078 #endif
2079 
2080 ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
2081   ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
2082 }
2083 
2084 const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
2085   if (in(1) == NULL || in(1)->is_top()) {
2086     return Type::TOP;
2087   }
2088   const Type* t = in(1)->bottom_type();
2089   if (t == TypePtr::NULL_PTR) {
2090     return t;
2091   }
2092   return t->is_oopptr()->cast_to_nonconst();
2093 }
2094 
2095 const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
2096   if (in(1) == NULL) {
2097     return Type::TOP;
2098   }
2099   const Type* t = phase->type(in(1));
2100   if (t == Type::TOP) {
2101     return Type::TOP;
2102   }
2103   if (t == TypePtr::NULL_PTR) {
2104     return t;
2105   }
2106   return t->is_oopptr()->cast_to_nonconst();
2107 }
2108 
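     // Support for removing redundant enqueue barriers: needed() classifies a
     // value (no barrier needed for null, constant oops, fresh allocations or
     // values that already went through an enqueue barrier; Phi and CMove
     // inputs need further inspection), next() looks through casts,
     // DecodeN/EncodeP and projections, and Identity() below uses both to drop
     // barriers that are not needed on any input.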
2109 int ShenandoahEnqueueBarrierNode::needed(Node* n) {
2110   if (n == NULL ||
2111       n->is_Allocate() ||
2112       n->Opcode() == Op_ShenandoahEnqueueBarrier ||
2113       n->bottom_type() == TypePtr::NULL_PTR ||
2114       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2115     return NotNeeded;
2116   }
2117   if (n->is_Phi() ||
2118       n->is_CMove()) {
2119     return MaybeNeeded;
2120   }
2121   return Needed;
2122 }
2123 
2124 Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
2125   for (;;) {
2126     if (n == NULL) {
2127       return n;
2128     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2129       return n;
2130     } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2131       return n;
2132     } else if (n->is_ConstraintCast() ||
2133                n->Opcode() == Op_DecodeN ||
2134                n->Opcode() == Op_EncodeP) {
2135       n = n->in(1);
2136     } else if (n->is_Proj()) {
2137       n = n->in(0);
2138     } else {
2139       return n;
2140     }
2141   }
2142   ShouldNotReachHere();
2143   return NULL;
2144 }
2145 
2146 Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
2147   PhaseIterGVN* igvn = phase->is_IterGVN();
2148 
2149   Node* n = next(in(1));
2150 
2151   int cont = needed(n);
2152 
2153   if (cont == NotNeeded) {
2154     return in(1);
2155   } else if (cont == MaybeNeeded) {
2156     if (igvn == NULL) {
2157       phase->record_for_igvn(this);
2158       return this;
2159     } else {
2160       ResourceMark rm;
2161       Unique_Node_List wq;
2162       uint wq_i = 0;
2163 
2164       for (;;) {
2165         if (n->is_Phi()) {
2166           for (uint i = 1; i < n->req(); i++) {
2167             Node* m = n->in(i);
2168             if (m != NULL) {
2169               wq.push(m);
2170             }
2171           }
2172         } else {
2173           assert(n->is_CMove(), "nothing else here");
2174           Node* m = n->in(CMoveNode::IfFalse);
2175           wq.push(m);
2176           m = n->in(CMoveNode::IfTrue);
2177           wq.push(m);
2178         }
2179         Node* orig_n = NULL;
2180         do {
2181           if (wq_i >= wq.size()) {
2182             return in(1);
2183           }
2184           n = wq.at(wq_i);
2185           wq_i++;
2186           orig_n = n;
2187           n = next(n);
2188           cont = needed(n);
2189           if (cont == Needed) {
2190             return this;
2191           }
2192         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2193       }
2194     }
2195   }
2196 
2197   return this;
2198 }
2199 
2200 #ifdef ASSERT
2201 static bool has_never_branch(Node* root) {
2202   for (uint i = 1; i < root->req(); i++) {
2203     Node* in = root->in(i);
2204     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2205       return true;
2206     }
2207   }
2208   return false;
2209 }
2210 #endif
2211 
2212 void MemoryGraphFixer::collect_memory_nodes() {
2213   Node_Stack stack(0);
2214   VectorSet visited(Thread::current()->resource_area());
2215   Node_List regions;
2216 
2217   // Walk the raw memory graph and create a mapping from CFG node to
2218   // memory node. Exclude phis for now.
2219   stack.push(_phase->C->root(), 1);
2220   do {
2221     Node* n = stack.node();
2222     int opc = n->Opcode();
2223     uint i = stack.index();
2224     if (i < n->req()) {
2225       Node* mem = NULL;
2226       if (opc == Op_Root) {
2227         Node* in = n->in(i);
2228         int in_opc = in->Opcode();
2229         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2230           mem = in->in(TypeFunc::Memory);
2231         } else if (in_opc == Op_Halt) {
2232           if (!in->in(0)->is_Region()) {
2233             Node* proj = in->in(0);
2234             assert(proj->is_Proj(), "");
2235             Node* in = proj->in(0);
2236             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2237             if (in->is_CallStaticJava()) {
2238               mem = in->in(TypeFunc::Memory);
2239             } else if (in->Opcode() == Op_Catch) {
2240               Node* call = in->in(0)->in(0);
2241               assert(call->is_Call(), "");
2242               mem = call->in(TypeFunc::Memory);
2243             } else if (in->Opcode() == Op_NeverBranch) {
2244               ResourceMark rm;
2245               Unique_Node_List wq;
2246               wq.push(in);
2247               wq.push(in->as_Multi()->proj_out(0));
2248               for (uint j = 1; j < wq.size(); j++) {
2249                 Node* c = wq.at(j);
2250                 assert(!c->is_Root(), "shouldn't leave loop");
2251                 if (c->is_SafePoint()) {
2252                   assert(mem == NULL, "only one safepoint");
2253                   mem = c->in(TypeFunc::Memory);
2254                 }
2255                 for (DUIterator_Fast kmax, k = c->fast_outs(kmax); k < kmax; k++) {
2256                   Node* u = c->fast_out(k);
2257                   if (u->is_CFG()) {
2258                     wq.push(u);
2259                   }
2260                 }
2261               }
2262               assert(mem != NULL, "should have found safepoint");
2263             }
2264           }
2265         } else {
2266 #ifdef ASSERT
2267           n->dump();
2268           in->dump();
2269 #endif
2270           ShouldNotReachHere();
2271         }
2272       } else {
2273         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2274         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2275         mem = n->in(i);
2276       }
2277       i++;
2278       stack.set_index(i);
2279       if (mem == NULL) {
2280         continue;
2281       }
2282       for (;;) {
2283         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2284           break;
2285         }
2286         if (mem->is_Phi()) {
2287           stack.push(mem, 2);
2288           mem = mem->in(1);
2289         } else if (mem->is_Proj()) {
2290           stack.push(mem, mem->req());
2291           mem = mem->in(0);
2292         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2293           mem = mem->in(TypeFunc::Memory);
2294         } else if (mem->is_MergeMem()) {
2295           MergeMemNode* mm = mem->as_MergeMem();
2296           mem = mm->memory_at(_alias);
2297         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2298           assert(_alias == Compile::AliasIdxRaw, "");
2299           stack.push(mem, mem->req());
2300           mem = mem->in(MemNode::Memory);
2301         } else {
2302 #ifdef ASSERT
2303           mem->dump();
2304 #endif
2305           ShouldNotReachHere();
2306         }
2307       }
2308     } else {
2309       if (n->is_Phi()) {
2310         // Nothing
2311       } else if (!n->is_Root()) {
2312         Node* c = get_ctrl(n);
2313         _memory_nodes.map(c->_idx, n);
2314       }
2315       stack.pop();
2316     }
2317   } while (stack.is_nonempty());
2318 
2319   // Iterate over CFG nodes in rpo and propagate memory state to
2320   // compute memory state at regions, creating new phis if needed.
2321   Node_List rpo_list;
2322   visited.Clear();
2323   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2324   Node* root = rpo_list.pop();
2325   assert(root == _phase->C->root(), "");
2326 
2327   const bool trace = false;
2328 #ifdef ASSERT
2329   if (trace) {
2330     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2331       Node* c = rpo_list.at(i);
2332       if (_memory_nodes[c->_idx] != NULL) {
2333         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2334       }
2335     }
2336   }
2337 #endif
2338   uint last = _phase->C->unique();
2339 
2340 #ifdef ASSERT
2341   uint8_t max_depth = 0;
2342   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2343     IdealLoopTree* lpt = iter.current();
2344     max_depth = MAX2(max_depth, lpt->_nest);
2345   }
2346 #endif
2347 
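       // Propagate memory state over the CFG in reverse post order until a
       // fixed point is reached: a region takes the unique memory state of its
       // inputs, or a (new or reused) raw memory phi when they disagree; any
       // other control node inherits the state of its immediate dominator. The
       // number of iterations is bounded by the loop nesting depth (see the
       // assert below).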
2348   bool progress = true;
2349   int iteration = 0;
2350   Node_List dead_phis;
2351   while (progress) {
2352     progress = false;
2353     iteration++;
2354     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2355     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2356     IdealLoopTree* last_updated_ilt = NULL;
2357     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2358       Node* c = rpo_list.at(i);
2359 
2360       Node* prev_mem = _memory_nodes[c->_idx];
2361       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2362         Node* prev_region = regions[c->_idx];
2363         Node* unique = NULL;
2364         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2365           Node* m = _memory_nodes[c->in(j)->_idx];
2366           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2367           if (m != NULL) {
2368             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2369               assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), "");
2370               // continue
2371             } else if (unique == NULL) {
2372               unique = m;
2373             } else if (m == unique) {
2374               // continue
2375             } else {
2376               unique = NodeSentinel;
2377             }
2378           }
2379         }
2380         assert(unique != NULL, "empty phi???");
2381         if (unique != NodeSentinel) {
2382           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2383             dead_phis.push(prev_region);
2384           }
2385           regions.map(c->_idx, unique);
2386         } else {
2387           Node* phi = NULL;
2388           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2389             phi = prev_region;
2390             for (uint k = 1; k < c->req(); k++) {
2391               Node* m = _memory_nodes[c->in(k)->_idx];
2392               assert(m != NULL, "expect memory state");
2393               phi->set_req(k, m);
2394             }
2395           } else {
2396             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2397               Node* u = c->fast_out(j);
2398               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2399                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2400                 phi = u;
2401                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2402                   Node* m = _memory_nodes[c->in(k)->_idx];
2403                   assert(m != NULL, "expect memory state");
2404                   if (u->in(k) != m) {
2405                     phi = NULL;
2406                   }
2407                 }
2408               }
2409             }
2410             if (phi == NULL) {
2411               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2412               for (uint k = 1; k < c->req(); k++) {
2413                 Node* m = _memory_nodes[c->in(k)->_idx];
2414                 assert(m != NULL, "expect memory state");
2415                 phi->init_req(k, m);
2416               }
2417             }
2418           }
2419           assert(phi != NULL, "");
2420           regions.map(c->_idx, phi);
2421         }
2422         Node* current_region = regions[c->_idx];
2423         if (current_region != prev_region) {
2424           progress = true;
2425           if (prev_region == prev_mem) {
2426             _memory_nodes.map(c->_idx, current_region);
2427           }
2428         }
2429       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2430         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2431         assert(m != NULL, "expect memory state");
2432         if (m != prev_mem) {
2433           _memory_nodes.map(c->_idx, m);
2434           progress = true;
2435         }
2436       }
2437 #ifdef ASSERT
2438       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2439 #endif
2440     }
2441   }
2442 
2443   // Replace existing phi with computed memory state for that region
2444   // if different (could be a new phi or a dominating memory node if
2445   // that phi was found to be useless).
2446   while (dead_phis.size() > 0) {
2447     Node* n = dead_phis.pop();
2448     n->replace_by(_phase->C->top());
2449     n->destruct();
2450   }
2451   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2452     Node* c = rpo_list.at(i);
2453     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2454       Node* n = regions[c->_idx];
2455       if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2456         _phase->register_new_node(n, c);
2457       }
2458     }
2459   }
2460   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2461     Node* c = rpo_list.at(i);
2462     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2463       Node* n = regions[c->_idx];
2464       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2465         Node* u = c->fast_out(i);
2466         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2467             u != n) {
2468           if (u->adr_type() == TypePtr::BOTTOM) {
2469             fix_memory_uses(u, n, n, c);
2470           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2471             _phase->lazy_replace(u, n);
2472             --i; --imax;
2473           }
2474         }
2475       }
2476     }
2477   }
2478 }
2479 
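     // Control to which a memory state is attributed: for memory projections
     // of a call that has an exception edge, use the matching catch projection
     // (fall-through or catch-all) rather than the call itself.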
2480 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2481   Node* c = _phase->get_ctrl(n);
2482   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2483     assert(c == n->in(0), "");
2484     CallNode* call = c->as_Call();
2485     CallProjections projs;
2486     call->extract_projections(&projs, true, false);
2487     if (projs.catchall_memproj != NULL) {
2488       if (projs.fallthrough_memproj == n) {
2489         c = projs.fallthrough_catchproj;
2490       } else {
2491         assert(projs.catchall_memproj == n, "");
2492         c = projs.catchall_catchproj;
2493       }
2494     }
2495   }
2496   return c;
2497 }
2498 
2499 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2500   if (_phase->has_ctrl(n))
2501     return get_ctrl(n);
2502   else {
2503     assert (n->is_CFG(), "must be a CFG node");
2504     return n;
2505   }
2506 }
2507 
2508 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2509   return m != NULL && get_ctrl(m) == c;
2510 }
2511 
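     // Find the memory state available at ctrl for use by n (or at ctrl itself
     // when n is NULL): walk up the dominator tree until a recorded memory
     // state is defined at the control reached, then, if necessary, step over
     // memory nodes at ctrl that do not dominate n.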
2512 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2513   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
2514   Node* mem = _memory_nodes[ctrl->_idx];
2515   Node* c = ctrl;
2516   while (!mem_is_valid(mem, c) &&
2517          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2518     c = _phase->idom(c);
2519     mem = _memory_nodes[c->_idx];
2520   }
2521   if (n != NULL && mem_is_valid(mem, c)) {
2522     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2523       mem = next_mem(mem, _alias);
2524     }
2525     if (mem->is_MergeMem()) {
2526       mem = mem->as_MergeMem()->memory_at(_alias);
2527     }
2528     if (!mem_is_valid(mem, c)) {
2529       do {
2530         c = _phase->idom(c);
2531         mem = _memory_nodes[c->_idx];
2532       } while (!mem_is_valid(mem, c) &&
2533                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2534     }
2535   }
2536   assert(mem->bottom_type() == Type::MEMORY, "");
2537   return mem;
2538 }
2539 
2540 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2541   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2542     Node* use = region->fast_out(i);
2543     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2544         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2545       return true;
2546     }
2547   }
2548   return false;
2549 }
2550 
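     // Hook new_mem, the memory state produced at new_ctrl by the expanded
     // barrier, into the raw memory graph: either splice it into the memory
     // chain that already hangs below ctrl, or walk the CFG below new_ctrl and
     // create or update raw memory phis at regions that now see diverging
     // memory states, rewiring their memory uses accordingly.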
2551 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2552   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2553   const bool trace = false;
2554   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2555   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2556   GrowableArray<Node*> phis;
2557   if (mem_for_ctrl != mem) {
2558     Node* old = mem_for_ctrl;
2559     Node* prev = NULL;
2560     while (old != mem) {
2561       prev = old;
2562       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2563         assert(_alias == Compile::AliasIdxRaw, "");
2564         old = old->in(MemNode::Memory);
2565       } else if (old->Opcode() == Op_SCMemProj) {
2566         assert(_alias == Compile::AliasIdxRaw, "");
2567         old = old->in(0);
2568       } else {
2569         ShouldNotReachHere();
2570       }
2571     }
2572     assert(prev != NULL, "");
2573     if (new_ctrl != ctrl) {
2574       _memory_nodes.map(ctrl->_idx, mem);
2575       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2576     }
2577     uint input = (uint)MemNode::Memory;
2578     _phase->igvn().replace_input_of(prev, input, new_mem);
2579   } else {
2580     uses.clear();
2581     _memory_nodes.map(new_ctrl->_idx, new_mem);
2582     uses.push(new_ctrl);
2583     for (uint next = 0; next < uses.size(); next++) {
2584       Node *n = uses.at(next);
2585       assert(n->is_CFG(), "");
2586       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2587       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2588         Node* u = n->fast_out(i);
2589         if (!u->is_Root() && u->is_CFG() && u != n) {
2590           Node* m = _memory_nodes[u->_idx];
2591           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2592               !has_mem_phi(u) &&
2593               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2594             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2595             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2596 
2597             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2598               bool push = true;
2599               bool create_phi = true;
2600               if (_phase->is_dominator(new_ctrl, u)) {
2601                 create_phi = false;
2602               } else if (!_phase->C->has_irreducible_loop()) {
2603                 IdealLoopTree* loop = _phase->get_loop(ctrl);
2604                 bool do_check = true;
2605                 IdealLoopTree* l = loop;
2606                 create_phi = false;
2607                 while (l != _phase->ltree_root()) {
2608                   if (_phase->is_dominator(l->_head, u) && _phase->is_dominator(_phase->idom(u), l->_head)) {
2609                     create_phi = true;
2610                     do_check = false;
2611                     break;
2612                   }
2613                   l = l->_parent;
2614                 }
2615 
2616                 if (do_check) {
2617                   assert(!create_phi, "");
2618                   IdealLoopTree* u_loop = _phase->get_loop(u);
2619                   if (u_loop != _phase->ltree_root() && u_loop->is_member(loop)) {
2620                     Node* c = ctrl;
2621                     while (!_phase->is_dominator(c, u_loop->tail())) {
2622                       c = _phase->idom(c);
2623                     }
2624                     if (!_phase->is_dominator(c, u)) {
2625                       do_check = false;
2626                     }
2627                   }
2628                 }
2629 
2630                 if (do_check && _phase->is_dominator(_phase->idom(u), new_ctrl)) {
2631                   create_phi = true;
2632                 }
2633               }
2634               if (create_phi) {
2635                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2636                 _phase->register_new_node(phi, u);
2637                 phis.push(phi);
2638                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2639                 if (!mem_is_valid(m, u)) {
2640                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2641                   _memory_nodes.map(u->_idx, phi);
2642                 } else {
2643                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2644                   for (;;) {
2645                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2646                     Node* next = NULL;
2647                     if (m->is_Proj()) {
2648                       next = m->in(0);
2649                     } else {
2650                       assert(m->is_Mem() || m->is_LoadStore(), "");
2651                       assert(_alias == Compile::AliasIdxRaw, "");
2652                       next = m->in(MemNode::Memory);
2653                     }
2654                     if (_phase->get_ctrl(next) != u) {
2655                       break;
2656                     }
2657                     if (next->is_MergeMem()) {
2658                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2659                       break;
2660                     }
2661                     if (next->is_Phi()) {
2662                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2663                       break;
2664                     }
2665                     m = next;
2666                   }
2667 
2668                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2669                   assert(m->is_Mem() || m->is_LoadStore(), "");
2670                   uint input = (uint)MemNode::Memory;
2671                   _phase->igvn().replace_input_of(m, input, phi);
2672                   push = false;
2673                 }
2674               } else {
2675                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2676               }
2677               if (push) {
2678                 uses.push(u);
2679               }
2680             }
2681           } else if (!mem_is_valid(m, u) &&
2682                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2683             uses.push(u);
2684           }
2685         }
2686       }
2687     }
2688     for (int i = 0; i < phis.length(); i++) {
2689       Node* n = phis.at(i);
2690       Node* r = n->in(0);
2691       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2692       for (uint j = 1; j < n->req(); j++) {
2693         Node* m = find_mem(r->in(j), NULL);
2694         _phase->igvn().replace_input_of(n, j, m);
2695         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2696       }
2697     }
2698   }
2699   uint last = _phase->C->unique();
2700   MergeMemNode* mm = NULL;
2701   int alias = _alias;
2702   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2703   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2704     Node* u = mem->out(i);
2705     if (u->_idx < last) {
2706       if (u->is_Mem()) {
2707         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2708           Node* m = find_mem(_phase->get_ctrl(u), u);
2709           if (m != mem) {
2710             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2711             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2712             --i;
2713           }
2714         }
2715       } else if (u->is_MergeMem()) {
2716         MergeMemNode* u_mm = u->as_MergeMem();
2717         if (u_mm->memory_at(alias) == mem) {
2718           MergeMemNode* newmm = NULL;
2719           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2720             Node* uu = u->fast_out(j);
2721             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2722             if (uu->is_Phi()) {
2723               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2724               Node* region = uu->in(0);
2725               int nb = 0;
2726               for (uint k = 1; k < uu->req(); k++) {
2727                 if (uu->in(k) == u) {
2728                   Node* m = find_mem(region->in(k), NULL);
2729                   if (m != mem) {
2730                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2731                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2732                     if (newmm != u) {
2733                       _phase->igvn().replace_input_of(uu, k, newmm);
2734                       nb++;
2735                       --jmax;
2736                     }
2737                   }
2738                 }
2739               }
2740               if (nb > 0) {
2741                 --j;
2742               }
2743             } else {
2744               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2745               if (m != mem) {
2746                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2747                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2748                 if (newmm != u) {
2749                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2750                   --j, --jmax;
2751                 }
2752               }
2753             }
2754           }
2755         }
2756       } else if (u->is_Phi()) {
2757         assert(u->bottom_type() == Type::MEMORY, "what else?");
2758         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2759           Node* region = u->in(0);
2760           bool replaced = false;
2761           for (uint j = 1; j < u->req(); j++) {
2762             if (u->in(j) == mem) {
2763               Node* m = find_mem(region->in(j), NULL);
2764               Node* nnew = m;
2765               if (m != mem) {
2766                 if (u->adr_type() == TypePtr::BOTTOM) {
2767                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2768                   nnew = mm;
2769                 }
2770                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2771                 _phase->igvn().replace_input_of(u, j, nnew);
2772                 replaced = true;
2773               }
2774             }
2775           }
2776           if (replaced) {
2777             --i;
2778           }
2779         }
2780       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2781                  u->adr_type() == NULL) {
2782         assert(u->adr_type() != NULL ||
2783                u->Opcode() == Op_Rethrow ||
2784                u->Opcode() == Op_Return ||
2785                u->Opcode() == Op_SafePoint ||
2786                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2787                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2788                u->Opcode() == Op_CallLeaf, "");
2789         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2790         if (m != mem) {
2791           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2792           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2793           --i;
2794         }
2795       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2796         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2797         if (m != mem) {
2798           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2799           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2800           --i;
2801         }
2802       } else if (u->adr_type() != TypePtr::BOTTOM &&
2803                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2804         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2805         assert(m != mem, "");
2806         // u is on the wrong slice...
2807         assert(u->is_ClearArray(), "");
2808         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2809         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2810         --i;
2811       }
2812     }
2813   }
2814 #ifdef ASSERT
2815   assert(new_mem->outcnt() > 0, "");
2816   for (int i = 0; i < phis.length(); i++) {
2817     Node* n = phis.at(i);
2818     assert(n->outcnt() > 0, "new phi must have uses now");
2819   }
2820 #endif
2821 }
2822 
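// Wrap mem in a new MergeMem whose _alias slice is rep_proj; the new node is
// registered at rep_ctrl.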
2823 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2824   MergeMemNode* mm = MergeMemNode::make(mem);
2825   mm->set_memory_at(_alias, rep_proj);
2826   _phase->register_new_node(mm, rep_ctrl);
2827   return mm;
2828 }
2829 
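// Return a MergeMem identical to u except that its _alias slice is rep_proj.
// If u has a single use it is updated in place (adjusting the caller's
// iterator i), otherwise a fresh MergeMem is built and registered at the
// deeper of u's control and rep_ctrl.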
2830 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2831   MergeMemNode* newmm = NULL;
2832   MergeMemNode* u_mm = u->as_MergeMem();
2833   Node* c = _phase->get_ctrl(u);
2834   if (_phase->is_dominator(c, rep_ctrl)) {
2835     c = rep_ctrl;
2836   } else {
2837     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2838   }
2839   if (u->outcnt() == 1) {
2840     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2841       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2842       --i;
2843     } else {
2844       _phase->igvn().rehash_node_delayed(u);
2845       u_mm->set_memory_at(_alias, rep_proj);
2846     }
2847     newmm = u_mm;
2848     _phase->set_ctrl_and_loop(u, c);
2849   } else {
2850     // Can't simply clone u and then change one of its inputs,
2851     // because that adds and then removes an edge, which messes
2852     // with the DUIterator.
2853     newmm = MergeMemNode::make(u_mm->base_memory());
2854     for (uint j = 0; j < u->req(); j++) {
2855       if (j < newmm->req()) {
2856         if (j == (uint)_alias) {
2857           newmm->set_req(j, rep_proj);
2858         } else if (newmm->in(j) != u->in(j)) {
2859           newmm->set_req(j, u->in(j));
2860         }
2861       } else if (j == (uint)_alias) {
2862         newmm->add_req(rep_proj);
2863       } else {
2864         newmm->add_req(u->in(j));
2865       }
2866     }
2867     if ((uint)_alias >= u->req()) {
2868       newmm->set_memory_at(_alias, rep_proj);
2869     }
2870     _phase->register_new_node(newmm, c);
2871   }
2872   return newmm;
2873 }
2874 
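// A memory Phi needs updating if it is on the _alias slice, or if it merges all
// of memory (TypePtr::BOTTOM) and its region has no separate Phi for the
// _alias slice.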
2875 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2876   if (phi->adr_type() == TypePtr::BOTTOM) {
2877     Node* region = phi->in(0);
2878     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2879       Node* uu = region->fast_out(j);
2880       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2881         return false;
2882       }
2883     }
2884     return true;
2885   }
2886   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2887 }
2888 
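// Redirect the uses of mem on the _alias slice to rep_proj wherever rep_ctrl
// dominates the use, going through a (possibly cloned) MergeMem when the use
// consumes all of memory.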
2889 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2890   uint last = _phase->C->unique();
2891   MergeMemNode* mm = NULL;
2892   assert(mem->bottom_type() == Type::MEMORY, "");
2893   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2894     Node* u = mem->out(i);
2895     if (u != replacement && u->_idx < last) {
2896       if (u->is_MergeMem()) {
2897         MergeMemNode* u_mm = u->as_MergeMem();
2898         if (u_mm->memory_at(_alias) == mem) {
2899           MergeMemNode* newmm = NULL;
2900           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2901             Node* uu = u->fast_out(j);
2902             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2903             if (uu->is_Phi()) {
2904               if (should_process_phi(uu)) {
2905                 Node* region = uu->in(0);
2906                 int nb = 0;
2907                 for (uint k = 1; k < uu->req(); k++) {
2908                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2909                     if (newmm == NULL) {
2910                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2911                     }
2912                     if (newmm != u) {
2913                       _phase->igvn().replace_input_of(uu, k, newmm);
2914                       nb++;
2915                       --jmax;
2916                     }
2917                   }
2918                 }
2919                 if (nb > 0) {
2920                   --j;
2921                 }
2922               }
2923             } else {
2924               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2925                 if (newmm == NULL) {
2926                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2927                 }
2928                 if (newmm != u) {
2929                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2930                   --j, --jmax;
2931                 }
2932               }
2933             }
2934           }
2935         }
2936       } else if (u->is_Phi()) {
2937         assert(u->bottom_type() == Type::MEMORY, "what else?");
2938         Node* region = u->in(0);
2939         if (should_process_phi(u)) {
2940           bool replaced = false;
2941           for (uint j = 1; j < u->req(); j++) {
2942             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2943               Node* nnew = rep_proj;
2944               if (u->adr_type() == TypePtr::BOTTOM) {
2945                 if (mm == NULL) {
2946                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2947                 }
2948                 nnew = mm;
2949               }
2950               _phase->igvn().replace_input_of(u, j, nnew);
2951               replaced = true;
2952             }
2953           }
2954           if (replaced) {
2955             --i;
2956           }
2957 
2958         }
2959       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2960                  u->adr_type() == NULL) {
2961         assert(u->adr_type() != NULL ||
2962                u->Opcode() == Op_Rethrow ||
2963                u->Opcode() == Op_Return ||
2964                u->Opcode() == Op_SafePoint ||
2965                u->Opcode() == Op_StoreLConditional ||
2966                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2967                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2968                u->Opcode() == Op_CallLeaf, "");
2969         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2970           if (mm == NULL) {
2971             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2972           }
2973           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2974           --i;
2975         }
2976       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2977         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2978           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
2979           --i;
2980         }
2981       }
2982     }
2983   }
2984 }
2985 
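// Creating a load reference barrier registers it with the C2 barrier-set state.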
2986 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj)
2987   : Node(ctrl, obj) {
2988   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
2989 }
2990 
2991 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2992   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
2993     return Type::TOP;
2994   }
2995   const Type* t = in(ValueIn)->bottom_type();
2996   if (t == TypePtr::NULL_PTR) {
2997     return t;
2998   }
2999   return t->is_oopptr();
3000 }
3001 
3002 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
3003   // Input is TOP ==> the result is TOP
3004   const Type* t2 = phase->type(in(ValueIn));
3005   if (t2 == Type::TOP) return Type::TOP;
3006 
3007   if (t2 == TypePtr::NULL_PTR) {
3008     return t2;
3009   }
3010 
3011   const Type* type = t2->is_oopptr()/*->cast_to_nonconst()*/;
3012   return type;
3013 }
3014 
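// A barrier whose input provably never needs one is redundant and is replaced
// by its input.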
3015 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
3016   Node* value = in(ValueIn);
3017   if (!needs_barrier(phase, value)) {
3018     return value;
3019   }
3020   return this;
3021 }
3022 
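// Conservatively decide whether the value still needs a load reference barrier.
// Returns false for values that are provably safe: NULL, constants, freshly
// allocated objects, values that already went through a barrier, and so on.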
3023 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
3024   Unique_Node_List visited;
3025   return needs_barrier_impl(phase, n, visited);
3026 }
3027 
3028 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
3029   if (n == NULL) return false;
3030   if (visited.member(n)) {
3031     return false; // Been there.
3032   }
3033   visited.push(n);
3034 
3035   if (n->is_Allocate()) {
3036     // tty->print_cr("optimize barrier on alloc");
3037     return false;
3038   }
3039   if (n->is_Call()) {
3040     // tty->print_cr("optimize barrier on call");
3041     return false;
3042   }
3043 
3044   const Type* type = phase->type(n);
3045   if (type == Type::TOP) {
3046     return false;
3047   }
3048   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
3049     // tty->print_cr("optimize barrier on null");
3050     return false;
3051   }
3052   if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
3053     // tty->print_cr("optimize barrier on constant");
3054     return false;
3055   }
3056 
3057   switch (n->Opcode()) {
3058     case Op_AddP:
3059       return true; // TODO: Can refine?
3060     case Op_LoadP:
3061     case Op_ShenandoahCompareAndExchangeN:
3062     case Op_ShenandoahCompareAndExchangeP:
3063     case Op_CompareAndExchangeN:
3064     case Op_CompareAndExchangeP:
3065     case Op_GetAndSetN:
3066     case Op_GetAndSetP:
3067       return true;
3068     case Op_Phi: {
3069       for (uint i = 1; i < n->req(); i++) {
3070         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
3071       }
3072       return false;
3073     }
3074     case Op_CheckCastPP:
3075     case Op_CastPP:
3076       return needs_barrier_impl(phase, n->in(1), visited);
3077     case Op_Proj:
3078       return needs_barrier_impl(phase, n->in(0), visited);
3079     case Op_ShenandoahLoadReferenceBarrier:
3080       // tty->print_cr("optimize barrier on barrier");
3081       return false;
3082     case Op_Parm:
3083       // tty->print_cr("optimize barrier on input arg");
3084       return false;
3085     case Op_DecodeN:
3086     case Op_EncodeP:
3087       return needs_barrier_impl(phase, n->in(1), visited);
3088     case Op_LoadN:
3089       return true;
3090     case Op_CMoveP:
3091       return needs_barrier_impl(phase, n->in(2), visited) ||
3092              needs_barrier_impl(phase, n->in(3), visited);
3093     case Op_ShenandoahEnqueueBarrier:
3094       return needs_barrier_impl(phase, n->in(1), visited);
3095     default:
3096       break;
3097   }
3098 #ifdef ASSERT
3099   tty->print("need barrier on?: ");
3100   tty->print_cr("ins:");
3101   n->dump(2);
3102   tty->print_cr("outs:");
3103   n->dump(-2);
3104   ShouldNotReachHere();
3105 #endif
3106   return true;
3107 }
3108 
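// Walk the uses of the barrier's value to classify how much the barrier has to
// do: STRONG when the value (or an address derived from it) can reach a store,
// a call, an atomic operation, a comparison against another non-NULL oop or a
// similar use; WEAK when it is only read from (with exceptions for final/stable
// fields guarded by the ShenandoahOptimize* flags); NONE when no use requires
// the barrier.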
3109 ShenandoahLoadReferenceBarrierNode::Strength ShenandoahLoadReferenceBarrierNode::get_barrier_strength() {
3110   Unique_Node_List visited;
3111   Node_Stack stack(0);
3112   stack.push(this, 0);
3113   Strength strength = NONE;
3114   while (strength != STRONG && stack.size() > 0) {
3115     Node* n = stack.node();
3116     if (visited.member(n)) {
3117       stack.pop();
3118       continue;
3119     }
3120     visited.push(n);
3121     bool visit_users = false;
3122     switch (n->Opcode()) {
3123       case Op_StoreN:
3124       case Op_StoreP: {
3125         strength = STRONG;
3126         break;
3127       }
3128       case Op_CmpP: {
3129         if (!n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) &&
3130             !n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
3131           strength = STRONG;
3132         }
3133         break;
3134       }
3135       case Op_CallStaticJava: {
3136         strength = STRONG;
3137         break;
3138       }
3139       case Op_CallDynamicJava:
3140       case Op_CallLeaf:
3141       case Op_CallLeafNoFP:
3142       case Op_CompareAndSwapL:
3143       case Op_CompareAndSwapI:
3144       case Op_CompareAndSwapB:
3145       case Op_CompareAndSwapS:
3146       case Op_CompareAndSwapN:
3147       case Op_CompareAndSwapP:
3148       case Op_CompareAndExchangeL:
3149       case Op_CompareAndExchangeI:
3150       case Op_CompareAndExchangeB:
3151       case Op_CompareAndExchangeS:
3152       case Op_CompareAndExchangeN:
3153       case Op_CompareAndExchangeP:
3154       case Op_WeakCompareAndSwapL:
3155       case Op_WeakCompareAndSwapI:
3156       case Op_WeakCompareAndSwapB:
3157       case Op_WeakCompareAndSwapS:
3158       case Op_WeakCompareAndSwapN:
3159       case Op_WeakCompareAndSwapP:
3160       case Op_ShenandoahCompareAndSwapN:
3161       case Op_ShenandoahCompareAndSwapP:
3162       case Op_ShenandoahWeakCompareAndSwapN:
3163       case Op_ShenandoahWeakCompareAndSwapP:
3164       case Op_ShenandoahCompareAndExchangeN:
3165       case Op_ShenandoahCompareAndExchangeP:
3166       case Op_GetAndSetL:
3167       case Op_GetAndSetI:
3168       case Op_GetAndSetB:
3169       case Op_GetAndSetS:
3170       case Op_GetAndSetP:
3171       case Op_GetAndSetN:
3172       case Op_GetAndAddL:
3173       case Op_GetAndAddI:
3174       case Op_GetAndAddB:
3175       case Op_GetAndAddS:
3176       case Op_ShenandoahEnqueueBarrier:
3177       case Op_FastLock:
3178       case Op_FastUnlock:
3179       case Op_Rethrow:
3180       case Op_Return:
3181       case Op_StoreB:
3182       case Op_StoreC:
3183       case Op_StoreD:
3184       case Op_StoreF:
3185       case Op_StoreL:
3186       case Op_StoreLConditional:
3187       case Op_StoreI:
3188       case Op_StoreVector:
3189       case Op_StrInflatedCopy:
3190       case Op_StrCompressedCopy:
3191       case Op_EncodeP:
3192       case Op_CastP2X:
3193       case Op_SafePoint:
3194       case Op_EncodeISOArray:
3195       case Op_LoadKlass:
3196       case Op_LoadNKlass:
3197         strength = STRONG;
3198         break;
3199       case Op_LoadB:
3200       case Op_LoadUB:
3201       case Op_LoadUS:
3202       case Op_LoadD:
3203       case Op_LoadF:
3204       case Op_LoadL:
3205       case Op_LoadI:
3206       case Op_LoadS:
3207       case Op_LoadN:
3208       case Op_LoadP:
3209       case Op_LoadVector: {
3210         const TypePtr* adr_type = n->adr_type();
3211         int alias_idx = Compile::current()->get_alias_index(adr_type);
3212         Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
3213         ciField* field = alias_type->field();
3214         bool is_static = field != NULL && field->is_static();
3215         bool is_final = field != NULL && field->is_final();
3216         bool is_stable = field != NULL && field->is_stable();
3217         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
3218           // Leave strength as is.
3219         } else if (ShenandoahOptimizeInstanceFinals && !is_static && is_final) {
3220           // Leave strength as is.
3221         } else if (ShenandoahOptimizeStableFinals && (is_stable || (adr_type->isa_aryptr() && adr_type->isa_aryptr()->is_stable()))) {
3222           // Leave strength as is.
3223         } else {
3224           strength = WEAK;
3225         }
3226         break;
3227       }
3228       case Op_AryEq: {
3229         Node* n1 = n->in(2);
3230         Node* n2 = n->in(3);
3231         if (!ShenandoahOptimizeStableFinals ||
3232             !n1->bottom_type()->isa_aryptr() || !n1->bottom_type()->isa_aryptr()->is_stable() ||
3233             !n2->bottom_type()->isa_aryptr() || !n2->bottom_type()->isa_aryptr()->is_stable()) {
3234           strength = WEAK;
3235         }
3236         break;
3237       }
3238       case Op_StrEquals:
3239       case Op_StrComp:
3240       case Op_StrIndexOf:
3241       case Op_StrIndexOfChar:
3242         if (!ShenandoahOptimizeStableFinals) {
3243            strength = WEAK;
3244         }
3245         break;
3246       case Op_Conv2B:
3247       case Op_LoadRange:
3248         // NONE, i.e. leave current strength as is
3249         break;
3250       case Op_AddP:
3251       case Op_CheckCastPP:
3252       case Op_CastPP:
3253       case Op_CMoveP:
3254       case Op_Phi:
3255       case Op_ShenandoahLoadReferenceBarrier:
3256         visit_users = true;
3257         break;
3258       default: {
3259 #ifdef ASSERT
3260         tty->print_cr("Unknown node in get_barrier_strength:");
3261         n->dump(1);
3262         ShouldNotReachHere();
3263 #else
3264         strength = STRONG;
3265 #endif
3266       }
3267     }
3268 #ifdef ASSERT
3269 /*
3270     if (strength == STRONG) {
3271       tty->print("strengthening node: ");
3272       n->dump();
3273     }
3274     */
3275 #endif
3276     stack.pop();
3277     if (visit_users) {
3278       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3279         Node* user = n->fast_out(i);
3280         if (user != NULL) {
3281           stack.push(user, 0);
3282         }
3283       }
3284     }
3285   }
3286   return strength;
3287 }
3288 
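// Recognize the shape left by an explicit null check: a CastPP of the value
// pinned on the true projection of an If that compares that same value against
// NULL and whose null path ends in an uncommon trap. Returns the uncommon trap
// call when the pattern matches, NULL otherwise.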
3289 CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
3290   Node* val = in(ValueIn);
3291 
3292   const Type* val_t = igvn.type(val);
3293 
3294   if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
3295       val->Opcode() == Op_CastPP &&
3296       val->in(0) != NULL &&
3297       val->in(0)->Opcode() == Op_IfTrue &&
3298       val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
3299       val->in(0)->in(0)->is_If() &&
3300       val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
3301       val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
3302       val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
3303       val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
3304       val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
3305     assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
3306     CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
3307     return unc;
3308   }
3309   return NULL;
3310 }