1 /*
   2  * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  27 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  28 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  29 #include "gc/shenandoah/shenandoahForwarding.hpp"
  30 #include "gc/shenandoah/shenandoahHeap.hpp"
  31 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  32 #include "gc/shenandoah/shenandoahRuntime.hpp"
  33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/block.hpp"
  36 #include "opto/callnode.hpp"
  37 #include "opto/castnode.hpp"
  38 #include "opto/movenode.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/runtime.hpp"
  42 #include "opto/subnode.hpp"
  43 
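// Expand all enqueue and load-reference barriers recorded in the barrier set
// state by running a dedicated loop-opts pass, optionally followed by another
// round of loop optimizations. Returns false if the compile fails.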
  44 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  45   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  46   if ((state->enqueue_barriers_count() +
  47        state->load_reference_barriers_count()) > 0) {
  48     bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
  49     C->clear_major_progress();
  50     PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
  51     if (C->failing()) return false;
  52     PhaseIdealLoop::verify(igvn);
  53     DEBUG_ONLY(verify_raw_mem(C->root());)
  54     if (attempt_more_loopopts) {
  55       C->set_major_progress();
  56       if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
  57         return false;
  58       }
  59       C->clear_major_progress();
  60     }
  61   }
  62   return true;
  63 }
  64 
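// Matches the control-flow shape of a gc-state test:
// if ((gc_state & mask) != 0), where gc_state is the thread-local byte load.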
  65 bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) {
  66   if (!UseShenandoahGC) {
  67     return false;
  68   }
  69   assert(iff->is_If(), "bad input");
  70   if (iff->Opcode() != Op_If) {
  71     return false;
  72   }
  73   Node* bol = iff->in(1);
  74   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  75     return false;
  76   }
  77   Node* cmp = bol->in(1);
  78   if (cmp->Opcode() != Op_CmpI) {
  79     return false;
  80   }
  81   Node* in1 = cmp->in(1);
  82   Node* in2 = cmp->in(2);
  83   if (in2->find_int_con(-1) != 0) {
  84     return false;
  85   }
  86   if (in1->Opcode() != Op_AndI) {
  87     return false;
  88   }
  89   in2 = in1->in(2);
  90   if (in2->find_int_con(-1) != mask) {
  91     return false;
  92   }
  93   in1 = in1->in(1);
  94 
  95   return is_gc_state_load(in1);
  96 }
  97 
  98 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  99   return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
 100 }
 101 
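// True if n loads the gc_state byte from the current thread's
// ShenandoahThreadLocalData (ThreadLocal base + gc_state_offset).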
 102 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
 103   if (!UseShenandoahGC) {
 104     return false;
 105   }
 106   if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
 107     return false;
 108   }
 109   Node* addp = n->in(MemNode::Address);
 110   if (!addp->is_AddP()) {
 111     return false;
 112   }
 113   Node* base = addp->in(AddPNode::Address);
 114   Node* off = addp->in(AddPNode::Offset);
 115   if (base->Opcode() != Op_ThreadLocal) {
 116     return false;
 117   }
 118   if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
 119     return false;
 120   }
 121   return true;
 122 }
 123 
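// Walks the control paths from start up to stop (stop must dominate start)
// and returns true if a safepoint other than a leaf call lies on any of them.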
 124 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
 125   assert(phase->is_dominator(stop, start), "bad inputs");
 126   ResourceMark rm;
 127   Unique_Node_List wq;
 128   wq.push(start);
 129   for (uint next = 0; next < wq.size(); next++) {
 130     Node *m = wq.at(next);
 131     if (m == stop) {
 132       continue;
 133     }
 134     if (m->is_SafePoint() && !m->is_CallLeaf()) {
 135       return true;
 136     }
 137     if (m->is_Region()) {
 138       for (uint i = 1; i < m->req(); i++) {
 139         wq.push(m->in(i));
 140       }
 141     } else {
 142       wq.push(m->in(0));
 143     }
 144   }
 145   return false;
 146 }
 147 
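// Tries to replace the gc-state load n with a dominating load of the same
// thread-local address, provided no safepoint lies on the control path
// between them. Returns true if the load was commoned.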
 148 bool ShenandoahBarrierC2Support::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) {
 149   assert(is_gc_state_load(n), "inconsistent");
 150   Node* addp = n->in(MemNode::Address);
 151   Node* dominator = NULL;
 152   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 153     Node* u = addp->fast_out(i);
 154     assert(is_gc_state_load(u), "inconsistent");
 155     if (u != n && phase->is_dominator(u->in(0), n->in(0))) {
 156       if (dominator == NULL) {
 157         dominator = u;
 158       } else {
 159         if (phase->dom_depth(u->in(0)) < phase->dom_depth(dominator->in(0))) {
 160           dominator = u;
 161         }
 162       }
 163     }
 164   }
 165   if (dominator == NULL || has_safepoint_between(n->in(0), dominator->in(0), phase)) {
 166     return false;
 167   }
 168   phase->igvn().replace_node(n, dominator);
 169 
 170   return true;
 171 }
 172 
 173 #ifdef ASSERT
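// Debug-only helper for verify(): walks up the value graph from 'in' (through
// casts, AddPs, phis, CMoves, ...) and checks that every oop source is either
// trivially safe (constant, argument, fresh allocation, ...) or covered by the
// barrier kind expected for access type t.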
 174 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
 175   assert(phis.size() == 0, "");
 176 
 177   while (true) {
 178     if (in->bottom_type() == TypePtr::NULL_PTR) {
 179       if (trace) {tty->print_cr("NULL");}
 180     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
 181       if (trace) {tty->print_cr("Non oop");}
 182     } else if (t == ShenandoahLoad && ShenandoahOptimizeStableFinals &&
 183                in->bottom_type()->make_ptr()->isa_aryptr() &&
 184                in->bottom_type()->make_ptr()->is_aryptr()->is_stable()) {
 185       if (trace) {tty->print_cr("Stable array load");}
 186     } else {
 187       if (in->is_ConstraintCast()) {
 188         in = in->in(1);
 189         continue;
 190       } else if (in->is_AddP()) {
 191         assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
 192         in = in->in(AddPNode::Address);
 193         continue;
 194       } else if (in->is_Con()) {
 195         if (trace) {
 196           tty->print("Found constant");
 197           in->dump();
 198         }
 199       } else if (in->Opcode() == Op_Parm) {
 200         if (trace) {
 201           tty->print("Found argument");
 202         }
 203       } else if (in->Opcode() == Op_CreateEx) {
 204         if (trace) {
 205           tty->print("Found create-exception");
 206         }
 207       } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
 208         if (trace) {
 209           tty->print("Found raw LoadP (OSR argument?)");
 210         }
 211       } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 212         if (t == ShenandoahOopStore) {
 213           uint i = 0;
 214           for (; i < phis.size(); i++) {
 215             Node* n = phis.node_at(i);
 216             if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
 217               break;
 218             }
 219           }
 220           if (i == phis.size()) {
 221             return false;
 222           }
 223         }
 224         barriers_used.push(in);
 225         if (trace) {tty->print("Found barrier"); in->dump();}
 226       } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
 227         if (t != ShenandoahOopStore) {
 228           in = in->in(1);
 229           continue;
 230         }
 231         if (trace) {tty->print("Found enqueue barrier"); in->dump();}
 232         phis.push(in, in->req());
 233         in = in->in(1);
 234         continue;
 235       } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
 236         if (trace) {
 237           tty->print("Found alloc");
 238           in->in(0)->dump();
 239         }
 240       } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
 241         if (trace) {
 242           tty->print("Found Java call");
 243         }
 244       } else if (in->is_Phi()) {
 245         if (!visited.test_set(in->_idx)) {
 246           if (trace) {tty->print("Pushed phi:"); in->dump();}
 247           phis.push(in, 2);
 248           in = in->in(1);
 249           continue;
 250         }
 251         if (trace) {tty->print("Already seen phi:"); in->dump();}
 252       } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
 253         if (!visited.test_set(in->_idx)) {
 254           if (trace) {tty->print("Pushed cmovep:"); in->dump();}
 255           phis.push(in, CMoveNode::IfTrue);
 256           in = in->in(CMoveNode::IfFalse);
 257           continue;
 258         }
 259         if (trace) {tty->print("Already seen cmovep:"); in->dump();}
 260       } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
 261         in = in->in(1);
 262         continue;
 263       } else {
 264         return false;
 265       }
 266     }
 267     bool cont = false;
 268     while (phis.is_nonempty()) {
 269       uint idx = phis.index();
 270       Node* phi = phis.node();
 271       if (idx >= phi->req()) {
 272         if (trace) {tty->print("Popped phi:"); phi->dump();}
 273         phis.pop();
 274         continue;
 275       }
 276       if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
 277       in = phi->in(idx);
 278       phis.set_index(idx+1);
 279       cont = true;
 280       break;
 281     }
 282     if (!cont) {
 283       break;
 284     }
 285   }
 286   return true;
 287 }
 288 
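// Dumps the offending nodes and their neighborhood, then stops the VM with a
// fatal error carrying msg.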
 289 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
 290   if (n1 != NULL) {
 291     n1->dump(+10);
 292   }
 293   if (n2 != NULL) {
 294     n2->dump(+10);
 295   }
 296   fatal("%s", msg);
 297 }
 298 
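// Debug-only whole-graph verification: walks every node reachable from the
// root and checks that oop loads, stores, compares and intrinsic/runtime call
// arguments see properly barriered values.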
 299 void ShenandoahBarrierC2Support::verify(RootNode* root) {
 300   ResourceMark rm;
 301   Unique_Node_List wq;
 302   GrowableArray<Node*> barriers;
 303   Unique_Node_List barriers_used;
 304   Node_Stack phis(0);
 305   VectorSet visited(Thread::current()->resource_area());
 306   const bool trace = false;
 307   const bool verify_no_useless_barrier = false;
 308 
 309   wq.push(root);
 310   for (uint next = 0; next < wq.size(); next++) {
 311     Node *n = wq.at(next);
 312     if (n->is_Load()) {
 313       const bool trace = false;
 314       if (trace) {tty->print("Verifying"); n->dump();}
 315       if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
 316         if (trace) {tty->print_cr("Load range/klass");}
 317       } else {
 318         const TypePtr* adr_type = n->as_Load()->adr_type();
 319 
 320         if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
 321           if (trace) {tty->print_cr("Mark load");}
 322         } else if (adr_type->isa_instptr() &&
 323                    adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
 324                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
 325           if (trace) {tty->print_cr("Reference.get()");}
 326         } else {
 327           bool verify = true;
 328           if (adr_type->isa_instptr()) {
 329             const TypeInstPtr* tinst = adr_type->is_instptr();
 330             ciKlass* k = tinst->klass();
 331             assert(k->is_instance_klass(), "");
 332             ciInstanceKlass* ik = (ciInstanceKlass*)k;
 333             int offset = adr_type->offset();
 334 
 335             if ((ik->debug_final_field_at(offset) && ShenandoahOptimizeInstanceFinals) ||
 336                 (ik->debug_stable_field_at(offset) && ShenandoahOptimizeStableFinals)) {
 337               if (trace) {tty->print_cr("Final/stable");}
 338               verify = false;
 339             } else if (k == ciEnv::current()->Class_klass() &&
 340                        tinst->const_oop() != NULL &&
 341                        tinst->offset() >= (ik->size_helper() * wordSize)) {
 342               ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
 343               ciField* field = k->get_field_by_offset(tinst->offset(), true);
 344               if ((ShenandoahOptimizeStaticFinals && field->is_final()) ||
 345                   (ShenandoahOptimizeStableFinals && field->is_stable())) {
 346                 verify = false;
 347               }
 348             }
 349           }
 350 
 351           if (verify && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 352             report_verify_failure("Shenandoah verification: Load should have barriers", n);
 353           }
 354         }
 355       }
 356     } else if (n->is_Store()) {
 357       const bool trace = false;
 358 
 359       if (trace) {tty->print("Verifying"); n->dump();}
 360       if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
 361         Node* adr = n->in(MemNode::Address);
 362         bool verify = true;
 363 
 364         if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
 365           adr = adr->in(AddPNode::Address);
 366           if (adr->is_AddP()) {
 367             assert(adr->in(AddPNode::Base)->is_top(), "");
 368             adr = adr->in(AddPNode::Address);
 369             if (adr->Opcode() == Op_LoadP &&
 370                 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
 371                 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 372                 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
 373               if (trace) {tty->print_cr("SATB prebarrier");}
 374               verify = false;
 375             }
 376           }
 377         }
 378 
 379         if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 380           report_verify_failure("Shenandoah verification: Store should have barriers", n);
 381         }
 382       }
 383       if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 384         report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
 385       }
 386     } else if (n->Opcode() == Op_CmpP) {
 387       const bool trace = false;
 388 
 389       Node* in1 = n->in(1);
 390       Node* in2 = n->in(2);
 391       if (in1->bottom_type()->isa_oopptr()) {
 392         if (trace) {tty->print("Verifying"); n->dump();}
 393 
 394         bool mark_inputs = false;
 395         if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
 396             (in1->is_Con() || in2->is_Con())) {
 397           if (trace) {tty->print_cr("Comparison against a constant");}
 398           mark_inputs = true;
 399         } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
 400                    (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
 401           if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
 402           mark_inputs = true;
 403         } else {
 404           assert(in2->bottom_type()->isa_oopptr(), "");
 405 
 406           if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
 407               !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
 408             report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
 409           }
 410         }
 411         if (verify_no_useless_barrier &&
 412             mark_inputs &&
 413             (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
 414              !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
 415           phis.clear();
 416           visited.Reset();
 417         }
 418       }
 419     } else if (n->is_LoadStore()) {
 420       if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
 421           !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 422         report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
 423       }
 424 
 425       if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 426         report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
 427       }
 428     } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
 429       CallNode* call = n->as_Call();
 430 
 431       static struct {
 432         const char* name;
 433         struct {
 434           int pos;
 435           verify_type t;
 436         } args[6];
 437       } calls[] = {
 438         "aescrypt_encryptBlock",
 439         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 440           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 441         "aescrypt_decryptBlock",
 442         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 443           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 444         "multiplyToLen",
 445         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
 446           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 447         "squareToLen",
 448         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
 449           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 450         "montgomery_multiply",
 451         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 452           { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 453         "montgomery_square",
 454         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
 455           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 456         "mulAdd",
 457         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 458           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 459         "vectorizedMismatch",
 460         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 461           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 462         "updateBytesCRC32",
 463         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 464           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 465         "updateBytesAdler32",
 466         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 467           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 468         "updateBytesCRC32C",
 469         { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
 470           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 471         "counterMode_AESCrypt",
 472         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 473           { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
 474         "cipherBlockChaining_encryptAESCrypt",
 475         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 476           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 477         "cipherBlockChaining_decryptAESCrypt",
 478         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 479           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 480         "shenandoah_clone_barrier",
 481         { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 482           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 483         "ghash_processBlocks",
 484         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 485           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 486         "sha1_implCompress",
 487         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 488           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 489         "sha256_implCompress",
 490         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 491           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 492         "sha512_implCompress",
 493         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 494           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 495         "sha1_implCompressMB",
 496         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 497           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 498         "sha256_implCompressMB",
 499         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 500           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 501         "sha512_implCompressMB",
 502         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 503           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 504         "encodeBlock",
 505         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 506           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 507       };
 508 
 509       if (call->is_call_to_arraycopystub()) {
 510         Node* dest = NULL;
 511         const TypeTuple* args = n->as_Call()->_tf->domain();
 512         for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 513           if (args->field_at(i)->isa_ptr()) {
 514             j++;
 515             if (j == 2) {
 516               dest = n->in(i);
 517               break;
 518             }
 519           }
 520         }
 521         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
 522             !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
 523           report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
 524         }
 525       } else if (strlen(call->_name) > 5 &&
 526                  !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
 527         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
 528           report_verify_failure("Shenandoah verification: _fill should have barriers", n);
 529         }
 530       } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
 531         // skip
 532       } else {
 533         const int calls_len = sizeof(calls) / sizeof(calls[0]);
 534         int i = 0;
 535         for (; i < calls_len; i++) {
 536           if (!strcmp(calls[i].name, call->_name)) {
 537             break;
 538           }
 539         }
 540         if (i != calls_len) {
 541           const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
 542           for (uint j = 0; j < args_len; j++) {
 543             int pos = calls[i].args[j].pos;
 544             if (pos == -1) {
 545               break;
 546             }
 547             if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
 548               report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 549             }
 550           }
 551           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 552             if (call->in(j)->bottom_type()->make_ptr() &&
 553                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 554               uint k = 0;
 555               for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
 556               if (k == args_len) {
 557                 fatal("arg %d for call %s not covered", j, call->_name);
 558               }
 559             }
 560           }
 561         } else {
 562           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 563             if (call->in(j)->bottom_type()->make_ptr() &&
 564                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 565               fatal("%s not covered", call->_name);
 566             }
 567           }
 568         }
 569       }
 570     } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 571       // skip
 572     } else if (n->is_AddP()
 573                || n->is_Phi()
 574                || n->is_ConstraintCast()
 575                || n->Opcode() == Op_Return
 576                || n->Opcode() == Op_CMoveP
 577                || n->Opcode() == Op_CMoveN
 578                || n->Opcode() == Op_Rethrow
 579                || n->is_MemBar()
 580                || n->Opcode() == Op_Conv2B
 581                || n->Opcode() == Op_SafePoint
 582                || n->is_CallJava()
 583                || n->Opcode() == Op_Unlock
 584                || n->Opcode() == Op_EncodeP
 585                || n->Opcode() == Op_DecodeN) {
 586       // nothing to do
 587     } else {
 588       static struct {
 589         int opcode;
 590         struct {
 591           int pos;
 592           verify_type t;
 593         } inputs[2];
 594       } others[] = {
 595         Op_FastLock,
 596         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 597         Op_Lock,
 598         { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
 599         Op_ArrayCopy,
 600         { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
 601         Op_StrCompressedCopy,
 602         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 603         Op_StrInflatedCopy,
 604         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 605         Op_AryEq,
 606         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 607         Op_StrIndexOf,
 608         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 609         Op_StrComp,
 610         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 611         Op_StrEquals,
 612         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 613         Op_EncodeISOArray,
 614         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 615         Op_HasNegatives,
 616         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
 617         Op_CastP2X,
 618         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 619         Op_StrIndexOfChar,
 620         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 621       };
 622 
 623       const int others_len = sizeof(others) / sizeof(others[0]);
 624       int i = 0;
 625       for (; i < others_len; i++) {
 626         if (others[i].opcode == n->Opcode()) {
 627           break;
 628         }
 629       }
 630       uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
 631       if (i != others_len) {
 632         const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
 633         for (uint j = 0; j < inputs_len; j++) {
 634           int pos = others[i].inputs[j].pos;
 635           if (pos == -1) {
 636             break;
 637           }
 638           if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
 639             report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 640           }
 641         }
 642         for (uint j = 1; j < stop; j++) {
 643           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 644               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 645             uint k = 0;
 646             for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
 647             if (k == inputs_len) {
 648               fatal("arg %d for node %s not covered", j, n->Name());
 649             }
 650           }
 651         }
 652       } else {
 653         for (uint j = 1; j < stop; j++) {
 654           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 655               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 656             fatal("%s not covered", n->Name());
 657           }
 658         }
 659       }
 660     }
 661 
 662     if (n->is_SafePoint()) {
 663       SafePointNode* sfpt = n->as_SafePoint();
 664       if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
 665         for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
 666           if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 667             phis.clear();
 668             visited.Reset();
 669           }
 670         }
 671       }
 672     }
    for (uint i = 0; i < n->len(); ++i) {
 674       Node *m = n->in(i);
 675       if (m == NULL) continue;
 676 
      // In most cases, inputs should be known to be non-null. If that is
      // not the case, it could be a missing cast_not_null() in an
      // intrinsic, or support might be needed in AddPNode::Ideal() to
      // avoid a NULL+offset input.
 681       if (!(n->is_Phi() ||
 682             (n->is_SafePoint() && (!n->is_CallRuntime() || !strcmp(n->as_Call()->_name, "shenandoah_wb_pre") || !strcmp(n->as_Call()->_name, "unsafe_arraycopy"))) ||
 683             n->Opcode() == Op_CmpP ||
 684             n->Opcode() == Op_CmpN ||
 685             (n->Opcode() == Op_StoreP && i == StoreNode::ValueIn) ||
 686             (n->Opcode() == Op_StoreN && i == StoreNode::ValueIn) ||
 687             n->is_ConstraintCast() ||
 688             n->Opcode() == Op_Return ||
 689             n->Opcode() == Op_Conv2B ||
 690             n->is_AddP() ||
 691             n->Opcode() == Op_CMoveP ||
 692             n->Opcode() == Op_CMoveN ||
 693             n->Opcode() == Op_Rethrow ||
 694             n->is_MemBar() ||
 695             n->is_Mem() ||
 696             n->Opcode() == Op_AryEq ||
 697             n->Opcode() == Op_SCMemProj ||
 698             n->Opcode() == Op_EncodeP ||
 699             n->Opcode() == Op_DecodeN ||
 700             n->Opcode() == Op_ShenandoahEnqueueBarrier ||
 701             n->Opcode() == Op_ShenandoahLoadReferenceBarrier)) {
 702         if (m->bottom_type()->make_oopptr() && m->bottom_type()->make_oopptr()->meet(TypePtr::NULL_PTR) == m->bottom_type()) {
 703           report_verify_failure("Shenandoah verification: null input", n, m);
 704         }
 705       }
 706 
 707       wq.push(m);
 708     }
 709   }
 710 
 711   if (verify_no_useless_barrier) {
 712     for (int i = 0; i < barriers.length(); i++) {
 713       Node* n = barriers.at(i);
 714       if (!barriers_used.member(n)) {
 715         tty->print("XXX useless barrier"); n->dump(-2);
 716         ShouldNotReachHere();
 717       }
 718     }
 719   }
 720 }
 721 #endif
 722 
 723 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
  // The fact that both nodes have the same control is not sufficient to prove
  // domination; verify that there is no path from d to n.
 726   ResourceMark rm;
 727   Unique_Node_List wq;
 728   wq.push(d);
 729   for (uint next = 0; next < wq.size(); next++) {
 730     Node *m = wq.at(next);
 731     if (m == n) {
 732       return false;
 733     }
 734     if (m->is_Phi() && m->in(0)->is_Loop()) {
 735       assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
 736     } else {
 737       for (uint i = 0; i < m->req(); i++) {
 738         if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
 739           wq.push(m->in(i));
 740         }
 741       }
 742     }
 743   }
 744   return true;
 745 }
 746 
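// Returns whether d (at control d_c) dominates n (at control n_c). When both
// share the same control, falls back to a data-graph walk.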
 747 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
 748   if (d_c != n_c) {
 749     return phase->is_dominator(d_c, n_c);
 750   }
 751   return is_dominator_same_ctrl(d_c, d, n, phase);
 752 }
 753 
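// Steps to the next memory state when walking up the memory graph for the
// given alias index, looking through projections, safepoints, membars, phis
// (first input), MergeMems (alias slice) and raw-memory stores.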
 754 Node* next_mem(Node* mem, int alias) {
 755   Node* res = NULL;
 756   if (mem->is_Proj()) {
 757     res = mem->in(0);
 758   } else if (mem->is_SafePoint() || mem->is_MemBar()) {
 759     res = mem->in(TypeFunc::Memory);
 760   } else if (mem->is_Phi()) {
 761     res = mem->in(1);
 762   } else if (mem->is_MergeMem()) {
 763     res = mem->as_MergeMem()->memory_at(alias);
 764   } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
 766     res = mem->in(MemNode::Memory);
 767   } else {
 768 #ifdef ASSERT
 769     mem->dump();
 770 #endif
 771     ShouldNotReachHere();
 772   }
 773   return res;
 774 }
 775 
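// Walks the idom chain from c up to dom, verifying that no unexpected control
// flow branches off the path. Returns NULL if the path is clean, the single
// extra If projection when allow_one_proj permits one, or NodeSentinel if an
// unsupported pattern is found.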
 776 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
 777   Node* iffproj = NULL;
 778   while (c != dom) {
 779     Node* next = phase->idom(c);
 780     assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
 781     if (c->is_Region()) {
 782       ResourceMark rm;
 783       Unique_Node_List wq;
 784       wq.push(c);
 785       for (uint i = 0; i < wq.size(); i++) {
 786         Node *n = wq.at(i);
 787         if (n == next) {
 788           continue;
 789         }
 790         if (n->is_Region()) {
 791           for (uint j = 1; j < n->req(); j++) {
 792             wq.push(n->in(j));
 793           }
 794         } else {
 795           wq.push(n->in(0));
 796         }
 797       }
 798       for (uint i = 0; i < wq.size(); i++) {
 799         Node *n = wq.at(i);
 800         assert(n->is_CFG(), "");
 801         if (n->is_Multi()) {
 802           for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 803             Node* u = n->fast_out(j);
 804             if (u->is_CFG()) {
 805               if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 806                 return NodeSentinel;
 807               }
 808             }
 809           }
 810         }
 811       }
 812     } else  if (c->is_Proj()) {
 813       if (c->is_IfProj()) {
 814         if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
 815           // continue;
 816         } else {
 817           if (!allow_one_proj) {
 818             return NodeSentinel;
 819           }
 820           if (iffproj == NULL) {
 821             iffproj = c;
 822           } else {
 823             return NodeSentinel;
 824           }
 825         }
 826       } else if (c->Opcode() == Op_JumpProj) {
 827         return NodeSentinel; // unsupported
 828       } else if (c->Opcode() == Op_CatchProj) {
 829         return NodeSentinel; // unsupported
 830       } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
 831         return NodeSentinel; // unsupported
 832       } else {
 833         assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
 834       }
 835     }
 836     c = next;
 837   }
 838   return iffproj;
 839 }
 840 
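// Follows the memory edges from mem (via next_mem) until the memory state's
// control strictly dominates ctrl. Returns that memory node, with its control
// in mem_ctrl, or NULL if the walk revisits a node.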
 841 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
 842   ResourceMark rm;
 843   VectorSet wq(Thread::current()->resource_area());
 844   wq.set(mem->_idx);
 845   mem_ctrl = phase->ctrl_or_self(mem);
 846   while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
 847     mem = next_mem(mem, alias);
 848     if (wq.test_set(mem->_idx)) {
 849       return NULL;
 850     }
 851     mem_ctrl = phase->ctrl_or_self(mem);
 852   }
 853   if (mem->is_MergeMem()) {
 854     mem = mem->as_MergeMem()->memory_at(alias);
 855     mem_ctrl = phase->ctrl_or_self(mem);
 856   }
 857   return mem;
 858 }
 859 
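// Walks up the dominator tree from ctrl until it finds the wide
// (TypePtr::BOTTOM) memory state live at that point: a memory Phi on a
// region, a call's bottom memory projection, or the memory projection of a
// safepoint/membar/start.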
 860 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
 861   Node* mem = NULL;
 862   Node* c = ctrl;
 863   do {
 864     if (c->is_Region()) {
 865       Node* phi_bottom = NULL;
 866       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
 867         Node* u = c->fast_out(i);
 868         if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
 869           if (u->adr_type() == TypePtr::BOTTOM) {
 870             mem = u;
 871           }
 872         }
 873       }
 874     } else {
 875       if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
 876         CallProjections projs;
 877         c->as_Call()->extract_projections(&projs, true, false);
 878         if (projs.fallthrough_memproj != NULL) {
 879           if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
 880             if (projs.catchall_memproj == NULL) {
 881               mem = projs.fallthrough_memproj;
 882             } else {
 883               if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
 884                 mem = projs.fallthrough_memproj;
 885               } else {
 886                 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
 887                 mem = projs.catchall_memproj;
 888               }
 889             }
 890           }
 891         } else {
 892           Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
 893           if (proj != NULL &&
 894               proj->adr_type() == TypePtr::BOTTOM) {
 895             mem = proj;
 896           }
 897         }
 898       } else {
 899         for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
 900           Node* u = c->fast_out(i);
 901           if (u->is_Proj() &&
 902               u->bottom_type() == Type::MEMORY &&
 903               u->adr_type() == TypePtr::BOTTOM) {
 904               assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
 905               assert(mem == NULL, "only one proj");
 906               mem = u;
 907           }
 908         }
 909         assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
 910       }
 911     }
 912     c = phase->idom(c);
 913   } while (mem == NULL);
 914   return mem;
 915 }
 916 
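// Collects the non-CFG uses of n that are controlled by ctrl (ignoring loop
// phis fed by n through the back edge) so they can be revisited later.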
 917 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
 918   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 919     Node* u = n->fast_out(i);
 920     if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
 921       uses.push(u);
 922     }
 923   }
 924 }
 925 
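// Replaces the outer strip-mined loop head and its loop end with plain
// LoopNode/IfNode clones and clears the inner loop's strip-mined flag, so the
// loop nest no longer appears strip mined.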
 926 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
 927   OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
 928   Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
 929   phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
 930   Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
 931   phase->register_control(new_le, phase->get_loop(le), le->in(0));
 932   phase->lazy_replace(outer, new_outer);
 933   phase->lazy_replace(le, new_le);
 934   inner->clear_strip_mined();
 935 }
 936 
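// Emits the heap-stability check: loads the thread-local gc_state byte from
// raw memory and tests the HAS_FORWARDED bit. On return, ctrl is the path
// where the bit is set (heap not stable) and heap_stable_ctrl the stable path.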
 937 void ShenandoahBarrierC2Support::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
 938                                                   PhaseIdealLoop* phase) {
 939   IdealLoopTree* loop = phase->get_loop(ctrl);
 940   Node* thread = new ThreadLocalNode();
 941   phase->register_new_node(thread, ctrl);
 942   Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 943   phase->set_ctrl(offset, phase->C->root());
 944   Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
 945   phase->register_new_node(gc_state_addr, ctrl);
 946   uint gc_state_idx = Compile::AliasIdxRaw;
 947   const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
 948   debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
 949 
 950   Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
 951   phase->register_new_node(gc_state, ctrl);
 952   Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED));
 953   phase->register_new_node(heap_stable_and, ctrl);
 954   Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
 955   phase->register_new_node(heap_stable_cmp, ctrl);
 956   Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
 957   phase->register_new_node(heap_stable_test, ctrl);
 958   IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 959   phase->register_control(heap_stable_iff, loop, ctrl);
 960 
 961   heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
 962   phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
 963   ctrl = new IfTrueNode(heap_stable_iff);
 964   phase->register_control(ctrl, loop, heap_stable_iff);
 965 
 966   assert(is_heap_stable_test(heap_stable_iff), "Should match the shape");
 967 }
 968 
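// If val may be null, emits an explicit null check. On return, ctrl is the
// not-null path and null_ctrl the null path; otherwise both are left
// unchanged.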
 969 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
 970   const Type* val_t = phase->igvn().type(val);
 971   if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
 972     IdealLoopTree* loop = phase->get_loop(ctrl);
 973     Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
 974     phase->register_new_node(null_cmp, ctrl);
 975     Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
 976     phase->register_new_node(null_test, ctrl);
 977     IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
 978     phase->register_control(null_iff, loop, ctrl);
 979     ctrl = new IfTrueNode(null_iff);
 980     phase->register_control(ctrl, loop, null_iff);
 981     null_ctrl = new IfFalseNode(null_iff);
 982     phase->register_control(null_ctrl, loop, null_iff);
 983   }
 984 }
 985 
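// Clones the If that feeds the uncommon trap projection unc_ctrl at control c,
// updates c to the new not-null (IfTrue) projection and returns a fresh CastPP
// of the unchecked value pinned on that projection.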
 986 Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
 987   IdealLoopTree *loop = phase->get_loop(c);
 988   Node* iff = unc_ctrl->in(0);
 989   assert(iff->is_If(), "broken");
 990   Node* new_iff = iff->clone();
 991   new_iff->set_req(0, c);
 992   phase->register_control(new_iff, loop, c);
 993   Node* iffalse = new IfFalseNode(new_iff->as_If());
 994   phase->register_control(iffalse, loop, new_iff);
 995   Node* iftrue = new IfTrueNode(new_iff->as_If());
 996   phase->register_control(iftrue, loop, new_iff);
 997   c = iftrue;
 998   const Type *t = phase->igvn().type(val);
 999   assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
1000   Node* uncasted_val = val->in(1);
1001   val = new CastPPNode(uncasted_val, t);
1002   val->init_req(0, c);
1003   phase->register_new_node(val, c);
1004   return val;
1005 }
1006 
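// After the null check guarding the uncommon trap unc has been cloned, moves
// the nodes hanging off the old trap-side projection to new_unc_ctrl and
// rewires the trap (or the region in front of it) accordingly.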
1007 void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
1008                                                 Unique_Node_List& uses, PhaseIdealLoop* phase) {
1009   IfNode* iff = unc_ctrl->in(0)->as_If();
1010   Node* proj = iff->proj_out(0);
1011   assert(proj != unc_ctrl, "bad projection");
1012   Node* use = proj->unique_ctrl_out();
1013 
1014   assert(use == unc || use->is_Region(), "what else?");
1015 
1016   uses.clear();
1017   if (use == unc) {
1018     phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
1019     for (uint i = 1; i < unc->req(); i++) {
1020       Node* n = unc->in(i);
1021       if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
1022         uses.push(n);
1023       }
1024     }
1025   } else {
1026     assert(use->is_Region(), "what else?");
1027     uint idx = 1;
1028     for (; use->in(idx) != proj; idx++);
1029     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1030       Node* u = use->fast_out(i);
1031       if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
1032         uses.push(u->in(idx));
1033       }
1034     }
1035   }
  for (uint next = 0; next < uses.size(); next++) {
1037     Node *n = uses.at(next);
1038     assert(phase->get_ctrl(n) == proj, "bad control");
1039     phase->set_ctrl_and_loop(n, new_unc_ctrl);
1040     if (n->in(0) == proj) {
1041       phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
1042     }
1043     for (uint i = 0; i < n->req(); i++) {
1044       Node* m = n->in(i);
1045       if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
1046         uses.push(m);
1047       }
1048     }
1049   }
1050 
1051   phase->igvn().rehash_node_delayed(use);
1052   int nb = use->replace_edge(proj, new_unc_ctrl);
1053   assert(nb == 1, "only use expected");
1054 }
1055 
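// Emits the collection-set membership test: indexes the in_cset_fast_test
// table with (addr >> region_size_bytes_shift) and branches on the loaded
// byte. On return, not_cset_ctrl is the "not in cset" path (byte == 0) and
// ctrl the in-cset path.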
1056 void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
1057   IdealLoopTree *loop = phase->get_loop(ctrl);
1058   Node* raw_rbtrue = new CastP2XNode(ctrl, val);
1059   phase->register_new_node(raw_rbtrue, ctrl);
1060   Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
1061   phase->register_new_node(cset_offset, ctrl);
1062   Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
1063   phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
1064   Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
1065   phase->register_new_node(in_cset_fast_test_adr, ctrl);
1066   uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
1067   const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
1068   debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
1069   Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
1070   phase->register_new_node(in_cset_fast_test_load, ctrl);
1071   Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
1072   phase->register_new_node(in_cset_fast_test_cmp, ctrl);
1073   Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
1074   phase->register_new_node(in_cset_fast_test_test, ctrl);
1075   IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
1076   phase->register_control(in_cset_fast_test_iff, loop, ctrl);
1077 
1078   not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
1079   phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);
1080 
1081   ctrl = new IfFalseNode(in_cset_fast_test_iff);
1082   phase->register_control(ctrl, loop, in_cset_fast_test_iff);
1083 }
1084 
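// Emits the slow-path leaf call to the load-reference-barrier runtime stub
// (narrow-oop variant when compressed oops are in use), merging raw memory
// into the call's memory state. val is replaced with the call's result,
// ctrl and result_mem with the call's control and memory projections.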
1085 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, PhaseIdealLoop* phase) {
1086   IdealLoopTree*loop = phase->get_loop(ctrl);
1087   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr()->cast_to_nonconst();
1088 
1089   // The slow path stub consumes and produces raw memory in addition
1090   // to the existing memory edges
1091   Node* base = find_bottom_mem(ctrl, phase);
1092   MergeMemNode* mm = MergeMemNode::make(base);
1093   mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1094   phase->register_new_node(mm, ctrl);
1095 
1096   address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
1097           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup_narrow_JRT) :
1098           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup_JRT);
1099 
1100   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), target, "shenandoah_load_reference_barrier", TypeRawPtr::BOTTOM);
1101   call->init_req(TypeFunc::Control, ctrl);
1102   call->init_req(TypeFunc::I_O, phase->C->top());
1103   call->init_req(TypeFunc::Memory, mm);
1104   call->init_req(TypeFunc::FramePtr, phase->C->top());
1105   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1106   call->init_req(TypeFunc::Parms, val);
1107   call->init_req(TypeFunc::Parms+1, load_addr);
1108   phase->register_control(call, loop, ctrl);
1109   ctrl = new ProjNode(call, TypeFunc::Control);
1110   phase->register_control(ctrl, loop, call);
1111   result_mem = new ProjNode(call, TypeFunc::Memory);
1112   phase->register_new_node(result_mem, call);
1113   val = new ProjNode(call, TypeFunc::Parms);
1114   phase->register_new_node(val, call);
1115   val = new CheckCastPPNode(ctrl, val, obj_type);
1116   phase->register_new_node(val, ctrl);
1117 }
1118 
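// After a barrier has been expanded, re-attaches the nodes that were control
// dependent on the barrier's original control point to the new region below
// the expanded code, keeping the incoming raw memory state above the barrier.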
1119 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
1120   Node* ctrl = phase->get_ctrl(barrier);
1121   Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
1122 
1123   // Update the control of all nodes that should be after the
1124   // barrier control flow
1125   uses.clear();
1126   // Every node that is control dependent on the barrier's input
1127   // control will be after the expanded barrier. The raw memory (if
1128   // its memory is control dependent on the barrier's input control)
1129   // must stay above the barrier.
1130   uses_to_ignore.clear();
1131   if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1132     uses_to_ignore.push(init_raw_mem);
1133   }
1134   for (uint next = 0; next < uses_to_ignore.size(); next++) {
1135     Node *n = uses_to_ignore.at(next);
1136     for (uint i = 0; i < n->req(); i++) {
1137       Node* in = n->in(i);
1138       if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
1139         uses_to_ignore.push(in);
1140       }
1141     }
1142   }
1143   for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
1144     Node* u = ctrl->fast_out(i);
1145     if (u->_idx < last &&
1146         u != barrier &&
1147         !uses_to_ignore.member(u) &&
1148         (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
1149         (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
1150       Node* old_c = phase->ctrl_or_self(u);
1151       Node* c = old_c;
1152       if (c != ctrl ||
1153           is_dominator_same_ctrl(old_c, barrier, u, phase) ||
1154           ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
1155         phase->igvn().rehash_node_delayed(u);
1156         int nb = u->replace_edge(ctrl, region);
1157         if (u->is_CFG()) {
1158           if (phase->idom(u) == ctrl) {
1159             phase->set_idom(u, region, phase->dom_depth(region));
1160           }
1161         } else if (phase->get_ctrl(u) == ctrl) {
1162           assert(u != init_raw_mem, "should leave input raw mem above the barrier");
1163           uses.push(u);
1164         }
1165         assert(nb == 1, "more than 1 ctrl input?");
1166         --i, imax -= nb;
1167       }
1168     }
1169   }
1170 }
1171 
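// Recursively builds phis that merge n (reaching along the call's fall-through
// projection) with n_clone (reaching along the exception projection) at the
// regions between c and ctrl.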
1172 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1173   Node* region = NULL;
1174   while (c != ctrl) {
1175     if (c->is_Region()) {
1176       region = c;
1177     }
1178     c = phase->idom(c);
1179   }
1180   assert(region != NULL, "");
1181   Node* phi = new PhiNode(region, n->bottom_type());
1182   for (uint j = 1; j < region->req(); j++) {
1183     Node* in = region->in(j);
1184     if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1185       phi->init_req(j, n);
1186     } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1187       phi->init_req(j, n_clone);
1188     } else {
1189       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1190     }
1191   }
1192   phase->register_new_node(phi, region);
1193   return phi;
1194 }
1195 
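// Barrier expansion driver. Loops headed by an OuterStripMinedLoop that
// contain an enqueue barrier are first rewritten as plain loops (expansion
// would break strip mining verification); then each load reference barrier of
// non-trivial strength is expanded.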
1196 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1197   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1198 
1199   Unique_Node_List uses;
1200   for (int i = 0; i < state->enqueue_barriers_count(); i++) {
1201     Node* barrier = state->enqueue_barrier(i);
1202     Node* ctrl = phase->get_ctrl(barrier);
1203     IdealLoopTree* loop = phase->get_loop(ctrl);
1204     if (loop->_head->is_OuterStripMinedLoop()) {
1205       // Expanding a barrier here will break loop strip mining
1206       // verification. Transform the loop so the loop nest doesn't
1207       // appear as strip mined.
1208       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1209       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1210     }
1211   }
1212 
1213   Node_Stack stack(0);
1214   Node_List clones;
1215   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1216     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1217     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1218       continue;
1219     }
1220 
1221     Node* ctrl = phase->get_ctrl(lrb);
1222     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1223 
1224     CallStaticJavaNode* unc = NULL;
1225     Node* unc_ctrl = NULL;
1226     Node* uncasted_val = val;
1227 
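         // Look for a CastPP of the barrier's value that is the non-null projection of
         // an uncommon-trap null check dominating the barrier. When found, the null
         // check is cloned and the barrier is pinned on its non-null path, with the
         // cast re-expressed in terms of the barrier's result, so the barrier only
         // executes for non-null values and null-checked users see its output.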
1228     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1229       Node* u = lrb->fast_out(i);
1230       if (u->Opcode() == Op_CastPP &&
1231           u->in(0) != NULL &&
1232           phase->is_dominator(u->in(0), ctrl)) {
1233         const Type* u_t = phase->igvn().type(u);
1234 
1235         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1236             u->in(0)->Opcode() == Op_IfTrue &&
1237             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1238             u->in(0)->in(0)->is_If() &&
1239             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1240             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1241             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1242             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1243             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1244           IdealLoopTree* loop = phase->get_loop(ctrl);
1245           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1246 
1247           if (!unc_loop->is_member(loop)) {
1248             continue;
1249           }
1250 
1251           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1252           assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1253           if (branch == NodeSentinel) {
1254             continue;
1255           }
1256 
1257           phase->igvn().replace_input_of(u, 1, val);
1258           phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
1259           phase->set_ctrl(u, u->in(0));
1260           phase->set_ctrl(lrb, u->in(0));
1261           unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1262           unc_ctrl = u->in(0);
1263           val = u;
1264 
1265           for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
1266             Node* u = val->fast_out(j);
1267             if (u == lrb) continue;
1268             phase->igvn().rehash_node_delayed(u);
1269             int nb = u->replace_edge(val, lrb);
1270             --j; jmax -= nb;
1271           }
1272 
1273           RegionNode* r = new RegionNode(3);
1274           IfNode* iff = unc_ctrl->in(0)->as_If();
1275 
1276           Node* ctrl_use = unc_ctrl->unique_ctrl_out();
1277           Node* unc_ctrl_clone = unc_ctrl->clone();
1278           phase->register_control(unc_ctrl_clone, loop, iff);
1279           Node* c = unc_ctrl_clone;
1280           Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
1281           r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
1282 
1283           phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
1284           phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
1285           phase->lazy_replace(c, unc_ctrl);
1286           c = NULL;
1287           phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
1288           phase->set_ctrl(val, unc_ctrl_clone);
1289 
1290           IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
1291           fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
1292           Node* iff_proj = iff->proj_out(0);
1293           r->init_req(2, iff_proj);
1294           phase->register_control(r, phase->ltree_root(), iff);
1295 
1296           Node* new_bol = new_iff->in(1)->clone();
1297           Node* new_cmp = new_bol->in(1)->clone();
1298           assert(new_cmp->Opcode() == Op_CmpP, "broken");
1299           assert(new_cmp->in(1) == val->in(1), "broken");
1300           new_bol->set_req(1, new_cmp);
1301           new_cmp->set_req(1, lrb);
1302           phase->register_new_node(new_bol, new_iff->in(0));
1303           phase->register_new_node(new_cmp, new_iff->in(0));
1304           phase->igvn().replace_input_of(new_iff, 1, new_bol);
1305           phase->igvn().replace_input_of(new_cast, 1, lrb);
1306 
1307           for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1308             Node* u = lrb->fast_out(i);
1309             if (u == new_cast || u == new_cmp) {
1310               continue;
1311             }
1312             phase->igvn().rehash_node_delayed(u);
1313             int nb = u->replace_edge(lrb, new_cast);
1314             assert(nb > 0, "no update?");
1315             --i; imax -= nb;
1316           }
1317 
1318           for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1319             Node* u = val->fast_out(i);
1320             if (u == lrb) {
1321               continue;
1322             }
1323             phase->igvn().rehash_node_delayed(u);
1324             int nb = u->replace_edge(val, new_cast);
1325             assert(nb > 0, "no update?");
1326             --i; imax -= nb;
1327           }
1328 
1329           ctrl = unc_ctrl_clone;
1330           phase->set_ctrl_and_loop(lrb, ctrl);
1331           break;
1332         }
1333       }
1334     }
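         // If the barrier is pinned at a Java call, it must be available on both the
         // normal and the exceptional return paths. Clone the barrier (and any of its
         // non-CFG uses that sit between the call and its catch projections) onto the
         // catch-all path, and merge original and clone with phis further down via
         // create_phis_on_call_return().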
1335     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1336       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1337       CallProjections projs;
1338       call->extract_projections(&projs, false, false);
1339 
1340       Node* lrb_clone = lrb->clone();
1341       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1342       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1343 
1344       stack.push(lrb, 0);
1345       clones.push(lrb_clone);
1346 
1347       do {
1348         assert(stack.size() == clones.size(), "");
1349         Node* n = stack.node();
1350 #ifdef ASSERT
1351         if (n->is_Load()) {
1352           Node* mem = n->in(MemNode::Memory);
1353           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1354             Node* u = mem->fast_out(j);
1355             assert((!u->is_Store() && !u->is_LoadStore()) || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1356           }
1357         }
1358 #endif
1359         uint idx = stack.index();
1360         Node* n_clone = clones.at(clones.size()-1);
1361         if (idx < n->outcnt()) {
1362           Node* u = n->raw_out(idx);
1363           Node* c = phase->ctrl_or_self(u);
1364           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1365             stack.set_index(idx+1);
1366             assert(!u->is_CFG(), "");
1367             stack.push(u, 0);
1368             Node* u_clone = u->clone();
1369             int nb = u_clone->replace_edge(n, n_clone);
1370             assert(nb > 0, "should have replaced some uses");
1371             phase->register_new_node(u_clone, projs.catchall_catchproj);
1372             clones.push(u_clone);
1373             phase->set_ctrl(u, projs.fallthrough_catchproj);
1374           } else {
1375             bool replaced = false;
1376             if (u->is_Phi()) {
1377               for (uint k = 1; k < u->req(); k++) {
1378                 if (u->in(k) == n) {
1379                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1380                     phase->igvn().replace_input_of(u, k, n_clone);
1381                     replaced = true;
1382                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1383                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1384                     replaced = true;
1385                   }
1386                 }
1387               }
1388             } else {
1389               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1390                 phase->igvn().rehash_node_delayed(u);
1391                 int nb = u->replace_edge(n, n_clone);
1392                 assert(nb > 0, "should have replaced some uses");
1393                 replaced = true;
1394               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1395                 phase->igvn().rehash_node_delayed(u);
1396                 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1397                 assert(nb > 0, "should have replaced some uses");
1398                 replaced = true;
1399               }
1400             }
1401             if (!replaced) {
1402               stack.set_index(idx+1);
1403             }
1404           }
1405         } else {
1406           stack.pop();
1407           clones.pop();
1408         }
1409       } while (stack.size() > 0);
1410       assert(stack.size() == 0 && clones.size() == 0, "");
1411     }
1412   }
1413 
1414   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1415     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1416     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1417       continue;
1418     }
1419     Node* ctrl = phase->get_ctrl(lrb);
1420     IdealLoopTree* loop = phase->get_loop(ctrl);
1421     if (loop->_head->is_OuterStripMinedLoop()) {
1422       // Expanding a barrier here will break loop strip mining
1423       // verification. Transform the loop so the loop nest doesn't
1424       // appear as strip mined.
1425       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1426       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1427     }
1428   }
1429 
1430   // Expand load-reference-barriers
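       // Each barrier becomes a region with a heap-stable path, an optional null path,
       // a not-in-cset path and an evacuation path that calls the LRB runtime stub;
       // value and raw-memory phis merge the paths back together.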
1431   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1432   Unique_Node_List uses_to_ignore;
1433   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1434     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1435     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1436       phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1437       continue;
1438     }
1439     uint last = phase->C->unique();
1440     Node* ctrl = phase->get_ctrl(lrb);
1441     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1442 
1443 
1444     Node* orig_ctrl = ctrl;
1445 
1446     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1447     Node* init_raw_mem = raw_mem;
1448     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1449 
1450     IdealLoopTree *loop = phase->get_loop(ctrl);
1451     CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
1452     Node* unc_ctrl = NULL;
1453     if (unc != NULL) {
1454       if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
1455         unc = NULL;
1456       } else {
1457         unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
1458       }
1459     }
1460 
1461     Node* uncasted_val = val;
1462     if (unc != NULL) {
1463       uncasted_val = val->in(1);
1464     }
1465 
1466     Node* heap_stable_ctrl = NULL;
1467     Node* null_ctrl = NULL;
1468 
1469     assert(val->bottom_type()->make_oopptr(), "need oop");
1470     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1471 
1472     enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT };
1473     Node* region = new RegionNode(PATH_LIMIT);
1474     Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
1475     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1476 
1477     // Stable path.
1478     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1479     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1480 
1481     // Heap stable case
1482     region->init_req(_heap_stable, heap_stable_ctrl);
1483     val_phi->init_req(_heap_stable, uncasted_val);
1484     raw_mem_phi->init_req(_heap_stable, raw_mem);
1485 
1486     Node* reg2_ctrl = NULL;
1487     // Null case
1488     test_null(ctrl, val, null_ctrl, phase);
1489     if (null_ctrl != NULL) {
1490       reg2_ctrl = null_ctrl->in(0);
1491       region->init_req(_null_path, null_ctrl);
1492       val_phi->init_req(_null_path, uncasted_val);
1493       raw_mem_phi->init_req(_null_path, raw_mem);
1494     } else {
1495       region->del_req(_null_path);
1496       val_phi->del_req(_null_path);
1497       raw_mem_phi->del_req(_null_path);
1498     }
1499 
1500     // Test for in-cset.
1501     // Wires !in_cset(obj) to the _not_cset slot of the region and phis.
1502     Node* not_cset_ctrl = NULL;
1503     in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
1504     if (not_cset_ctrl != NULL) {
1505       if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
1506       region->init_req(_not_cset, not_cset_ctrl);
1507       val_phi->init_req(_not_cset, uncasted_val);
1508       raw_mem_phi->init_req(_not_cset, raw_mem);
1509     }
1510 
1511     // Resolve the object when the original value is in the collection set.
1512     // The load-reference-barrier stub below performs the unconditional resolve.
1513     Node* new_val = uncasted_val;
1514     if (unc_ctrl != NULL) {
1515       // Clone the null check in this branch to allow implicit null check
1516       new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
1517       fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
1518 
1519       IfNode* iff = unc_ctrl->in(0)->as_If();
1520       phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
1521     }
1522 
1523     // Call the load-reference-barrier stub and wire that path into the _evac_path slot of the region and phis.
1524     Node* result_mem = NULL;
1525     Node* fwd = new_val;
1526     Node* addr;
1527     if (ShenandoahSelfFixing) {
1528       VectorSet visited(Thread::current()->resource_area());
1529       addr = get_load_addr(phase, visited, lrb);
1530     } else {
1531       addr = phase->igvn().zerocon(T_OBJECT);
1532     }
1533     if (addr->Opcode() == Op_AddP) {
1534       Node* base = addr->in(AddPNode::Base);
1535       base = new CheckCastPPNode(ctrl, base, base->bottom_type(), true);
1536       phase->register_new_node(base, ctrl);
1537       Node* address = addr->in(AddPNode::Address);
1538       address = new CheckCastPPNode(ctrl, address, address->bottom_type(), true);
1539       phase->register_new_node(address, ctrl);
1540 
1541       addr = addr->clone();
1542       addr->set_req(AddPNode::Base, base);
1543       addr->set_req(AddPNode::Address, address);
1544       phase->register_new_node(addr, ctrl);
1545     }
1546     call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, phase);
1547     region->init_req(_evac_path, ctrl);
1548     val_phi->init_req(_evac_path, fwd);
1549     raw_mem_phi->init_req(_evac_path, result_mem);
1550 
1551     phase->register_control(region, loop, heap_stable_iff);
1552     Node* out_val = val_phi;
1553     phase->register_new_node(val_phi, region);
1554     phase->register_new_node(raw_mem_phi, region);
1555 
1556     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1557 
1558     ctrl = orig_ctrl;
1559 
1560     if (unc != NULL) {
1561       for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1562         Node* u = val->fast_out(i);
1563         Node* c = phase->ctrl_or_self(u);
1564         if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
1565           phase->igvn().rehash_node_delayed(u);
1566           int nb = u->replace_edge(val, out_val);
1567           --i, imax -= nb;
1568         }
1569       }
1570       if (val->outcnt() == 0) {
1571         phase->igvn()._worklist.push(val);
1572       }
1573     }
1574     phase->igvn().replace_node(lrb, out_val);
1575 
1576     follow_barrier_uses(out_val, ctrl, uses, phase);
1577 
1578     for (uint next = 0; next < uses.size(); next++) {
1579       Node *n = uses.at(next);
1580       assert(phase->get_ctrl(n) == ctrl, "bad control");
1581       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1582       phase->set_ctrl(n, region);
1583       follow_barrier_uses(n, ctrl, uses, phase);
1584     }
1585 
1586     // The slow path call produces memory: hook the raw memory phi
1587     // from the expanded load reference barrier with the rest of the graph
1588     // which may require adding memory phis at every post dominated
1589     // region and at enclosing loop heads. Use the memory state
1590     // collected in memory_nodes to fix the memory graph. Update that
1591     // memory state as we go.
1592     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1593   }
1594   // Done expanding load-reference-barriers.
1595   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1596 
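       // Expand SATB enqueue (pre-write) barriers. Each barrier is bypassed when the
       // heap is stable or the previous value is null; otherwise the previous value is
       // pushed into the thread-local SATB queue, falling back to the
       // write_ref_field_pre runtime entry when the queue buffer is full.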
1597   for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
1598     Node* barrier = state->enqueue_barrier(i);
1599     Node* pre_val = barrier->in(1);
1600 
1601     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1602       ShouldNotReachHere();
1603       continue;
1604     }
1605 
1606     Node* ctrl = phase->get_ctrl(barrier);
1607 
1608     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1609       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1610       ctrl = ctrl->in(0)->in(0);
1611       phase->set_ctrl(barrier, ctrl);
1612     } else if (ctrl->is_CallRuntime()) {
1613       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1614       ctrl = ctrl->in(0);
1615       phase->set_ctrl(barrier, ctrl);
1616     }
1617 
1618     Node* init_ctrl = ctrl;
1619     IdealLoopTree* loop = phase->get_loop(ctrl);
1620     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1621     Node* init_raw_mem = raw_mem;
1622     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1623     Node* heap_stable_ctrl = NULL;
1624     Node* null_ctrl = NULL;
1625     uint last = phase->C->unique();
1626 
1627     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1628     Node* region = new RegionNode(PATH_LIMIT);
1629     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1630 
1631     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1632     Node* region2 = new RegionNode(PATH_LIMIT2);
1633     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1634 
1635     // Stable path.
1636     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1637     region->init_req(_heap_stable, heap_stable_ctrl);
1638     phi->init_req(_heap_stable, raw_mem);
1639 
1640     // Null path
1641     Node* reg2_ctrl = NULL;
1642     test_null(ctrl, pre_val, null_ctrl, phase);
1643     if (null_ctrl != NULL) {
1644       reg2_ctrl = null_ctrl->in(0);
1645       region2->init_req(_null_path, null_ctrl);
1646       phi2->init_req(_null_path, raw_mem);
1647     } else {
1648       region2->del_req(_null_path);
1649       phi2->del_req(_null_path);
1650     }
1651 
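         // Fast path: load the thread-local SATB queue index and buffer. A non-zero
         // index means there is room in the buffer: store pre_val at
         // buffer[index - wordSize] and write the decremented index back.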
1652     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1653     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1654     Node* thread = new ThreadLocalNode();
1655     phase->register_new_node(thread, ctrl);
1656     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1657     phase->register_new_node(buffer_adr, ctrl);
1658     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1659     phase->register_new_node(index_adr, ctrl);
1660 
1661     BasicType index_bt = TypeX_X->basic_type();
1662     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading SATBMarkQueue::_index with wrong size.");
1663     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1664     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1665     phase->register_new_node(index, ctrl);
1666     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1667     phase->register_new_node(index_cmp, ctrl);
1668     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1669     phase->register_new_node(index_test, ctrl);
1670     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1671     if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1672     phase->register_control(queue_full_iff, loop, ctrl);
1673     Node* not_full = new IfTrueNode(queue_full_iff);
1674     phase->register_control(not_full, loop, queue_full_iff);
1675     Node* full = new IfFalseNode(queue_full_iff);
1676     phase->register_control(full, loop, queue_full_iff);
1677 
1678     ctrl = not_full;
1679 
1680     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1681     phase->register_new_node(next_index, ctrl);
1682 
1683     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1684     phase->register_new_node(buffer, ctrl);
1685     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1686     phase->register_new_node(log_addr, ctrl);
1687     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1688     phase->register_new_node(log_store, ctrl);
1689     // Update the queue index.
1690     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1691     phase->register_new_node(index_update, ctrl);
1692 
1693     // Fast-path case
1694     region2->init_req(_fast_path, ctrl);
1695     phi2->init_req(_fast_path, index_update);
1696 
1697     ctrl = full;
1698 
1699     Node* base = find_bottom_mem(ctrl, phase);
1700 
1701     MergeMemNode* mm = MergeMemNode::make(base);
1702     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1703     phase->register_new_node(mm, ctrl);
1704 
1705     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1706     call->init_req(TypeFunc::Control, ctrl);
1707     call->init_req(TypeFunc::I_O, phase->C->top());
1708     call->init_req(TypeFunc::Memory, mm);
1709     call->init_req(TypeFunc::FramePtr, phase->C->top());
1710     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1711     call->init_req(TypeFunc::Parms, pre_val);
1712     call->init_req(TypeFunc::Parms+1, thread);
1713     phase->register_control(call, loop, ctrl);
1714 
1715     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1716     phase->register_control(ctrl_proj, loop, call);
1717     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1718     phase->register_new_node(mem_proj, call);
1719 
1720     // Slow-path case
1721     region2->init_req(_slow_path, ctrl_proj);
1722     phi2->init_req(_slow_path, mem_proj);
1723 
1724     phase->register_control(region2, loop, reg2_ctrl);
1725     phase->register_new_node(phi2, region2);
1726 
1727     region->init_req(_heap_unstable, region2);
1728     phi->init_req(_heap_unstable, phi2);
1729 
1730     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1731     phase->register_new_node(phi, region);
1732 
1733     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1734     for (uint next = 0; next < uses.size(); next++) {
1735       Node *n = uses.at(next);
1736       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1737       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1738       phase->set_ctrl(n, region);
1739       follow_barrier_uses(n, init_ctrl, uses, phase);
1740     }
1741     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1742 
1743     phase->igvn().replace_node(barrier, pre_val);
1744   }
1745   assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1746 
1747 }
1748 
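     // Walk back through the definition of the barrier's value to recover the address
     // it was loaded from. Used when ShenandoahSelfFixing is enabled to pass the load
     // address to the LRB stub; returns a null constant when no single load address
     // can be determined (constants, call results, merges of different addresses).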
1749 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1750   if (visited.test_set(in->_idx)) {
1751     return NULL;
1752   }
1753   switch (in->Opcode()) {
1754     case Op_Proj:
1755       return get_load_addr(phase, visited, in->in(0));
1756     case Op_CastPP:
1757     case Op_CheckCastPP:
1758     case Op_DecodeN:
1759     case Op_EncodeP:
1760       return get_load_addr(phase, visited, in->in(1));
1761     case Op_LoadN:
1762     case Op_LoadP:
1763       return in->in(MemNode::Address);
1764     case Op_CompareAndExchangeN:
1765     case Op_CompareAndExchangeP:
1766     case Op_GetAndSetN:
1767     case Op_GetAndSetP:
1768     case Op_ShenandoahCompareAndExchangeP:
1769     case Op_ShenandoahCompareAndExchangeN:
1770       // These nodes would just have stored a different value into the field;
1771       // there is no point attempting to self-fix from them here.
1772       return phase->igvn().zerocon(T_OBJECT);
1773     case Op_Phi: {
1774       Node* addr = NULL;
1775       for (uint i = 1; i < in->req(); i++) {
1776         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1777         if (addr == NULL) {
1778           addr = addr1;
1779         }
1780         if (addr != addr1) {
1781           return phase->igvn().zerocon(T_OBJECT);
1782         }
1783       }
1784       return addr;
1785     }
1786     case Op_ShenandoahLoadReferenceBarrier:
1787       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1788     case Op_ShenandoahEnqueueBarrier:
1789       return get_load_addr(phase, visited, in->in(1));
1790     case Op_CallDynamicJava:
1791     case Op_CallLeaf:
1792     case Op_CallStaticJava:
1793     case Op_ConN:
1794     case Op_ConP:
1795     case Op_Parm:
1796       return phase->igvn().zerocon(T_OBJECT);
1797     default:
1798 #ifdef ASSERT
1799       in->dump();
1800       ShouldNotReachHere();
1801 #endif
1802       return phase->igvn().zerocon(T_OBJECT);
1803   }
1804 
1805 }
1806 
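     // If the gc-state load feeding a heap-stable test inside a loop is not already
     // available at the loop entry, clone the load / and / cmp / bool chain above the
     // loop head so the test becomes loop invariant (a prerequisite for unswitching
     // the loop on it).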
1807 void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1808   IdealLoopTree *loop = phase->get_loop(iff);
1809   Node* loop_head = loop->_head;
1810   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1811 
1812   Node* bol = iff->in(1);
1813   Node* cmp = bol->in(1);
1814   Node* andi = cmp->in(1);
1815   Node* load = andi->in(1);
1816 
1817   assert(is_gc_state_load(load), "broken");
1818   if (!phase->is_dominator(load->in(0), entry_c)) {
1819     Node* mem_ctrl = NULL;
1820     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1821     load = load->clone();
1822     load->set_req(MemNode::Memory, mem);
1823     load->set_req(0, entry_c);
1824     phase->register_new_node(load, entry_c);
1825     andi = andi->clone();
1826     andi->set_req(1, load);
1827     phase->register_new_node(andi, entry_c);
1828     cmp = cmp->clone();
1829     cmp->set_req(1, andi);
1830     phase->register_new_node(cmp, entry_c);
1831     bol = bol->clone();
1832     bol->set_req(1, cmp);
1833     phase->register_new_node(bol, entry_c);
1834 
1835           Node* old_bol = iff->in(1);
1836     phase->igvn().replace_input_of(iff, 1, bol);
1837   }
1838 }
1839 
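     // Returns true when n is a heap-stable test whose region is dominated by another
     // heap-stable test and every path into that region goes through one of the
     // dominating test's projections, i.e. the two tests are back to back and the
     // second one is redundant.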
1840 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1841   if (!n->is_If() || n->is_CountedLoopEnd()) {
1842     return false;
1843   }
1844   Node* region = n->in(0);
1845 
1846   if (!region->is_Region()) {
1847     return false;
1848   }
1849   Node* dom = phase->idom(region);
1850   if (!dom->is_If()) {
1851     return false;
1852   }
1853 
1854   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1855     return false;
1856   }
1857 
1858   IfNode* dom_if = dom->as_If();
1859   Node* proj_true = dom_if->proj_out(1);
1860   Node* proj_false = dom_if->proj_out(0);
1861 
1862   for (uint i = 1; i < region->req(); i++) {
1863     if (phase->is_dominator(proj_true, region->in(i))) {
1864       continue;
1865     }
1866     if (phase->is_dominator(proj_false, region->in(i))) {
1867       continue;
1868     }
1869     return false;
1870   }
1871 
1872   return true;
1873 }
1874 
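     // Merge a heap-stable test with an identical dominating test: common the two
     // gc-state loads, feed n a phi of constants that mirrors the dominating test's
     // outcome on each incoming path, and let split-if fold the second test away.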
1875 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1876   assert(is_heap_stable_test(n), "no other tests");
1877   if (identical_backtoback_ifs(n, phase)) {
1878     Node* n_ctrl = n->in(0);
1879     if (phase->can_split_if(n_ctrl)) {
1880       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1881       if (is_heap_stable_test(n)) {
1882         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1883         assert(is_gc_state_load(gc_state_load), "broken");
1884         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1885         assert(is_gc_state_load(dom_gc_state_load), "broken");
1886         if (gc_state_load != dom_gc_state_load) {
1887           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1888         }
1889       }
1890       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1891       Node* proj_true = dom_if->proj_out(1);
1892       Node* proj_false = dom_if->proj_out(0);
1893       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1894       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1895 
1896       for (uint i = 1; i < n_ctrl->req(); i++) {
1897         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1898           bolphi->init_req(i, con_true);
1899         } else {
1900           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1901           bolphi->init_req(i, con_false);
1902         }
1903       }
1904       phase->register_new_node(bolphi, n_ctrl);
1905       phase->igvn().replace_input_of(n, 1, bolphi);
1906       phase->do_split_if(n);
1907     }
1908   }
1909 }
1910 
1911 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1912   // Find first invariant test that doesn't exit the loop
1913   LoopNode *head = loop->_head->as_Loop();
1914   IfNode* unswitch_iff = NULL;
1915   Node* n = head->in(LoopNode::LoopBackControl);
1916   int loop_has_sfpts = -1;
1917   while (n != head) {
1918     Node* n_dom = phase->idom(n);
1919     if (n->is_Region()) {
1920       if (n_dom->is_If()) {
1921         IfNode* iff = n_dom->as_If();
1922         if (iff->in(1)->is_Bool()) {
1923           BoolNode* bol = iff->in(1)->as_Bool();
1924           if (bol->in(1)->is_Cmp()) {
1925             // If condition is invariant and not a loop exit,
1926             // then found reason to unswitch.
1927             if (is_heap_stable_test(iff) &&
1928                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1929               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1930               if (loop_has_sfpts == -1) {
1931                 for (uint i = 0; i < loop->_body.size(); i++) {
1932                   Node *m = loop->_body[i];
1933                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1934                     loop_has_sfpts = 1;
1935                     break;
1936                   }
1937                 }
1938                 if (loop_has_sfpts == -1) {
1939                   loop_has_sfpts = 0;
1940                 }
1941               }
1942               if (!loop_has_sfpts) {
1943                 unswitch_iff = iff;
1944               }
1945             }
1946           }
1947         }
1948       }
1949     }
1950     n = n_dom;
1951   }
1952   return unswitch_iff;
1953 }
1954 
1955 
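     // Post-expansion cleanup: walk the graph once to collect gc-state loads and
     // heap-stable tests, common the loads where possible, merge back-to-back
     // heap-stable tests, and, when no other major progress is pending, unswitch
     // innermost loops on a loop-invariant heap-stable test.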
1956 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1957   Node_List heap_stable_tests;
1958   Node_List gc_state_loads;
1959   stack.push(phase->C->start(), 0);
1960   do {
1961     Node* n = stack.node();
1962     uint i = stack.index();
1963 
1964     if (i < n->outcnt()) {
1965       Node* u = n->raw_out(i);
1966       stack.set_index(i+1);
1967       if (!visited.test_set(u->_idx)) {
1968         stack.push(u, 0);
1969       }
1970     } else {
1971       stack.pop();
1972       if (ShenandoahCommonGCStateLoads && is_gc_state_load(n)) {
1973         gc_state_loads.push(n);
1974       }
1975       if (n->is_If() && is_heap_stable_test(n)) {
1976         heap_stable_tests.push(n);
1977       }
1978     }
1979   } while (stack.size() > 0);
1980 
1981   bool progress;
1982   do {
1983     progress = false;
1984     for (uint i = 0; i < gc_state_loads.size(); i++) {
1985       Node* n = gc_state_loads.at(i);
1986       if (n->outcnt() != 0) {
1987         progress |= try_common_gc_state_load(n, phase);
1988       }
1989     }
1990   } while (progress);
1991 
1992   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1993     Node* n = heap_stable_tests.at(i);
1994     assert(is_heap_stable_test(n), "only evacuation test");
1995     merge_back_to_back_tests(n, phase);
1996   }
1997 
1998   if (!phase->C->major_progress()) {
1999     VectorSet seen(Thread::current()->resource_area());
2000     for (uint i = 0; i < heap_stable_tests.size(); i++) {
2001       Node* n = heap_stable_tests.at(i);
2002       IdealLoopTree* loop = phase->get_loop(n);
2003       if (loop != phase->ltree_root() &&
2004           loop->_child == NULL &&
2005           !loop->_irreducible) {
2006         LoopNode* head = loop->_head->as_Loop();
2007         if ((!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
2008             !seen.test_set(head->_idx)) {
2009           IfNode* iff = find_unswitching_candidate(loop, phase);
2010           if (iff != NULL) {
2011             Node* bol = iff->in(1);
2012             if (head->is_strip_mined()) {
2013               head->verify_strip_mined(0);
2014             }
2015             move_heap_stable_test_out_of_loop(iff, phase);
2016 
2017             AutoNodeBudget node_budget(phase);
2018 
2019             if (loop->policy_unswitching(phase)) {
2020               if (head->is_strip_mined()) {
2021                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
2022                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
2023               }
2024               phase->do_unswitching(loop, old_new);
2025             } else {
2026               // Not proceeding with unswitching. Move load back in
2027               // the loop.
2028               phase->igvn().replace_input_of(iff, 1, bol);
2029             }
2030           }
2031         }
2032       }
2033     }
2034   }
2035 }
2036 
2037 #ifdef ASSERT
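     // Debug-only consistency check: for every LRB runtime call, follow the control
     // and raw-memory graphs hanging off it and verify that each region reached
     // either lies entirely on paths from the call or merges raw memory through a phi
     // whose inputs agree with which control paths come from the call.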
2038 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
2039   const bool trace = false;
2040   ResourceMark rm;
2041   Unique_Node_List nodes;
2042   Unique_Node_List controls;
2043   Unique_Node_List memories;
2044 
2045   nodes.push(root);
2046   for (uint next = 0; next < nodes.size(); next++) {
2047     Node *n  = nodes.at(next);
2048     if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
2049       controls.push(n);
2050       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
2051       for (uint next2 = 0; next2 < controls.size(); next2++) {
2052         Node *m = controls.at(next2);
2053         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2054           Node* u = m->fast_out(i);
2055           if (u->is_CFG() && !u->is_Root() &&
2056               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
2057               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
2058             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
2059             controls.push(u);
2060           }
2061         }
2062       }
2063       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
2064       for (uint next2 = 0; next2 < memories.size(); next2++) {
2065         Node *m = memories.at(next2);
2066         assert(m->bottom_type() == Type::MEMORY, "");
2067         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2068           Node* u = m->fast_out(i);
2069           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
2070             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2071             memories.push(u);
2072           } else if (u->is_LoadStore()) {
2073             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
2074             memories.push(u->find_out_with(Op_SCMemProj));
2075           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
2076             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2077             memories.push(u);
2078           } else if (u->is_Phi()) {
2079             assert(u->bottom_type() == Type::MEMORY, "");
2080             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
2081               assert(controls.member(u->in(0)), "");
2082               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2083               memories.push(u);
2084             }
2085           } else if (u->is_SafePoint() || u->is_MemBar()) {
2086             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2087               Node* uu = u->fast_out(j);
2088               if (uu->bottom_type() == Type::MEMORY) {
2089                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
2090                 memories.push(uu);
2091               }
2092             }
2093           }
2094         }
2095       }
2096       for (uint next2 = 0; next2 < controls.size(); next2++) {
2097         Node *m = controls.at(next2);
2098         if (m->is_Region()) {
2099           bool all_in = true;
2100           for (uint i = 1; i < m->req(); i++) {
2101             if (!controls.member(m->in(i))) {
2102               all_in = false;
2103               break;
2104             }
2105           }
2106           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
2107           bool found_phi = false;
2108           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
2109             Node* u = m->fast_out(j);
2110             if (u->is_Phi() && memories.member(u)) {
2111               found_phi = true;
2112               for (uint i = 1; i < u->req() && found_phi; i++) {
2113                 Node* k = u->in(i);
2114                 if (memories.member(k) != controls.member(m->in(i))) {
2115                   found_phi = false;
2116                 }
2117               }
2118             }
2119           }
2120           assert(found_phi || all_in, "");
2121         }
2122       }
2123       controls.clear();
2124       memories.clear();
2125     }
2126     for (uint i = 0; i < n->len(); ++i) {
2127       Node *m = n->in(i);
2128       if (m != NULL) {
2129         nodes.push(m);
2130       }
2131     }
2132   }
2133 }
2134 #endif
2135 
2136 ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
2137   ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
2138 }
2139 
2140 const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
2141   if (in(1) == NULL || in(1)->is_top()) {
2142     return Type::TOP;
2143   }
2144   const Type* t = in(1)->bottom_type();
2145   if (t == TypePtr::NULL_PTR) {
2146     return t;
2147   }
2148   return t->is_oopptr()->cast_to_nonconst();
2149 }
2150 
2151 const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
2152   if (in(1) == NULL) {
2153     return Type::TOP;
2154   }
2155   const Type* t = phase->type(in(1));
2156   if (t == Type::TOP) {
2157     return Type::TOP;
2158   }
2159   if (t == TypePtr::NULL_PTR) {
2160     return t;
2161   }
2162   return t->is_oopptr()->cast_to_nonconst();
2163 }
2164 
2165 int ShenandoahEnqueueBarrierNode::needed(Node* n) {
2166   if (n == NULL ||
2167       n->is_Allocate() ||
2168       n->Opcode() == Op_ShenandoahEnqueueBarrier ||
2169       n->bottom_type() == TypePtr::NULL_PTR ||
2170       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2171     return NotNeeded;
2172   }
2173   if (n->is_Phi() ||
2174       n->is_CMove()) {
2175     return MaybeNeeded;
2176   }
2177   return Needed;
2178 }
2179 
2180 Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
2181   for (;;) {
2182     if (n == NULL) {
2183       return n;
2184     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2185       return n;
2186     } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2187       return n;
2188     } else if (n->is_ConstraintCast() ||
2189                n->Opcode() == Op_DecodeN ||
2190                n->Opcode() == Op_EncodeP) {
2191       n = n->in(1);
2192     } else if (n->is_Proj()) {
2193       n = n->in(0);
2194     } else {
2195       return n;
2196     }
2197   }
2198   ShouldNotReachHere();
2199   return NULL;
2200 }
2201 
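     // The enqueue barrier is redundant when its input is known to be null, a
     // constant oop, a new allocation, or already covered by another enqueue barrier.
     // For Phi and CMove inputs the check is applied to every incoming value (see
     // needed() and next()); Identity returns the input when the barrier is provably
     // not needed.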
2202 Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
2203   PhaseIterGVN* igvn = phase->is_IterGVN();
2204 
2205   Node* n = next(in(1));
2206 
2207   int cont = needed(n);
2208 
2209   if (cont == NotNeeded) {
2210     return in(1);
2211   } else if (cont == MaybeNeeded) {
2212     if (igvn == NULL) {
2213       phase->record_for_igvn(this);
2214       return this;
2215     } else {
2216       ResourceMark rm;
2217       Unique_Node_List wq;
2218       uint wq_i = 0;
2219 
2220       for (;;) {
2221         if (n->is_Phi()) {
2222           for (uint i = 1; i < n->req(); i++) {
2223             Node* m = n->in(i);
2224             if (m != NULL) {
2225               wq.push(m);
2226             }
2227           }
2228         } else {
2229           assert(n->is_CMove(), "nothing else here");
2230           Node* m = n->in(CMoveNode::IfFalse);
2231           wq.push(m);
2232           m = n->in(CMoveNode::IfTrue);
2233           wq.push(m);
2234         }
2235         Node* orig_n = NULL;
2236         do {
2237           if (wq_i >= wq.size()) {
2238             return in(1);
2239           }
2240           n = wq.at(wq_i);
2241           wq_i++;
2242           orig_n = n;
2243           n = next(n);
2244           cont = needed(n);
2245           if (cont == Needed) {
2246             return this;
2247           }
2248         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2249       }
2250     }
2251   }
2252 
2253   return this;
2254 }
2255 
2256 #ifdef ASSERT
2257 static bool has_never_branch(Node* root) {
2258   for (uint i = 1; i < root->req(); i++) {
2259     Node* in = root->in(i);
2260     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2261       return true;
2262     }
2263   }
2264   return false;
2265 }
2266 #endif
2267 
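     // Build _memory_nodes: for every CFG node, the memory state for _alias that is
     // live at that point. Non-phi memory nodes are mapped at their control, then the
     // state is propagated over the CFG in reverse post order, creating memory phis
     // at regions where the incoming states differ.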
2268 void MemoryGraphFixer::collect_memory_nodes() {
2269   Node_Stack stack(0);
2270   VectorSet visited(Thread::current()->resource_area());
2271   Node_List regions;
2272 
2273   // Walk the raw memory graph and create a mapping from CFG node to
2274   // memory node. Exclude phis for now.
2275   stack.push(_phase->C->root(), 1);
2276   do {
2277     Node* n = stack.node();
2278     int opc = n->Opcode();
2279     uint i = stack.index();
2280     if (i < n->req()) {
2281       Node* mem = NULL;
2282       if (opc == Op_Root) {
2283         Node* in = n->in(i);
2284         int in_opc = in->Opcode();
2285         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2286           mem = in->in(TypeFunc::Memory);
2287         } else if (in_opc == Op_Halt) {
2288           if (!in->in(0)->is_Region()) {
2289             Node* proj = in->in(0);
2290             assert(proj->is_Proj(), "");
2291             Node* in = proj->in(0);
2292             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2293             if (in->is_CallStaticJava()) {
2294               mem = in->in(TypeFunc::Memory);
2295             } else if (in->Opcode() == Op_Catch) {
2296               Node* call = in->in(0)->in(0);
2297               assert(call->is_Call(), "");
2298               mem = call->in(TypeFunc::Memory);
2299             } else if (in->Opcode() == Op_NeverBranch) {
2300               ResourceMark rm;
2301               Unique_Node_List wq;
2302               wq.push(in);
2303               wq.push(in->as_Multi()->proj_out(0));
2304               for (uint j = 1; j < wq.size(); j++) {
2305                 Node* c = wq.at(j);
2306                 assert(!c->is_Root(), "shouldn't leave loop");
2307                 if (c->is_SafePoint()) {
2308                   assert(mem == NULL, "only one safepoint");
2309                   mem = c->in(TypeFunc::Memory);
2310                 }
2311                 for (DUIterator_Fast kmax, k = c->fast_outs(kmax); k < kmax; k++) {
2312                   Node* u = c->fast_out(k);
2313                   if (u->is_CFG()) {
2314                     wq.push(u);
2315                   }
2316                 }
2317               }
2318               assert(mem != NULL, "should have found safepoint");
2319             }
2320           }
2321         } else {
2322 #ifdef ASSERT
2323           n->dump();
2324           in->dump();
2325 #endif
2326           ShouldNotReachHere();
2327         }
2328       } else {
2329         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2330         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2331         mem = n->in(i);
2332       }
2333       i++;
2334       stack.set_index(i);
2335       if (mem == NULL) {
2336         continue;
2337       }
2338       for (;;) {
2339         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2340           break;
2341         }
2342         if (mem->is_Phi()) {
2343           stack.push(mem, 2);
2344           mem = mem->in(1);
2345         } else if (mem->is_Proj()) {
2346           stack.push(mem, mem->req());
2347           mem = mem->in(0);
2348         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2349           mem = mem->in(TypeFunc::Memory);
2350         } else if (mem->is_MergeMem()) {
2351           MergeMemNode* mm = mem->as_MergeMem();
2352           mem = mm->memory_at(_alias);
2353         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2354           assert(_alias == Compile::AliasIdxRaw, "");
2355           stack.push(mem, mem->req());
2356           mem = mem->in(MemNode::Memory);
2357         } else {
2358 #ifdef ASSERT
2359           mem->dump();
2360 #endif
2361           ShouldNotReachHere();
2362         }
2363       }
2364     } else {
2365       if (n->is_Phi()) {
2366         // Nothing
2367       } else if (!n->is_Root()) {
2368         Node* c = get_ctrl(n);
2369         _memory_nodes.map(c->_idx, n);
2370       }
2371       stack.pop();
2372     }
2373   } while(stack.is_nonempty());
2374 
2375   // Iterate over CFG nodes in rpo and propagate memory state to
2376   // compute memory state at regions, creating new phis if needed.
2377   Node_List rpo_list;
2378   visited.Clear();
2379   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2380   Node* root = rpo_list.pop();
2381   assert(root == _phase->C->root(), "");
2382 
2383   const bool trace = false;
2384 #ifdef ASSERT
2385   if (trace) {
2386     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2387       Node* c = rpo_list.at(i);
2388       if (_memory_nodes[c->_idx] != NULL) {
2389         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2390       }
2391     }
2392   }
2393 #endif
2394   uint last = _phase->C->unique();
2395 
2396 #ifdef ASSERT
2397   uint8_t max_depth = 0;
2398   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2399     IdealLoopTree* lpt = iter.current();
2400     max_depth = MAX2(max_depth, lpt->_nest);
2401   }
2402 #endif
2403 
2404   bool progress = true;
2405   int iteration = 0;
2406   Node_List dead_phis;
2407   while (progress) {
2408     progress = false;
2409     iteration++;
2410     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2411     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2412     IdealLoopTree* last_updated_ilt = NULL;
2413     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2414       Node* c = rpo_list.at(i);
2415 
2416       Node* prev_mem = _memory_nodes[c->_idx];
2417       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2418         Node* prev_region = regions[c->_idx];
2419         Node* unique = NULL;
2420         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2421           Node* m = _memory_nodes[c->in(j)->_idx];
2422           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2423           if (m != NULL) {
2424             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2425               assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), "");
2426               // continue
2427             } else if (unique == NULL) {
2428               unique = m;
2429             } else if (m == unique) {
2430               // continue
2431             } else {
2432               unique = NodeSentinel;
2433             }
2434           }
2435         }
2436         assert(unique != NULL, "empty phi???");
2437         if (unique != NodeSentinel) {
2438           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2439             dead_phis.push(prev_region);
2440           }
2441           regions.map(c->_idx, unique);
2442         } else {
2443           Node* phi = NULL;
2444           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2445             phi = prev_region;
2446             for (uint k = 1; k < c->req(); k++) {
2447               Node* m = _memory_nodes[c->in(k)->_idx];
2448               assert(m != NULL, "expect memory state");
2449               phi->set_req(k, m);
2450             }
2451           } else {
2452             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2453               Node* u = c->fast_out(j);
2454               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2455                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2456                 phi = u;
2457                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2458                   Node* m = _memory_nodes[c->in(k)->_idx];
2459                   assert(m != NULL, "expect memory state");
2460                   if (u->in(k) != m) {
2461                     phi = NULL;
2462                   }
2463                 }
2464               }
2465             }
2466             if (phi == NULL) {
2467               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2468               for (uint k = 1; k < c->req(); k++) {
2469                 Node* m = _memory_nodes[c->in(k)->_idx];
2470                 assert(m != NULL, "expect memory state");
2471                 phi->init_req(k, m);
2472               }
2473             }
2474           }
2475           assert(phi != NULL, "");
2476           regions.map(c->_idx, phi);
2477         }
2478         Node* current_region = regions[c->_idx];
2479         if (current_region != prev_region) {
2480           progress = true;
2481           if (prev_region == prev_mem) {
2482             _memory_nodes.map(c->_idx, current_region);
2483           }
2484         }
2485       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2486         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2487         assert(m != NULL, "expect memory state");
2488         if (m != prev_mem) {
2489           _memory_nodes.map(c->_idx, m);
2490           progress = true;
2491         }
2492       }
2493 #ifdef ASSERT
2494       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2495 #endif
2496     }
2497   }
2498 
2499   // Replace existing phi with computed memory state for that region
2500   // if different (could be a new phi or a dominating memory node if
2501   // that phi was found to be useless).
2502   while (dead_phis.size() > 0) {
2503     Node* n = dead_phis.pop();
2504     n->replace_by(_phase->C->top());
2505     n->destruct();
2506   }
2507   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2508     Node* c = rpo_list.at(i);
2509     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2510       Node* n = regions[c->_idx];
2511       if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2512         _phase->register_new_node(n, c);
2513       }
2514     }
2515   }
2516   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2517     Node* c = rpo_list.at(i);
2518     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2519       Node* n = regions[c->_idx];
2520       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2521         Node* u = c->fast_out(i);
2522         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2523             u != n) {
2524           if (u->adr_type() == TypePtr::BOTTOM) {
2525             fix_memory_uses(u, n, n, c);
2526           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2527             _phase->lazy_replace(u, n);
2528             --i; --imax;
2529           }
2530         }
2531       }
2532     }
2533   }
2534 }
2535 
2536 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2537   Node* c = _phase->get_ctrl(n);
2538   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2539     assert(c == n->in(0), "");
2540     CallNode* call = c->as_Call();
2541     CallProjections projs;
2542     call->extract_projections(&projs, true, false);
2543     if (projs.catchall_memproj != NULL) {
2544       if (projs.fallthrough_memproj == n) {
2545         c = projs.fallthrough_catchproj;
2546       } else {
2547         assert(projs.catchall_memproj == n, "");
2548         c = projs.catchall_catchproj;
2549       }
2550     }
2551   }
2552   return c;
2553 }
2554 
2555 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2556   if (_phase->has_ctrl(n))
2557     return get_ctrl(n);
2558   else {
2559     assert (n->is_CFG(), "must be a CFG node");
2560     return n;
2561   }
2562 }
2563 
2564 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2565   return m != NULL && get_ctrl(m) == c;
2566 }
2567 
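     // Find the memory state to use at ctrl (for node n, which is expected to be
     // pinned there): start from the state recorded for ctrl and walk up the
     // dominator tree until a state that is valid at that control is found, then step
     // over memory nodes that are ordered after n at the same control.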
2568 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2569   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
2570   Node* mem = _memory_nodes[ctrl->_idx];
2571   Node* c = ctrl;
2572   while (!mem_is_valid(mem, c) &&
2573          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2574     c = _phase->idom(c);
2575     mem = _memory_nodes[c->_idx];
2576   }
2577   if (n != NULL && mem_is_valid(mem, c)) {
2578     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2579       mem = next_mem(mem, _alias);
2580     }
2581     if (mem->is_MergeMem()) {
2582       mem = mem->as_MergeMem()->memory_at(_alias);
2583     }
2584     if (!mem_is_valid(mem, c)) {
2585       do {
2586         c = _phase->idom(c);
2587         mem = _memory_nodes[c->_idx];
2588       } while (!mem_is_valid(mem, c) &&
2589                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2590     }
2591   }
2592   assert(mem->bottom_type() == Type::MEMORY, "");
2593   return mem;
2594 }
2595 
2596 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2597   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2598     Node* use = region->fast_out(i);
2599     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2600         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2601       return true;
2602     }
2603   }
2604   return false;
2605 }
2606 
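// Rewire the _alias memory slice after a new memory state (new_mem, defined
// at new_ctrl) has been introduced. Users of the old state mem that are now
// reached by the new state must consume new_mem, or a Phi merging old and
// new, instead. If the state at ctrl (mem_for_ctrl) hangs off mem through a
// chain of raw-memory stores, new_mem is simply spliced into that chain.
// Otherwise the CFG is walked from new_ctrl, memory Phis are created at
// regions where the new and old states merge, and every remaining use of
// mem on this slice (loads, MergeMems, memory Phis, wide-memory calls) is
// rewired through find_mem().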
2607 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2608   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2609   const bool trace = false;
2610   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2611   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2612   GrowableArray<Node*> phis;
2613   if (mem_for_ctrl != mem) {
2614     Node* old = mem_for_ctrl;
2615     Node* prev = NULL;
2616     while (old != mem) {
2617       prev = old;
2618       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2619         assert(_alias == Compile::AliasIdxRaw, "");
2620         old = old->in(MemNode::Memory);
2621       } else if (old->Opcode() == Op_SCMemProj) {
2622         assert(_alias == Compile::AliasIdxRaw, "");
2623         old = old->in(0);
2624       } else {
2625         ShouldNotReachHere();
2626       }
2627     }
2628     assert(prev != NULL, "");
2629     if (new_ctrl != ctrl) {
2630       _memory_nodes.map(ctrl->_idx, mem);
2631       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2632     }
2633     uint input = (uint)MemNode::Memory;
2634     _phase->igvn().replace_input_of(prev, input, new_mem);
2635   } else {
2636     uses.clear();
2637     _memory_nodes.map(new_ctrl->_idx, new_mem);
2638     uses.push(new_ctrl);
2639     for (uint next = 0; next < uses.size(); next++) {
2640       Node* n = uses.at(next);
2641       assert(n->is_CFG(), "");
2642       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2643       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2644         Node* u = n->fast_out(i);
2645         if (!u->is_Root() && u->is_CFG() && u != n) {
2646           Node* m = _memory_nodes[u->_idx];
2647           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2648               !has_mem_phi(u) &&
2649               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2650             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2651             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2652 
2653             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2654               bool push = true;
2655               bool create_phi = true;
2656               if (_phase->is_dominator(new_ctrl, u)) {
2657                 create_phi = false;
2658               } else if (!_phase->C->has_irreducible_loop()) {
2659                 IdealLoopTree* loop = _phase->get_loop(ctrl);
2660                 bool do_check = true;
2661                 IdealLoopTree* l = loop;
2662                 create_phi = false;
2663                 while (l != _phase->ltree_root()) {
2664                   Node* head = l->_head;
2665                   if (head->in(0) == NULL) {
2666                     head = _phase->get_ctrl(head);
2667                   }
2668                   if (_phase->is_dominator(head, u) && _phase->is_dominator(_phase->idom(u), head)) {
2669                     create_phi = true;
2670                     do_check = false;
2671                     break;
2672                   }
2673                   l = l->_parent;
2674                 }
2675 
2676                 if (do_check) {
2677                   assert(!create_phi, "");
2678                   IdealLoopTree* u_loop = _phase->get_loop(u);
2679                   if (u_loop != _phase->ltree_root() && u_loop->is_member(loop)) {
2680                     Node* c = ctrl;
2681                     while (!_phase->is_dominator(c, u_loop->tail())) {
2682                       c = _phase->idom(c);
2683                     }
2684                     if (!_phase->is_dominator(c, u)) {
2685                       do_check = false;
2686                     }
2687                   }
2688                 }
2689 
2690                 if (do_check && _phase->is_dominator(_phase->idom(u), new_ctrl)) {
2691                   create_phi = true;
2692                 }
2693               }
2694               if (create_phi) {
2695                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2696                 _phase->register_new_node(phi, u);
2697                 phis.push(phi);
2698                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2699                 if (!mem_is_valid(m, u)) {
2700                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2701                   _memory_nodes.map(u->_idx, phi);
2702                 } else {
2703                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2704                   for (;;) {
2705                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2706                     Node* next = NULL;
2707                     if (m->is_Proj()) {
2708                       next = m->in(0);
2709                     } else {
2710                       assert(m->is_Mem() || m->is_LoadStore(), "");
2711                       assert(_alias == Compile::AliasIdxRaw, "");
2712                       next = m->in(MemNode::Memory);
2713                     }
2714                     if (_phase->get_ctrl(next) != u) {
2715                       break;
2716                     }
2717                     if (next->is_MergeMem()) {
2718                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2719                       break;
2720                     }
2721                     if (next->is_Phi()) {
2722                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2723                       break;
2724                     }
2725                     m = next;
2726                   }
2727 
2728                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2729                   assert(m->is_Mem() || m->is_LoadStore(), "");
2730                   uint input = (uint)MemNode::Memory;
2731                   _phase->igvn().replace_input_of(m, input, phi);
2732                   push = false;
2733                 }
2734               } else {
2735                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2736               }
2737               if (push) {
2738                 uses.push(u);
2739               }
2740             }
2741           } else if (!mem_is_valid(m, u) &&
2742                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2743             uses.push(u);
2744           }
2745         }
2746       }
2747     }
2748     for (int i = 0; i < phis.length(); i++) {
2749       Node* n = phis.at(i);
2750       Node* r = n->in(0);
2751       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2752       for (uint j = 1; j < n->req(); j++) {
2753         Node* m = find_mem(r->in(j), NULL);
2754         _phase->igvn().replace_input_of(n, j, m);
2755         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2756       }
2757     }
2758   }
2759   uint last = _phase->C->unique();
2760   MergeMemNode* mm = NULL;
2761   int alias = _alias;
2762   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2763   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2764     Node* u = mem->out(i);
2765     if (u->_idx < last) {
2766       if (u->is_Mem()) {
2767         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2768           Node* m = find_mem(_phase->get_ctrl(u), u);
2769           if (m != mem) {
2770             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2771             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2772             --i;
2773           }
2774         }
2775       } else if (u->is_MergeMem()) {
2776         MergeMemNode* u_mm = u->as_MergeMem();
2777         if (u_mm->memory_at(alias) == mem) {
2778           MergeMemNode* newmm = NULL;
2779           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2780             Node* uu = u->fast_out(j);
2781             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2782             if (uu->is_Phi()) {
2783               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2784               Node* region = uu->in(0);
2785               int nb = 0;
2786               for (uint k = 1; k < uu->req(); k++) {
2787                 if (uu->in(k) == u) {
2788                   Node* m = find_mem(region->in(k), NULL);
2789                   if (m != mem) {
2790                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2791                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2792                     if (newmm != u) {
2793                       _phase->igvn().replace_input_of(uu, k, newmm);
2794                       nb++;
2795                       --jmax;
2796                     }
2797                   }
2798                 }
2799               }
2800               if (nb > 0) {
2801                 --j;
2802               }
2803             } else {
2804               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2805               if (m != mem) {
2806                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2807                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2808                 if (newmm != u) {
2809                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2810                   --j; --jmax;
2811                 }
2812               }
2813             }
2814           }
2815         }
2816       } else if (u->is_Phi()) {
2817         assert(u->bottom_type() == Type::MEMORY, "what else?");
2818         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2819           Node* region = u->in(0);
2820           bool replaced = false;
2821           for (uint j = 1; j < u->req(); j++) {
2822             if (u->in(j) == mem) {
2823               Node* m = find_mem(region->in(j), NULL);
2824               Node* nnew = m;
2825               if (m != mem) {
2826                 if (u->adr_type() == TypePtr::BOTTOM) {
2827                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2828                   nnew = mm;
2829                 }
2830                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2831                 _phase->igvn().replace_input_of(u, j, nnew);
2832                 replaced = true;
2833               }
2834             }
2835           }
2836           if (replaced) {
2837             --i;
2838           }
2839         }
2840       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2841                  u->adr_type() == NULL) {
2842         assert(u->adr_type() != NULL ||
2843                u->Opcode() == Op_Rethrow ||
2844                u->Opcode() == Op_Return ||
2845                u->Opcode() == Op_SafePoint ||
2846                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2847                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2848                u->Opcode() == Op_CallLeaf, "");
2849         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2850         if (m != mem) {
2851           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2852           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2853           --i;
2854         }
2855       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2856         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2857         if (m != mem) {
2858           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2859           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2860           --i;
2861         }
2862       } else if (u->adr_type() != TypePtr::BOTTOM &&
2863                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2864         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2865         assert(m != mem, "");
2866         // u is a memory state recorded on the wrong alias slice; rewire its memory input to the _alias state anyway.
2867         assert(u->is_ClearArray(), "");
2868         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2869         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2870         --i;
2871       }
2872     }
2873   }
2874 #ifdef ASSERT
2875   assert(new_mem->outcnt() > 0, "");
2876   for (int i = 0; i < phis.length(); i++) {
2877     Node* n = phis.at(i);
2878     assert(n->outcnt() > 0, "new phi must have uses now");
2879   }
2880 #endif
2881 }
2882 
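// Build a MergeMem identical to mem except that the _alias slice is replaced
// by rep_proj; the new node is registered at rep_ctrl.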
2883 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2884   MergeMemNode* mm = MergeMemNode::make(mem);
2885   mm->set_memory_at(_alias, rep_proj);
2886   _phase->register_new_node(mm, rep_ctrl);
2887   return mm;
2888 }
2889 
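// Return a MergeMem equivalent to u but with the _alias slice replaced by
// rep_proj, valid at rep_ctrl. If u has a single user it is updated in
// place (and its control moved down to rep_ctrl if needed); otherwise a
// fresh copy is built and the caller redirects only those users of u that
// should see the new state.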
2890 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2891   MergeMemNode* newmm = NULL;
2892   MergeMemNode* u_mm = u->as_MergeMem();
2893   Node* c = _phase->get_ctrl(u);
2894   if (_phase->is_dominator(c, rep_ctrl)) {
2895     c = rep_ctrl;
2896   } else {
2897     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2898   }
2899   if (u->outcnt() == 1) {
2900     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2901       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2902       --i;
2903     } else {
2904       _phase->igvn().rehash_node_delayed(u);
2905       u_mm->set_memory_at(_alias, rep_proj);
2906     }
2907     newmm = u_mm;
2908     _phase->set_ctrl_and_loop(u, c);
2909   } else {
2910     // We can't simply clone u and then change one of its inputs,
2911     // because that would add and then remove an edge, which messes
2912     // with the DUIterator.
2913     newmm = MergeMemNode::make(u_mm->base_memory());
2914     for (uint j = 0; j < u->req(); j++) {
2915       if (j < newmm->req()) {
2916         if (j == (uint)_alias) {
2917           newmm->set_req(j, rep_proj);
2918         } else if (newmm->in(j) != u->in(j)) {
2919           newmm->set_req(j, u->in(j));
2920         }
2921       } else if (j == (uint)_alias) {
2922         newmm->add_req(rep_proj);
2923       } else {
2924         newmm->add_req(u->in(j));
2925       }
2926     }
2927     if ((uint)_alias >= u->req()) {
2928       newmm->set_memory_at(_alias, rep_proj);
2929     }
2930     _phase->register_new_node(newmm, c);
2931   }
2932   return newmm;
2933 }
2934 
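// A memory Phi is rewired for slice _alias if it is on that slice, or if it
// is a bottom-memory (TypePtr::BOTTOM) Phi whose region has no dedicated Phi
// for the slice.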
2935 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2936   if (phi->adr_type() == TypePtr::BOTTOM) {
2937     Node* region = phi->in(0);
2938     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2939       Node* uu = region->fast_out(j);
2940       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2941         return false;
2942       }
2943     }
2944     return true;
2945   }
2946   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2947 }
2948 
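// Redirect users of mem on the _alias slice that are dominated by rep_ctrl so
// that they consume rep_proj (the memory state produced by replacement)
// instead. MergeMems get a clone with the _alias slice swapped in, and
// bottom-memory Phis and wide-memory calls get a freshly allocated MergeMem.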
2949 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2950   uint last = _phase->C->unique();
2951   MergeMemNode* mm = NULL;
2952   assert(mem->bottom_type() == Type::MEMORY, "");
2953   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2954     Node* u = mem->out(i);
2955     if (u != replacement && u->_idx < last) {
2956       if (u->is_MergeMem()) {
2957         MergeMemNode* u_mm = u->as_MergeMem();
2958         if (u_mm->memory_at(_alias) == mem) {
2959           MergeMemNode* newmm = NULL;
2960           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2961             Node* uu = u->fast_out(j);
2962             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2963             if (uu->is_Phi()) {
2964               if (should_process_phi(uu)) {
2965                 Node* region = uu->in(0);
2966                 int nb = 0;
2967                 for (uint k = 1; k < uu->req(); k++) {
2968                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2969                     if (newmm == NULL) {
2970                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2971                     }
2972                     if (newmm != u) {
2973                       _phase->igvn().replace_input_of(uu, k, newmm);
2974                       nb++;
2975                       --jmax;
2976                     }
2977                   }
2978                 }
2979                 if (nb > 0) {
2980                   --j;
2981                 }
2982               }
2983             } else {
2984               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2985                 if (newmm == NULL) {
2986                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2987                 }
2988                 if (newmm != u) {
2989                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2990                   --j; --jmax;
2991                 }
2992               }
2993             }
2994           }
2995         }
2996       } else if (u->is_Phi()) {
2997         assert(u->bottom_type() == Type::MEMORY, "what else?");
2998         Node* region = u->in(0);
2999         if (should_process_phi(u)) {
3000           bool replaced = false;
3001           for (uint j = 1; j < u->req(); j++) {
3002             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
3003               Node* nnew = rep_proj;
3004               if (u->adr_type() == TypePtr::BOTTOM) {
3005                 if (mm == NULL) {
3006                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
3007                 }
3008                 nnew = mm;
3009               }
3010               _phase->igvn().replace_input_of(u, j, nnew);
3011               replaced = true;
3012             }
3013           }
3014           if (replaced) {
3015             --i;
3016           }
3017 
3018         }
3019       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
3020                  u->adr_type() == NULL) {
3021         assert(u->adr_type() != NULL ||
3022                u->Opcode() == Op_Rethrow ||
3023                u->Opcode() == Op_Return ||
3024                u->Opcode() == Op_SafePoint ||
3025                u->Opcode() == Op_StoreIConditional ||
3026                u->Opcode() == Op_StoreLConditional ||
3027                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
3028                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
3029                u->Opcode() == Op_CallLeaf, "%s", u->Name());
3030         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3031           if (mm == NULL) {
3032             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
3033           }
3034           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
3035           --i;
3036         }
3037       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
3038         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3039           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
3040           --i;
3041         }
3042       }
3043     }
3044   }
3045 }
3046 
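// The load reference barrier takes the loaded oop as ValueIn and produces
// the (possibly forwarded) oop. Newly created barriers are recorded in the
// barrier set state so that they can be expanded later.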
3047 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj)
3048   : Node(ctrl, obj) {
3049   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
3050 }
3051 
3052 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
3053   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
3054     return Type::TOP;
3055   }
3056   const Type* t = in(ValueIn)->bottom_type();
3057   if (t == TypePtr::NULL_PTR) {
3058     return t;
3059   }
3060   return t->is_oopptr();
3061 }
3062 
3063 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
3064   // If the input value is TOP, the result is TOP.
3065   const Type* t2 = phase->type(in(ValueIn));
3066   if (t2 == Type::TOP) return Type::TOP;
3067 
3068   if (t2 == TypePtr::NULL_PTR) {
3069     return t2;
3070   }
3071 
3072   const Type* type = t2->is_oopptr()/*->cast_to_nonconst()*/;
3073   return type;
3074 }
3075 
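// A barrier whose input provably never needs forwarding (see needs_barrier())
// is redundant and collapses to its input.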
3076 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
3077   Node* value = in(ValueIn);
3078   if (!needs_barrier(phase, value)) {
3079     return value;
3080   }
3081   return this;
3082 }
3083 
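// Conservatively decide whether the value flowing into the barrier could be a
// reference that still needs forwarding. Values that are provably NULL,
// constants, freshly allocated objects, call results, incoming parameters or
// the output of another barrier do not need one; anything loaded from the
// heap does.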
3084 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
3085   Unique_Node_List visited;
3086   return needs_barrier_impl(phase, n, visited);
3087 }
3088 
3089 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
3090   if (n == NULL) return false;
3091   if (visited.member(n)) {
3092     return false; // Been there.
3093   }
3094   visited.push(n);
3095 
3096   if (n->is_Allocate()) {
3097     // tty->print_cr("optimize barrier on alloc");
3098     return false;
3099   }
3100   if (n->is_Call()) {
3101     // tty->print_cr("optimize barrier on call");
3102     return false;
3103   }
3104 
3105   const Type* type = phase->type(n);
3106   if (type == Type::TOP) {
3107     return false;
3108   }
3109   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
3110     // tty->print_cr("optimize barrier on null");
3111     return false;
3112   }
3113   if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
3114     // tty->print_cr("optimize barrier on constant");
3115     return false;
3116   }
3117 
3118   switch (n->Opcode()) {
3119     case Op_AddP:
3120       return true; // TODO: Can refine?
3121     case Op_LoadP:
3122     case Op_ShenandoahCompareAndExchangeN:
3123     case Op_ShenandoahCompareAndExchangeP:
3124     case Op_CompareAndExchangeN:
3125     case Op_CompareAndExchangeP:
3126     case Op_GetAndSetN:
3127     case Op_GetAndSetP:
3128       return true;
3129     case Op_Phi: {
3130       for (uint i = 1; i < n->req(); i++) {
3131         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
3132       }
3133       return false;
3134     }
3135     case Op_CheckCastPP:
3136     case Op_CastPP:
3137       return needs_barrier_impl(phase, n->in(1), visited);
3138     case Op_Proj:
3139       return needs_barrier_impl(phase, n->in(0), visited);
3140     case Op_ShenandoahLoadReferenceBarrier:
3141       // tty->print_cr("optimize barrier on barrier");
3142       return false;
3143     case Op_Parm:
3144       // tty->print_cr("optimize barrier on input arg");
3145       return false;
3146     case Op_DecodeN:
3147     case Op_EncodeP:
3148       return needs_barrier_impl(phase, n->in(1), visited);
3149     case Op_LoadN:
3150       return true;
3151     case Op_CMoveP:
3152       return needs_barrier_impl(phase, n->in(2), visited) ||
3153              needs_barrier_impl(phase, n->in(3), visited);
3154     case Op_ShenandoahEnqueueBarrier:
3155       return needs_barrier_impl(phase, n->in(1), visited);
3156     default:
3157       break;
3158   }
3159 #ifdef ASSERT
3160   tty->print_cr("need barrier on?:");
3161   tty->print_cr("ins:");
3162   n->dump(2);
3163   tty->print_cr("outs:");
3164   n->dump(-2);
3165   ShouldNotReachHere();
3166 #endif
3167   return true;
3168 }
3169 
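// Classify the barrier by looking at the transitive users of the loaded
// value: NONE if no user cares about the object's contents or identity,
// WEAK if the value is only read from (and the final/stable field
// optimizations permit it), STRONG as soon as the value is written back to
// the heap, compared against a non-null value, passed to a call, used for
// locking, and so on. An illustrative (assumed) Java-level shape:
//
//   Object o = holder.f;     // LoadP feeding this barrier
//   if (o == null) { ... }   // CmpP against NULL      -> does not strengthen
//   int v = ((Box) o).val;   // dependent LoadI        -> WEAK is sufficient
//   other.f = o;             // StoreP of the value    -> STRONG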
3170 ShenandoahLoadReferenceBarrierNode::Strength ShenandoahLoadReferenceBarrierNode::get_barrier_strength() {
3171   Unique_Node_List visited;
3172   Node_Stack stack(0);
3173   stack.push(this, 0);
3174   Strength strength = NONE;
3175   while (strength != STRONG && stack.size() > 0) {
3176     Node* n = stack.node();
3177     if (visited.member(n)) {
3178       stack.pop();
3179       continue;
3180     }
3181     visited.push(n);
3182     bool visit_users = false;
3183     switch (n->Opcode()) {
3184       case Op_StoreN:
3185       case Op_StoreP: {
3186         strength = STRONG;
3187         break;
3188       }
3189       case Op_CmpP: {
3190         if (!n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) &&
3191             !n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
3192           strength = STRONG;
3193         }
3194         break;
3195       }
3196       case Op_CallStaticJava: {
3197         strength = STRONG;
3198         break;
3199       }
3200       case Op_CallDynamicJava:
3201       case Op_CallLeaf:
3202       case Op_CallLeafNoFP:
3203       case Op_CompareAndSwapL:
3204       case Op_CompareAndSwapI:
3205       case Op_CompareAndSwapB:
3206       case Op_CompareAndSwapS:
3207       case Op_CompareAndSwapN:
3208       case Op_CompareAndSwapP:
3209       case Op_CompareAndExchangeL:
3210       case Op_CompareAndExchangeI:
3211       case Op_CompareAndExchangeB:
3212       case Op_CompareAndExchangeS:
3213       case Op_CompareAndExchangeN:
3214       case Op_CompareAndExchangeP:
3215       case Op_WeakCompareAndSwapL:
3216       case Op_WeakCompareAndSwapI:
3217       case Op_WeakCompareAndSwapB:
3218       case Op_WeakCompareAndSwapS:
3219       case Op_WeakCompareAndSwapN:
3220       case Op_WeakCompareAndSwapP:
3221       case Op_ShenandoahCompareAndSwapN:
3222       case Op_ShenandoahCompareAndSwapP:
3223       case Op_ShenandoahWeakCompareAndSwapN:
3224       case Op_ShenandoahWeakCompareAndSwapP:
3225       case Op_ShenandoahCompareAndExchangeN:
3226       case Op_ShenandoahCompareAndExchangeP:
3227       case Op_GetAndSetL:
3228       case Op_GetAndSetI:
3229       case Op_GetAndSetB:
3230       case Op_GetAndSetS:
3231       case Op_GetAndSetP:
3232       case Op_GetAndSetN:
3233       case Op_GetAndAddL:
3234       case Op_GetAndAddI:
3235       case Op_GetAndAddB:
3236       case Op_GetAndAddS:
3237       case Op_ShenandoahEnqueueBarrier:
3238       case Op_FastLock:
3239       case Op_FastUnlock:
3240       case Op_Rethrow:
3241       case Op_Return:
3242       case Op_StoreB:
3243       case Op_StoreC:
3244       case Op_StoreD:
3245       case Op_StoreF:
3246       case Op_StoreL:
3247       case Op_StoreLConditional:
3248       case Op_StoreI:
3249       case Op_StoreIConditional:
3250       case Op_StoreVector:
3251       case Op_StrInflatedCopy:
3252       case Op_StrCompressedCopy:
3253       case Op_EncodeP:
3254       case Op_CastP2X:
3255       case Op_SafePoint:
3256       case Op_EncodeISOArray:
3257         strength = STRONG;
3258         break;
3259       case Op_LoadB:
3260       case Op_LoadUB:
3261       case Op_LoadUS:
3262       case Op_LoadD:
3263       case Op_LoadF:
3264       case Op_LoadL:
3265       case Op_LoadI:
3266       case Op_LoadS:
3267       case Op_LoadN:
3268       case Op_LoadP:
3269       case Op_LoadVector: {
3270         const TypePtr* adr_type = n->adr_type();
3271         int alias_idx = Compile::current()->get_alias_index(adr_type);
3272         Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
3273         ciField* field = alias_type->field();
3274         bool is_static = field != NULL && field->is_static();
3275         bool is_final = field != NULL && field->is_final();
3276         bool is_stable = field != NULL && field->is_stable();
3277         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
3278           // Leave strength as is.
3279         } else if (ShenandoahOptimizeInstanceFinals && !is_static && is_final) {
3280           // Leave strength as is.
3281         } else if (ShenandoahOptimizeStableFinals && (is_stable || (adr_type->isa_aryptr() && adr_type->isa_aryptr()->is_stable()))) {
3282           // Leave strength as is.
3283         } else {
3284           strength = WEAK;
3285         }
3286         break;
3287       }
3288       case Op_AryEq: {
3289         Node* n1 = n->in(2);
3290         Node* n2 = n->in(3);
3291         if (!ShenandoahOptimizeStableFinals ||
3292             !n1->bottom_type()->isa_aryptr() || !n1->bottom_type()->isa_aryptr()->is_stable() ||
3293             !n2->bottom_type()->isa_aryptr() || !n2->bottom_type()->isa_aryptr()->is_stable()) {
3294           strength = WEAK;
3295         }
3296         break;
3297       }
3298       case Op_StrEquals:
3299       case Op_StrComp:
3300       case Op_StrIndexOf:
3301       case Op_StrIndexOfChar:
3302         if (!ShenandoahOptimizeStableFinals) {
3303            strength = WEAK;
3304         }
3305         break;
3306       case Op_Conv2B:
3307       case Op_LoadRange:
3308       case Op_LoadKlass:
3309       case Op_LoadNKlass:
3310         // NONE, i.e. leave current strength as is
3311         break;
3312       case Op_AddP:
3313       case Op_CheckCastPP:
3314       case Op_CastPP:
3315       case Op_CMoveP:
3316       case Op_Phi:
3317       case Op_ShenandoahLoadReferenceBarrier:
3318         visit_users = true;
3319         break;
3320       default: {
3321 #ifdef ASSERT
3322         tty->print_cr("Unknown node in get_barrier_strength:");
3323         n->dump(1);
3324         ShouldNotReachHere();
3325 #else
3326         strength = STRONG;
3327 #endif
3328       }
3329     }
3330 #ifdef ASSERT
3331 /*
3332     if (strength == STRONG) {
3333       tty->print("strengthening node: ");
3334       n->dump();
3335     }
3336     */
3337 #endif
3338     stack.pop();
3339     if (visit_users) {
3340       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3341         Node* user = n->fast_out(i);
3342         if (user != NULL) {
3343           stack.push(user, 0);
3344         }
3345       }
3346     }
3347   }
3348   return strength;
3349 }
3350 
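// Recognize the shape where the barrier's input is known non-null because it
// is a CastPP hanging off the true projection of an explicit null check whose
// other path is an uncommon trap; return that uncommon trap call so the
// caller can reuse the existing check, or NULL if the pattern does not match.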
3351 CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
3352   Node* val = in(ValueIn);
3353 
3354   const Type* val_t = igvn.type(val);
3355 
3356   if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
3357       val->Opcode() == Op_CastPP &&
3358       val->in(0) != NULL &&
3359       val->in(0)->Opcode() == Op_IfTrue &&
3360       val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
3361       val->in(0)->in(0)->is_If() &&
3362       val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
3363       val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
3364       val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
3365       val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
3366       val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
3367     assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
3368     CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
3369     return unc;
3370   }
3371   return NULL;
3372 }