1 /*
   2  * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  27 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  28 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  29 #include "gc/shenandoah/shenandoahForwarding.hpp"
  30 #include "gc/shenandoah/shenandoahHeap.hpp"
  31 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  32 #include "gc/shenandoah/shenandoahRuntime.hpp"
  33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/block.hpp"
  36 #include "opto/callnode.hpp"
  37 #include "opto/castnode.hpp"
  38 #include "opto/movenode.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/runtime.hpp"
  42 #include "opto/subnode.hpp"
  43 
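// Late barrier expansion: runs a dedicated PhaseIdealLoop pass
// (LoopOptsShenandoahExpand) to expand enqueue and load reference barriers and,
// if ShenandoahLoopOptsAfterExpansion is set, attempts another round of loop
// optimizations on the expanded graph.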
  44 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  45   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  46   if ((state->enqueue_barriers_count() +
  47        state->load_reference_barriers_count()) > 0) {
  48     bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
  49     C->clear_major_progress();
  50     PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
  51     if (C->failing()) return false;
  52     PhaseIdealLoop::verify(igvn);
  53     DEBUG_ONLY(verify_raw_mem(C->root());)
  54     if (attempt_more_loopopts) {
  55       C->set_major_progress();
  56       int cnt = 0;
  57       if (!C->optimize_loops(cnt, igvn, LoopOptsShenandoahPostExpand)) {
  58         return false;
  59       }
  60       C->clear_major_progress();
  61     }
  62   }
  63   return true;
  64 }
  65 
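// Matches the heap state test shape used by the expanded barriers:
//   if ((LoadB/LoadUB(ThreadLocal + gc_state_offset) & mask) != 0)
// i.e. an If on a Bool(ne) of CmpI((gc_state & mask), 0).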
  66 bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) {
  67   if (!UseShenandoahGC) {
  68     return false;
  69   }
  70   assert(iff->is_If(), "bad input");
  71   if (iff->Opcode() != Op_If) {
  72     return false;
  73   }
  74   Node* bol = iff->in(1);
  75   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  76     return false;
  77   }
  78   Node* cmp = bol->in(1);
  79   if (cmp->Opcode() != Op_CmpI) {
  80     return false;
  81   }
  82   Node* in1 = cmp->in(1);
  83   Node* in2 = cmp->in(2);
  84   if (in2->find_int_con(-1) != 0) {
  85     return false;
  86   }
  87   if (in1->Opcode() != Op_AndI) {
  88     return false;
  89   }
  90   in2 = in1->in(2);
  91   if (in2->find_int_con(-1) != mask) {
  92     return false;
  93   }
  94   in1 = in1->in(1);
  95 
  96   return is_gc_state_load(in1);
  97 }
  98 
  99 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
 100   return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
 101 }
 102 
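// Returns true if n loads the thread-local gc_state byte, i.e. a LoadB/LoadUB
// from ThreadLocal + ShenandoahThreadLocalData::gc_state_offset().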
 103 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
 104   if (!UseShenandoahGC) {
 105     return false;
 106   }
 107   if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
 108     return false;
 109   }
 110   Node* addp = n->in(MemNode::Address);
 111   if (!addp->is_AddP()) {
 112     return false;
 113   }
 114   Node* base = addp->in(AddPNode::Address);
 115   Node* off = addp->in(AddPNode::Offset);
 116   if (base->Opcode() != Op_ThreadLocal) {
 117     return false;
 118   }
 119   if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
 120     return false;
 121   }
 122   return true;
 123 }
 124 
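// Walks control inputs from start up to the dominating stop node and reports
// whether a safepoint (other than a leaf call) can be reached in between.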
 125 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
 126   assert(phase->is_dominator(stop, start), "bad inputs");
 127   ResourceMark rm;
 128   Unique_Node_List wq;
 129   wq.push(start);
 130   for (uint next = 0; next < wq.size(); next++) {
 131     Node *m = wq.at(next);
 132     if (m == stop) {
 133       continue;
 134     }
 135     if (m->is_SafePoint() && !m->is_CallLeaf()) {
 136       return true;
 137     }
 138     if (m->is_Region()) {
 139       for (uint i = 1; i < m->req(); i++) {
 140         wq.push(m->in(i));
 141       }
 142     } else {
 143       wq.push(m->in(0));
 144     }
 145   }
 146   return false;
 147 }
 148 
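// Commons up gc state loads: replaces this load with an identical load at the
// highest dominating control, provided no safepoint (where the gc state could
// change) separates the two.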
 149 bool ShenandoahBarrierC2Support::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) {
 150   assert(is_gc_state_load(n), "inconsistent");
 151   Node* addp = n->in(MemNode::Address);
 152   Node* dominator = NULL;
 153   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 154     Node* u = addp->fast_out(i);
 155     assert(is_gc_state_load(u), "inconsistent");
 156     if (u != n && phase->is_dominator(u->in(0), n->in(0))) {
 157       if (dominator == NULL) {
 158         dominator = u;
 159       } else {
 160         if (phase->dom_depth(u->in(0)) < phase->dom_depth(dominator->in(0))) {
 161           dominator = u;
 162         }
 163       }
 164     }
 165   }
 166   if (dominator == NULL || has_safepoint_between(n->in(0), dominator->in(0), phase)) {
 167     return false;
 168   }
 169   phase->igvn().replace_node(n, dominator);
 170 
 171   return true;
 172 }
 173 
 174 #ifdef ASSERT
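// Verification helper: follows the def chain of an oop input through casts,
// AddPs, phis and CMoves, and returns false if the value cannot be shown to be
// either trivially safe (constant, parameter, fresh allocation, ...) or covered
// by the barrier kind expected for this use (verify_type t).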
 175 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
 176   assert(phis.size() == 0, "");
 177 
 178   while (true) {
 179     if (in->bottom_type() == TypePtr::NULL_PTR) {
 180       if (trace) {tty->print_cr("NULL");}
 181     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
 182       if (trace) {tty->print_cr("Non oop");}
 183     } else if (in->bottom_type()->make_ptr()->make_oopptr() == TypeInstPtr::MIRROR) {
 184       if (trace) {tty->print_cr("Java mirror");}
 185     } else {
 186       if (in->is_ConstraintCast()) {
 187         in = in->in(1);
 188         continue;
 189       } else if (in->is_AddP()) {
 190         assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
 191         in = in->in(AddPNode::Address);
 192         continue;
 193       } else if (in->is_Con()) {
 194         if (trace) {
 195           tty->print("Found constant");
 196           in->dump();
 197         }
 198       } else if (in->Opcode() == Op_Parm) {
 199         if (trace) {
 200           tty->print("Found argument");
 201         }
 202       } else if (in->Opcode() == Op_CreateEx) {
 203         if (trace) {
 204           tty->print("Found create-exception");
 205         }
 206       } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
 207         if (trace) {
 208           tty->print("Found raw LoadP (OSR argument?)");
 209         }
 210       } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 211         if (t == ShenandoahOopStore) {
 212           uint i = 0;
 213           for (; i < phis.size(); i++) {
 214             Node* n = phis.node_at(i);
 215             if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
 216               break;
 217             }
 218           }
 219           if (i == phis.size()) {
 220             return false;
 221           }
 222         }
 223         barriers_used.push(in);
 224         if (trace) {tty->print("Found barrier"); in->dump();}
 225       } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
 226         if (t != ShenandoahOopStore) {
 227           in = in->in(1);
 228           continue;
 229         }
 230         if (trace) {tty->print("Found enqueue barrier"); in->dump();}
 231         phis.push(in, in->req());
 232         in = in->in(1);
 233         continue;
 234       } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
 235         if (trace) {
 236           tty->print("Found alloc");
 237           in->in(0)->dump();
 238         }
 239       } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
 240         if (trace) {
 241           tty->print("Found Java call");
 242         }
 243       } else if (in->is_Phi()) {
 244         if (!visited.test_set(in->_idx)) {
 245           if (trace) {tty->print("Pushed phi:"); in->dump();}
 246           phis.push(in, 2);
 247           in = in->in(1);
 248           continue;
 249         }
 250         if (trace) {tty->print("Already seen phi:"); in->dump();}
 251       } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
 252         if (!visited.test_set(in->_idx)) {
 253           if (trace) {tty->print("Pushed cmovep:"); in->dump();}
 254           phis.push(in, CMoveNode::IfTrue);
 255           in = in->in(CMoveNode::IfFalse);
 256           continue;
 257         }
 258         if (trace) {tty->print("Already seen cmovep:"); in->dump();}
 259       } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
 260         in = in->in(1);
 261         continue;
 262       } else {
 263         return false;
 264       }
 265     }
 266     bool cont = false;
 267     while (phis.is_nonempty()) {
 268       uint idx = phis.index();
 269       Node* phi = phis.node();
 270       if (idx >= phi->req()) {
 271         if (trace) {tty->print("Popped phi:"); phi->dump();}
 272         phis.pop();
 273         continue;
 274       }
 275       if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
 276       in = phi->in(idx);
 277       phis.set_index(idx+1);
 278       cont = true;
 279       break;
 280     }
 281     if (!cont) {
 282       break;
 283     }
 284   }
 285   return true;
 286 }
 287 
 288 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
 289   if (n1 != NULL) {
 290     n1->dump(+10);
 291   }
 292   if (n2 != NULL) {
 293     n2->dump(+10);
 294   }
 295   fatal("%s", msg);
 296 }
 297 
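// Debug-only graph verification: walks the graph from the root and checks that
// every oop-carrying input of loads, stores, compares, intrinsics and runtime
// calls is covered by the expected barriers, using the tables of known runtime
// calls and node opcodes below to decide which inputs must be covered.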
 298 void ShenandoahBarrierC2Support::verify(RootNode* root) {
 299   ResourceMark rm;
 300   Unique_Node_List wq;
 301   GrowableArray<Node*> barriers;
 302   Unique_Node_List barriers_used;
 303   Node_Stack phis(0);
 304   VectorSet visited(Thread::current()->resource_area());
 305   const bool trace = false;
 306   const bool verify_no_useless_barrier = false;
 307 
 308   wq.push(root);
 309   for (uint next = 0; next < wq.size(); next++) {
 310     Node *n = wq.at(next);
 311     if (n->is_Load()) {
 312       const bool trace = false;
 313       if (trace) {tty->print("Verifying"); n->dump();}
 314       if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
 315         if (trace) {tty->print_cr("Load range/klass");}
 316       } else {
 317         const TypePtr* adr_type = n->as_Load()->adr_type();
 318 
 319         if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
 320           if (trace) {tty->print_cr("Mark load");}
 321         } else if (adr_type->isa_instptr() &&
 322                    adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
 323                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
 324           if (trace) {tty->print_cr("Reference.get()");}
 325         } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 326           report_verify_failure("Shenandoah verification: Load should have barriers", n);
 327         }
 328       }
 329     } else if (n->is_Store()) {
 330       const bool trace = false;
 331 
 332       if (trace) {tty->print("Verifying"); n->dump();}
 333       if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
 334         Node* adr = n->in(MemNode::Address);
 335         bool verify = true;
 336 
 337         if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
 338           adr = adr->in(AddPNode::Address);
 339           if (adr->is_AddP()) {
 340             assert(adr->in(AddPNode::Base)->is_top(), "");
 341             adr = adr->in(AddPNode::Address);
 342             if (adr->Opcode() == Op_LoadP &&
 343                 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
 344                 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 345                 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
 346               if (trace) {tty->print_cr("SATB prebarrier");}
 347               verify = false;
 348             }
 349           }
 350         }
 351 
 352         if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 353           report_verify_failure("Shenandoah verification: Store should have barriers", n);
 354         }
 355       }
 356       if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 357         report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
 358       }
 359     } else if (n->Opcode() == Op_CmpP) {
 360       const bool trace = false;
 361 
 362       Node* in1 = n->in(1);
 363       Node* in2 = n->in(2);
 364       if (in1->bottom_type()->isa_oopptr()) {
 365         if (trace) {tty->print("Verifying"); n->dump();}
 366 
 367         bool mark_inputs = false;
 368         if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
 369             (in1->is_Con() || in2->is_Con())) {
 370           if (trace) {tty->print_cr("Comparison against a constant");}
 371           mark_inputs = true;
 372         } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
 373                    (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
 374           if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
 375           mark_inputs = true;
 376         } else {
 377           assert(in2->bottom_type()->isa_oopptr(), "");
 378 
 379           if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
 380               !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
 381             report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
 382           }
 383         }
 384         if (verify_no_useless_barrier &&
 385             mark_inputs &&
 386             (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
 387              !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
 388           phis.clear();
 389           visited.Reset();
 390         }
 391       }
 392     } else if (n->is_LoadStore()) {
 393       if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
 394           !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 395         report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
 396       }
 397 
 398       if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 399         report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
 400       }
 401     } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
 402       CallNode* call = n->as_Call();
 403 
 404       static struct {
 405         const char* name;
 406         struct {
 407           int pos;
 408           verify_type t;
 409         } args[6];
 410       } calls[] = {
 411         "aescrypt_encryptBlock",
 412         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 413           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 414         "aescrypt_decryptBlock",
 415         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 416           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 417         "multiplyToLen",
 418         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
 419           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 420         "squareToLen",
 421         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
 422           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 423         "montgomery_multiply",
 424         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 425           { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 426         "montgomery_square",
 427         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
 428           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 429         "mulAdd",
 430         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 431           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 432         "vectorizedMismatch",
 433         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 434           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 435         "updateBytesCRC32",
 436         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 437           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 438         "updateBytesAdler32",
 439         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 440           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 441         "updateBytesCRC32C",
 442         { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
 443           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 444         "counterMode_AESCrypt",
 445         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 446           { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
 447         "cipherBlockChaining_encryptAESCrypt",
 448         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 449           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 450         "cipherBlockChaining_decryptAESCrypt",
 451         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 452           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 453         "shenandoah_clone_barrier",
 454         { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 455           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 456         "ghash_processBlocks",
 457         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 458           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 459         "sha1_implCompress",
 460         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 461           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 462         "sha256_implCompress",
 463         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 464           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 465         "sha512_implCompress",
 466         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 467           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 468         "sha1_implCompressMB",
 469         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 470           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 471         "sha256_implCompressMB",
 472         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 473           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 474         "sha512_implCompressMB",
 475         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 476           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 477         "encodeBlock",
 478         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 479           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 480       };
 481 
 482       if (call->is_call_to_arraycopystub()) {
 483         Node* dest = NULL;
 484         const TypeTuple* args = n->as_Call()->_tf->domain();
 485         for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 486           if (args->field_at(i)->isa_ptr()) {
 487             j++;
 488             if (j == 2) {
 489               dest = n->in(i);
 490               break;
 491             }
 492           }
 493         }
 494         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
 495             !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
 496           report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
 497         }
 498       } else if (strlen(call->_name) > 5 &&
 499                  !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
 500         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
 501           report_verify_failure("Shenandoah verification: _fill should have barriers", n);
 502         }
 503       } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
 504         // skip
 505       } else {
 506         const int calls_len = sizeof(calls) / sizeof(calls[0]);
 507         int i = 0;
 508         for (; i < calls_len; i++) {
 509           if (!strcmp(calls[i].name, call->_name)) {
 510             break;
 511           }
 512         }
 513         if (i != calls_len) {
 514           const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
 515           for (uint j = 0; j < args_len; j++) {
 516             int pos = calls[i].args[j].pos;
 517             if (pos == -1) {
 518               break;
 519             }
 520             if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
 521               report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 522             }
 523           }
 524           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 525             if (call->in(j)->bottom_type()->make_ptr() &&
 526                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 527               uint k = 0;
 528               for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
 529               if (k == args_len) {
 530                 fatal("arg %d for call %s not covered", j, call->_name);
 531               }
 532             }
 533           }
 534         } else {
 535           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 536             if (call->in(j)->bottom_type()->make_ptr() &&
 537                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 538               fatal("%s not covered", call->_name);
 539             }
 540           }
 541         }
 542       }
 543     } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 544       // skip
 545     } else if (n->is_AddP()
 546                || n->is_Phi()
 547                || n->is_ConstraintCast()
 548                || n->Opcode() == Op_Return
 549                || n->Opcode() == Op_CMoveP
 550                || n->Opcode() == Op_CMoveN
 551                || n->Opcode() == Op_Rethrow
 552                || n->is_MemBar()
 553                || n->Opcode() == Op_Conv2B
 554                || n->Opcode() == Op_SafePoint
 555                || n->is_CallJava()
 556                || n->Opcode() == Op_Unlock
 557                || n->Opcode() == Op_EncodeP
 558                || n->Opcode() == Op_DecodeN) {
 559       // nothing to do
 560     } else {
 561       static struct {
 562         int opcode;
 563         struct {
 564           int pos;
 565           verify_type t;
 566         } inputs[2];
 567       } others[] = {
 568         Op_FastLock,
 569         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 570         Op_Lock,
 571         { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
 572         Op_ArrayCopy,
 573         { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
 574         Op_StrCompressedCopy,
 575         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 576         Op_StrInflatedCopy,
 577         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 578         Op_AryEq,
 579         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 580         Op_StrIndexOf,
 581         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 582         Op_StrComp,
 583         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 584         Op_StrEquals,
 585         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 586         Op_EncodeISOArray,
 587         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 588         Op_HasNegatives,
 589         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
 590         Op_CastP2X,
 591         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 592         Op_StrIndexOfChar,
 593         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 594       };
 595 
 596       const int others_len = sizeof(others) / sizeof(others[0]);
 597       int i = 0;
 598       for (; i < others_len; i++) {
 599         if (others[i].opcode == n->Opcode()) {
 600           break;
 601         }
 602       }
 603       uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
 604       if (i != others_len) {
 605         const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
 606         for (uint j = 0; j < inputs_len; j++) {
 607           int pos = others[i].inputs[j].pos;
 608           if (pos == -1) {
 609             break;
 610           }
 611           if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
 612             report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 613           }
 614         }
 615         for (uint j = 1; j < stop; j++) {
 616           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 617               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 618             uint k = 0;
 619             for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
 620             if (k == inputs_len) {
 621               fatal("arg %d for node %s not covered", j, n->Name());
 622             }
 623           }
 624         }
 625       } else {
 626         for (uint j = 1; j < stop; j++) {
 627           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 628               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 629             fatal("%s not covered", n->Name());
 630           }
 631         }
 632       }
 633     }
 634 
 635     if (n->is_SafePoint()) {
 636       SafePointNode* sfpt = n->as_SafePoint();
 637       if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
 638         for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
 639           if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 640             phis.clear();
 641             visited.Reset();
 642           }
 643         }
 644       }
 645     }
 646   }
 647 
 648   if (verify_no_useless_barrier) {
 649     for (int i = 0; i < barriers.length(); i++) {
 650       Node* n = barriers.at(i);
 651       if (!barriers_used.member(n)) {
 652         tty->print("XXX useless barrier"); n->dump(-2);
 653         ShouldNotReachHere();
 654       }
 655     }
 656   }
 657 }
 658 #endif
 659 
 660 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
 661   // That both nodes have the same control is not sufficient to prove
 662   // domination; verify that there's no path from d to n.
 663   ResourceMark rm;
 664   Unique_Node_List wq;
 665   wq.push(d);
 666   for (uint next = 0; next < wq.size(); next++) {
 667     Node *m = wq.at(next);
 668     if (m == n) {
 669       return false;
 670     }
 671     if (m->is_Phi() && m->in(0)->is_Loop()) {
 672       assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
 673     } else {
 674       for (uint i = 0; i < m->req(); i++) {
 675         if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
 676           wq.push(m->in(i));
 677         }
 678       }
 679     }
 680   }
 681   return true;
 682 }
 683 
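// Returns true if d (at control d_c) is known to dominate n (at control n_c),
// falling back to the same-control walk above when both share a control node.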
 684 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
 685   if (d_c != n_c) {
 686     return phase->is_dominator(d_c, n_c);
 687   }
 688   return is_dominator_same_ctrl(d_c, d, n, phase);
 689 }
 690 
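// Steps from mem to the previous memory state for the given alias index.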
 691 Node* next_mem(Node* mem, int alias) {
 692   Node* res = NULL;
 693   if (mem->is_Proj()) {
 694     res = mem->in(0);
 695   } else if (mem->is_SafePoint() || mem->is_MemBar()) {
 696     res = mem->in(TypeFunc::Memory);
 697   } else if (mem->is_Phi()) {
 698     res = mem->in(1);
 699   } else if (mem->is_MergeMem()) {
 700     res = mem->as_MergeMem()->memory_at(alias);
 701   } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
 702     assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
 703     res = mem->in(MemNode::Memory);
 704   } else {
 705 #ifdef ASSERT
 706     mem->dump();
 707 #endif
 708     ShouldNotReachHere();
 709   }
 710   return res;
 711 }
 712 
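// Verifies that the control path from c up to dom contains no branches other
// than uncommon trap projections. Returns NULL if the path is branch free, the
// single tolerated projection when allow_one_proj is set, or NodeSentinel if an
// unsupported control flow shape is encountered.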
 713 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
 714   Node* iffproj = NULL;
 715   while (c != dom) {
 716     Node* next = phase->idom(c);
 717     assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
 718     if (c->is_Region()) {
 719       ResourceMark rm;
 720       Unique_Node_List wq;
 721       wq.push(c);
 722       for (uint i = 0; i < wq.size(); i++) {
 723         Node *n = wq.at(i);
 724         if (n == next) {
 725           continue;
 726         }
 727         if (n->is_Region()) {
 728           for (uint j = 1; j < n->req(); j++) {
 729             wq.push(n->in(j));
 730           }
 731         } else {
 732           wq.push(n->in(0));
 733         }
 734       }
 735       for (uint i = 0; i < wq.size(); i++) {
 736         Node *n = wq.at(i);
 737         assert(n->is_CFG(), "");
 738         if (n->is_Multi()) {
 739           for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 740             Node* u = n->fast_out(j);
 741             if (u->is_CFG()) {
 742               if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 743                 return NodeSentinel;
 744               }
 745             }
 746           }
 747         }
 748       }
 749     } else  if (c->is_Proj()) {
 750       if (c->is_IfProj()) {
 751         if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
 752           // continue;
 753         } else {
 754           if (!allow_one_proj) {
 755             return NodeSentinel;
 756           }
 757           if (iffproj == NULL) {
 758             iffproj = c;
 759           } else {
 760             return NodeSentinel;
 761           }
 762         }
 763       } else if (c->Opcode() == Op_JumpProj) {
 764         return NodeSentinel; // unsupported
 765       } else if (c->Opcode() == Op_CatchProj) {
 766         return NodeSentinel; // unsupported
 767       } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
 768         return NodeSentinel; // unsupported
 769       } else {
 770         assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
 771       }
 772     }
 773     c = next;
 774   }
 775   return iffproj;
 776 }
 777 
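// Walks memory edges up from mem until it reaches a memory state whose control
// strictly dominates ctrl; returns that state (with its control in mem_ctrl),
// or NULL if the walk revisits a memory state.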
 778 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
 779   ResourceMark rm;
 780   VectorSet wq(Thread::current()->resource_area());
 781   wq.set(mem->_idx);
 782   mem_ctrl = phase->ctrl_or_self(mem);
 783   while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
 784     mem = next_mem(mem, alias);
 785     if (wq.test_set(mem->_idx)) {
 786       return NULL;
 787     }
 788     mem_ctrl = phase->ctrl_or_self(mem);
 789   }
 790   if (mem->is_MergeMem()) {
 791     mem = mem->as_MergeMem()->memory_at(alias);
 792     mem_ctrl = phase->ctrl_or_self(mem);
 793   }
 794   return mem;
 795 }
 796 
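// Finds the bottom (TypePtr::BOTTOM) memory state live at ctrl by walking up
// the dominator tree to the closest region phi, call projection or other
// memory-producing node.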
 797 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
 798   Node* mem = NULL;
 799   Node* c = ctrl;
 800   do {
 801     if (c->is_Region()) {
 802       Node* phi_bottom = NULL;
 803       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
 804         Node* u = c->fast_out(i);
 805         if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
 806           if (u->adr_type() == TypePtr::BOTTOM) {
 807             mem = u;
 808           }
 809         }
 810       }
 811     } else {
 812       if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
 813         CallProjections projs;
 814         c->as_Call()->extract_projections(&projs, true, false);
 815         if (projs.fallthrough_memproj != NULL) {
 816           if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
 817             if (projs.catchall_memproj == NULL) {
 818               mem = projs.fallthrough_memproj;
 819             } else {
 820               if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
 821                 mem = projs.fallthrough_memproj;
 822               } else {
 823                 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
 824                 mem = projs.catchall_memproj;
 825               }
 826             }
 827           }
 828         } else {
 829           Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
 830           if (proj != NULL &&
 831               proj->adr_type() == TypePtr::BOTTOM) {
 832             mem = proj;
 833           }
 834         }
 835       } else {
 836         for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
 837           Node* u = c->fast_out(i);
 838           if (u->is_Proj() &&
 839               u->bottom_type() == Type::MEMORY &&
 840               u->adr_type() == TypePtr::BOTTOM) {
 841               assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
 842               assert(mem == NULL, "only one proj");
 843               mem = u;
 844           }
 845         }
 846         assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
 847       }
 848     }
 849     c = phase->idom(c);
 850   } while (mem == NULL);
 851   return mem;
 852 }
 853 
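// Collects the non-CFG uses of n that are pinned at ctrl (skipping loop phis
// fed by n through the backedge) so that their control can be updated when the
// barrier is expanded.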
 854 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
 855   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 856     Node* u = n->fast_out(i);
 857     if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
 858       uses.push(u);
 859     }
 860   }
 861 }
 862 
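// Replaces the outer strip-mined loop head and its loop-end If with plain
// LoopNode/IfNode clones so the loop nest no longer appears strip mined.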
 863 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
 864   OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
 865   Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
 866   phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
 867   Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
 868   phase->register_control(new_le, phase->get_loop(le), le->in(0));
 869   phase->lazy_replace(outer, new_outer);
 870   phase->lazy_replace(le, new_le);
 871   inner->clear_strip_mined();
 872 }
 873 
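// Emits the heap stability check:
//   if ((gc_state & HAS_FORWARDED) != 0)
// On return, ctrl is the projection where forwarded objects may exist and
// heap_stable_ctrl is the stable projection.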
 874 void ShenandoahBarrierC2Support::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
 875                                                   PhaseIdealLoop* phase) {
 876   IdealLoopTree* loop = phase->get_loop(ctrl);
 877   Node* thread = new ThreadLocalNode();
 878   phase->register_new_node(thread, ctrl);
 879   Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 880   phase->set_ctrl(offset, phase->C->root());
 881   Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
 882   phase->register_new_node(gc_state_addr, ctrl);
 883   uint gc_state_idx = Compile::AliasIdxRaw;
 884   const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
 885   debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
 886 
 887   Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
 888   phase->register_new_node(gc_state, ctrl);
 889   Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED));
 890   phase->register_new_node(heap_stable_and, ctrl);
 891   Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
 892   phase->register_new_node(heap_stable_cmp, ctrl);
 893   Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
 894   phase->register_new_node(heap_stable_test, ctrl);
 895   IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 896   phase->register_control(heap_stable_iff, loop, ctrl);
 897 
 898   heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
 899   phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
 900   ctrl = new IfTrueNode(heap_stable_iff);
 901   phase->register_control(ctrl, loop, heap_stable_iff);
 902 
 903   assert(is_heap_stable_test(heap_stable_iff), "Should match the shape");
 904 }
 905 
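// Emits a null check for val if its type may include NULL. On return, ctrl is
// the not-null projection and null_ctrl the null one; nothing is emitted for
// provably non-null values.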
 906 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
 907   const Type* val_t = phase->igvn().type(val);
 908   if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
 909     IdealLoopTree* loop = phase->get_loop(ctrl);
 910     Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
 911     phase->register_new_node(null_cmp, ctrl);
 912     Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
 913     phase->register_new_node(null_test, ctrl);
 914     IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
 915     phase->register_control(null_iff, loop, ctrl);
 916     ctrl = new IfTrueNode(null_iff);
 917     phase->register_control(ctrl, loop, null_iff);
 918     null_ctrl = new IfFalseNode(null_iff);
 919     phase->register_control(null_ctrl, loop, null_iff);
 920   }
 921 }
 922 
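// Clones the null-check If guarding an uncommon trap onto the new control c and
// returns a fresh CastPP of the uncasted value, pinned on the cloned not-null
// projection.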
 923 Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
 924   IdealLoopTree *loop = phase->get_loop(c);
 925   Node* iff = unc_ctrl->in(0);
 926   assert(iff->is_If(), "broken");
 927   Node* new_iff = iff->clone();
 928   new_iff->set_req(0, c);
 929   phase->register_control(new_iff, loop, c);
 930   Node* iffalse = new IfFalseNode(new_iff->as_If());
 931   phase->register_control(iffalse, loop, new_iff);
 932   Node* iftrue = new IfTrueNode(new_iff->as_If());
 933   phase->register_control(iftrue, loop, new_iff);
 934   c = iftrue;
 935   const Type *t = phase->igvn().type(val);
 936   assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
 937   Node* uncasted_val = val->in(1);
 938   val = new CastPPNode(uncasted_val, t);
 939   val->init_req(0, c);
 940   phase->register_new_node(val, c);
 941   return val;
 942 }
 943 
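// After the null check has been cloned, replaces the old trap-side projection
// with the new control as input of the uncommon trap (or of the region merging
// into it) and migrates the nodes pinned on that projection.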
 944 void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
 945                                                 Unique_Node_List& uses, PhaseIdealLoop* phase) {
 946   IfNode* iff = unc_ctrl->in(0)->as_If();
 947   Node* proj = iff->proj_out(0);
 948   assert(proj != unc_ctrl, "bad projection");
 949   Node* use = proj->unique_ctrl_out();
 950 
 951   assert(use == unc || use->is_Region(), "what else?");
 952 
 953   uses.clear();
 954   if (use == unc) {
 955     phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
 956     for (uint i = 1; i < unc->req(); i++) {
 957       Node* n = unc->in(i);
 958       if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
 959         uses.push(n);
 960       }
 961     }
 962   } else {
 963     assert(use->is_Region(), "what else?");
 964     uint idx = 1;
 965     for (; use->in(idx) != proj; idx++);
 966     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
 967       Node* u = use->fast_out(i);
 968       if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
 969         uses.push(u->in(idx));
 970       }
 971     }
 972   }
 973   for(uint next = 0; next < uses.size(); next++ ) {
 974     Node *n = uses.at(next);
 975     assert(phase->get_ctrl(n) == proj, "bad control");
 976     phase->set_ctrl_and_loop(n, new_unc_ctrl);
 977     if (n->in(0) == proj) {
 978       phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
 979     }
 980     for (uint i = 0; i < n->req(); i++) {
 981       Node* m = n->in(i);
 982       if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
 983         uses.push(m);
 984       }
 985     }
 986   }
 987 
 988   phase->igvn().rehash_node_delayed(use);
 989   int nb = use->replace_edge(proj, new_unc_ctrl);
 990   assert(nb == 1, "only use expected");
 991 }
 992 
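// Emits the collection set membership test: indexes the in-cset fast test table
// with (obj address >> region_size_bytes_shift) and branches on the loaded
// byte. On return, ctrl is the in-cset projection and not_cset_ctrl the
// not-in-cset projection.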
 993 void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
 994   IdealLoopTree *loop = phase->get_loop(ctrl);
 995   Node* raw_rbtrue = new CastP2XNode(ctrl, val);
 996   phase->register_new_node(raw_rbtrue, ctrl);
 997   Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
 998   phase->register_new_node(cset_offset, ctrl);
 999   Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
1000   phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
1001   Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
1002   phase->register_new_node(in_cset_fast_test_adr, ctrl);
1003   uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
1004   const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
1005   debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
1006   Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
1007   phase->register_new_node(in_cset_fast_test_load, ctrl);
1008   Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
1009   phase->register_new_node(in_cset_fast_test_cmp, ctrl);
1010   Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
1011   phase->register_new_node(in_cset_fast_test_test, ctrl);
1012   IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
1013   phase->register_control(in_cset_fast_test_iff, loop, ctrl);
1014 
1015   not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
1016   phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);
1017 
1018   ctrl = new IfFalseNode(in_cset_fast_test_iff);
1019   phase->register_control(ctrl, loop, in_cset_fast_test_iff);
1020 }
1021 
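// Emits the call to the load reference barrier runtime stub. The call takes the
// oop and the address it was loaded from and returns the canonical oop; ctrl,
// val and result_mem are updated to the call's projections.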
1022 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) {
1023   IdealLoopTree*loop = phase->get_loop(ctrl);
1024   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
1025 
1026   // The slow path stub consumes and produces raw memory in addition
1027   // to the existing memory edges
1028   Node* base = find_bottom_mem(ctrl, phase);
1029   MergeMemNode* mm = MergeMemNode::make(base);
1030   mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1031   phase->register_new_node(mm, ctrl);
1032 
1033   address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
1034           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) :
1035           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier);
1036 
1037   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(),
1038                                 target,
1039                                 "shenandoah_load_reference_barrier", TypeRawPtr::BOTTOM);
1040   call->init_req(TypeFunc::Control, ctrl);
1041   call->init_req(TypeFunc::I_O, phase->C->top());
1042   call->init_req(TypeFunc::Memory, mm);
1043   call->init_req(TypeFunc::FramePtr, phase->C->top());
1044   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1045   call->init_req(TypeFunc::Parms, val);
1046   call->init_req(TypeFunc::Parms+1, load_addr);
1047   phase->register_control(call, loop, ctrl);
1048   ctrl = new ProjNode(call, TypeFunc::Control);
1049   phase->register_control(ctrl, loop, call);
1050   result_mem = new ProjNode(call, TypeFunc::Memory);
1051   phase->register_new_node(result_mem, call);
1052   val = new ProjNode(call, TypeFunc::Parms);
1053   phase->register_new_node(val, call);
1054   val = new CheckCastPPNode(ctrl, val, obj_type);
1055   phase->register_new_node(val, ctrl);
1056 }
1057 
1058 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
1059   Node* ctrl = phase->get_ctrl(barrier);
1060   Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
1061 
1062   // Update the control of all nodes that should be after the
1063   // barrier control flow
1064   uses.clear();
1065   // Every node that is control dependent on the barrier's input
1066   // control will be after the expanded barrier. The raw memory (if
 1067   // it is control dependent on the barrier's input control)
1068   // must stay above the barrier.
1069   uses_to_ignore.clear();
1070   if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1071     uses_to_ignore.push(init_raw_mem);
1072   }
1073   for (uint next = 0; next < uses_to_ignore.size(); next++) {
1074     Node *n = uses_to_ignore.at(next);
1075     for (uint i = 0; i < n->req(); i++) {
1076       Node* in = n->in(i);
1077       if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
1078         uses_to_ignore.push(in);
1079       }
1080     }
1081   }
1082   for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
1083     Node* u = ctrl->fast_out(i);
1084     if (u->_idx < last &&
1085         u != barrier &&
1086         !uses_to_ignore.member(u) &&
1087         (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
1088         (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
1089       Node* old_c = phase->ctrl_or_self(u);
1090       Node* c = old_c;
1091       if (c != ctrl ||
1092           is_dominator_same_ctrl(old_c, barrier, u, phase) ||
1093           ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
1094         phase->igvn().rehash_node_delayed(u);
1095         int nb = u->replace_edge(ctrl, region);
1096         if (u->is_CFG()) {
1097           if (phase->idom(u) == ctrl) {
1098             phase->set_idom(u, region, phase->dom_depth(region));
1099           }
1100         } else if (phase->get_ctrl(u) == ctrl) {
1101           assert(u != init_raw_mem, "should leave input raw mem above the barrier");
1102           uses.push(u);
1103         }
1104         assert(nb == 1, "more than 1 ctrl input?");
1105         --i, imax -= nb;
1106       }
1107     }
1108   }
1109 }
1110 
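// For a barrier that was cloned across a call's catch projections, builds phis
// at the regions between c and ctrl that merge n (fallthrough path) with
// n_clone (catchall path).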
1111 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1112   Node* region = NULL;
1113   while (c != ctrl) {
1114     if (c->is_Region()) {
1115       region = c;
1116     }
1117     c = phase->idom(c);
1118   }
1119   assert(region != NULL, "");
1120   Node* phi = new PhiNode(region, n->bottom_type());
1121   for (uint j = 1; j < region->req(); j++) {
1122     Node* in = region->in(j);
1123     if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1124       phi->init_req(j, n);
1125     } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1126       phi->init_req(j, n_clone);
1127     } else {
1128       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1129     }
1130   }
1131   phase->register_new_node(phi, region);
1132   return phi;
1133 }
1134 
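// Expansion driver: un-strip-mines loops that barrier expansion would break,
// then pins each load reference barrier at a suitable control point (pinning it
// at a dominating null-check uncommon trap projection, hoisting it above
// rethrow calls, or cloning it across call catch projections) before expanding
// it.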
1135 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1136   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1137 
1138   Unique_Node_List uses;
1139   for (int i = 0; i < state->enqueue_barriers_count(); i++) {
1140     Node* barrier = state->enqueue_barrier(i);
1141     Node* ctrl = phase->get_ctrl(barrier);
1142     IdealLoopTree* loop = phase->get_loop(ctrl);
1143     if (loop->_head->is_OuterStripMinedLoop()) {
1144       // Expanding a barrier here will break loop strip mining
1145       // verification. Transform the loop so the loop nest doesn't
1146       // appear as strip mined.
1147       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1148       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1149     }
1150   }
1151 
1152   Node_Stack stack(0);
1153   Node_List clones;
1154   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1155     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1156     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1157       continue;
1158     }
1159 
1160     Node* ctrl = phase->get_ctrl(lrb);
1161     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1162 
1163     CallStaticJavaNode* unc = NULL;
1164     Node* unc_ctrl = NULL;
1165     Node* uncasted_val = val;
1166 
1167     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1168       Node* u = lrb->fast_out(i);
1169       if (u->Opcode() == Op_CastPP &&
1170           u->in(0) != NULL &&
1171           phase->is_dominator(u->in(0), ctrl)) {
1172         const Type* u_t = phase->igvn().type(u);
1173 
1174         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1175             u->in(0)->Opcode() == Op_IfTrue &&
1176             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1177             u->in(0)->in(0)->is_If() &&
1178             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1179             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1180             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1181             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1182             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1183           IdealLoopTree* loop = phase->get_loop(ctrl);
1184           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1185 
1186           if (!unc_loop->is_member(loop)) {
1187             continue;
1188           }
1189 
1190           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1191           assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1192           if (branch == NodeSentinel) {
1193             continue;
1194           }
1195 
1196           phase->igvn().replace_input_of(u, 1, val);
1197           phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
1198           phase->set_ctrl(u, u->in(0));
1199           phase->set_ctrl(lrb, u->in(0));
1200           unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1201           unc_ctrl = u->in(0);
1202           val = u;
1203 
1204           for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
1205             Node* u = val->fast_out(j);
1206             if (u == lrb) continue;
1207             phase->igvn().rehash_node_delayed(u);
1208             int nb = u->replace_edge(val, lrb);
1209             --j; jmax -= nb;
1210           }
1211 
1212           RegionNode* r = new RegionNode(3);
1213           IfNode* iff = unc_ctrl->in(0)->as_If();
1214 
1215           Node* ctrl_use = unc_ctrl->unique_ctrl_out();
1216           Node* unc_ctrl_clone = unc_ctrl->clone();
1217           phase->register_control(unc_ctrl_clone, loop, iff);
1218           Node* c = unc_ctrl_clone;
1219           Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
1220           r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
1221 
1222           phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
1223           phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
1224           phase->lazy_replace(c, unc_ctrl);
1225           c = NULL;
1226           phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
1227           phase->set_ctrl(val, unc_ctrl_clone);
1228 
1229           IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
1230           fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
1231           Node* iff_proj = iff->proj_out(0);
1232           r->init_req(2, iff_proj);
1233           phase->register_control(r, phase->ltree_root(), iff);
1234 
1235           Node* new_bol = new_iff->in(1)->clone();
1236           Node* new_cmp = new_bol->in(1)->clone();
1237           assert(new_cmp->Opcode() == Op_CmpP, "broken");
1238           assert(new_cmp->in(1) == val->in(1), "broken");
1239           new_bol->set_req(1, new_cmp);
1240           new_cmp->set_req(1, lrb);
1241           phase->register_new_node(new_bol, new_iff->in(0));
1242           phase->register_new_node(new_cmp, new_iff->in(0));
1243           phase->igvn().replace_input_of(new_iff, 1, new_bol);
1244           phase->igvn().replace_input_of(new_cast, 1, lrb);
1245 
1246           for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1247             Node* u = lrb->fast_out(i);
1248             if (u == new_cast || u == new_cmp) {
1249               continue;
1250             }
1251             phase->igvn().rehash_node_delayed(u);
1252             int nb = u->replace_edge(lrb, new_cast);
1253             assert(nb > 0, "no update?");
1254             --i; imax -= nb;
1255           }
1256 
1257           for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1258             Node* u = val->fast_out(i);
1259             if (u == lrb) {
1260               continue;
1261             }
1262             phase->igvn().rehash_node_delayed(u);
1263             int nb = u->replace_edge(val, new_cast);
1264             assert(nb > 0, "no update?");
1265             --i; imax -= nb;
1266           }
1267 
1268           ctrl = unc_ctrl_clone;
1269           phase->set_ctrl_and_loop(lrb, ctrl);
1270           break;
1271         }
1272       }
1273     }
1274     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1275       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1276       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1277         // The rethrow call may have too many projections to be
1278         // properly handled here. Given there's no reason for a
1279         // barrier to depend on the call, move it above the call
1280         if (phase->get_ctrl(val) == ctrl) {
1281           assert(val->Opcode() == Op_DecodeN, "unexpected node");
1282           assert(phase->is_dominator(phase->get_ctrl(val->in(1)), call->in(0)), "Load is too low");
1283           phase->set_ctrl(val, call->in(0));
1284         }
1285         phase->set_ctrl(lrb, call->in(0));
1286         continue;
1287       }
1288       CallProjections projs;
1289       call->extract_projections(&projs, false, false);
1290 
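      // The barrier is pinned on a projection of a Java call: clone it so the
      // exception path uses a copy registered on the catch-all projection
      // while the fall-through path keeps the original. Data nodes squeezed
      // between the call and its catch projections are cloned the same way,
      // and uses that merge both paths are rewired through phis created by
      // create_phis_on_call_return().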
1291       Node* lrb_clone = lrb->clone();
1292       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1293       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1294 
1295       stack.push(lrb, 0);
1296       clones.push(lrb_clone);
1297 
1298       do {
1299         assert(stack.size() == clones.size(), "");
1300         Node* n = stack.node();
1301 #ifdef ASSERT
1302         if (n->is_Load()) {
1303           Node* mem = n->in(MemNode::Memory);
1304           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1305             Node* u = mem->fast_out(j);
1306             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1307           }
1308         }
1309 #endif
1310         uint idx = stack.index();
1311         Node* n_clone = clones.at(clones.size()-1);
1312         if (idx < n->outcnt()) {
1313           Node* u = n->raw_out(idx);
1314           Node* c = phase->ctrl_or_self(u);
1315           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1316             stack.set_index(idx+1);
1317             assert(!u->is_CFG(), "");
1318             stack.push(u, 0);
1319             Node* u_clone = u->clone();
1320             int nb = u_clone->replace_edge(n, n_clone);
1321             assert(nb > 0, "should have replaced some uses");
1322             phase->register_new_node(u_clone, projs.catchall_catchproj);
1323             clones.push(u_clone);
1324             phase->set_ctrl(u, projs.fallthrough_catchproj);
1325           } else {
1326             bool replaced = false;
1327             if (u->is_Phi()) {
1328               for (uint k = 1; k < u->req(); k++) {
1329                 if (u->in(k) == n) {
1330                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1331                     phase->igvn().replace_input_of(u, k, n_clone);
1332                     replaced = true;
1333                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1334                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1335                     replaced = true;
1336                   }
1337                 }
1338               }
1339             } else {
1340               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1341                 phase->igvn().rehash_node_delayed(u);
1342                 int nb = u->replace_edge(n, n_clone);
1343                 assert(nb > 0, "should have replaced some uses");
1344                 replaced = true;
1345               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1346                 phase->igvn().rehash_node_delayed(u);
1347                 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1348                 assert(nb > 0, "should have replaced some uses");
1349                 replaced = true;
1350               }
1351             }
1352             if (!replaced) {
1353               stack.set_index(idx+1);
1354             }
1355           }
1356         } else {
1357           stack.pop();
1358           clones.pop();
1359         }
1360       } while (stack.size() > 0);
1361       assert(stack.size() == 0 && clones.size() == 0, "");
1362     }
1363   }
1364 
1365   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1366     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1367     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1368       continue;
1369     }
1370     Node* ctrl = phase->get_ctrl(lrb);
1371     IdealLoopTree* loop = phase->get_loop(ctrl);
1372     if (loop->_head->is_OuterStripMinedLoop()) {
1373       // Expanding a barrier here will break loop strip mining
1374       // verification. Transform the loop so the loop nest doesn't
1375       // appear as strip mined.
1376       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1377       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1378     }
1379   }
1380 
1381   // Expand load-reference-barriers
1382   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1383   Unique_Node_List uses_to_ignore;
1384   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1385     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1386     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1387       phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1388       continue;
1389     }
1390     uint last = phase->C->unique();
1391     Node* ctrl = phase->get_ctrl(lrb);
1392     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1394 
1395     Node* orig_ctrl = ctrl;
1396 
1397     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1398     Node* init_raw_mem = raw_mem;
1399     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1400 
1401     IdealLoopTree *loop = phase->get_loop(ctrl);
1402     CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
1403     Node* unc_ctrl = NULL;
1404     if (unc != NULL) {
1405       if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
1406         unc = NULL;
1407       } else {
1408         unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
1409       }
1410     }
1411 
1412     Node* uncasted_val = val;
1413     if (unc != NULL) {
1414       uncasted_val = val->in(1);
1415     }
1416 
1417     Node* heap_stable_ctrl = NULL;
1418     Node* null_ctrl = NULL;
1419 
1420     assert(val->bottom_type()->make_oopptr(), "need oop");
1421     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1422 
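    // The expanded barrier is a multi-way merge: the heap-stable, null and
    // not-in-cset paths return the input value unchanged, and only values in
    // the collection set take the _evac_path that calls the LRB runtime stub.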
1423     enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT };
1424     Node* region = new RegionNode(PATH_LIMIT);
1425     Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
1426     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1427 
1428     // Stable path.
1429     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1430     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1431 
1432     // Heap stable case
1433     region->init_req(_heap_stable, heap_stable_ctrl);
1434     val_phi->init_req(_heap_stable, uncasted_val);
1435     raw_mem_phi->init_req(_heap_stable, raw_mem);
1436 
1437     Node* reg2_ctrl = NULL;
1438     // Null case
1439     test_null(ctrl, val, null_ctrl, phase);
1440     if (null_ctrl != NULL) {
1441       reg2_ctrl = null_ctrl->in(0);
1442       region->init_req(_null_path, null_ctrl);
1443       val_phi->init_req(_null_path, uncasted_val);
1444       raw_mem_phi->init_req(_null_path, raw_mem);
1445     } else {
1446       region->del_req(_null_path);
1447       val_phi->del_req(_null_path);
1448       raw_mem_phi->del_req(_null_path);
1449     }
1450 
1451     // Test for in-cset.
1452     // Wires !in_cset(obj) to slot 2 of region and phis
1453     Node* not_cset_ctrl = NULL;
1454     in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
1455     if (not_cset_ctrl != NULL) {
1456       if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
1457       region->init_req(_not_cset, not_cset_ctrl);
1458       val_phi->init_req(_not_cset, uncasted_val);
1459       raw_mem_phi->init_req(_not_cset, raw_mem);
1460     }
1461 
1462     // Resolve object when orig-value is in cset.
1463     // Make the unconditional resolve for fwdptr.
1464     Node* new_val = uncasted_val;
1465     if (unc_ctrl != NULL) {
1466       // Clone the null check in this branch to allow implicit null check
1467       new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
1468       fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
1469 
1470       IfNode* iff = unc_ctrl->in(0)->as_If();
1471       phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
1472     }
1473 
1474     // Call lrb-stub and wire up that path in the _evac_path slot
1475     Node* result_mem = NULL;
1476 
1477     Node* fwd = new_val;
1478     Node* addr;
1479     if (ShenandoahSelfFixing) {
1480       VectorSet visited(Thread::current()->resource_area());
1481       addr = get_load_addr(phase, visited, lrb);
1482     } else {
1483       addr = phase->igvn().zerocon(T_OBJECT);
1484     }
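    // If the load address is an AddP (chain) off the loaded object's base,
    // rebuild it on a CheckCastPP of that base pinned at the barrier, so the
    // address passed to the self-fixing stub is computed below the barrier's
    // control.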
1485     if (addr->Opcode() == Op_AddP) {
1486       Node* orig_base = addr->in(AddPNode::Base);
1487       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), true);
1488       phase->register_new_node(base, ctrl);
1489       if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1490         // Field access
1491         addr = addr->clone();
1492         addr->set_req(AddPNode::Base, base);
1493         addr->set_req(AddPNode::Address, base);
1494         phase->register_new_node(addr, ctrl);
1495       } else {
1496         Node* addr2 = addr->in(AddPNode::Address);
1497         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1498               addr2->in(AddPNode::Base) == orig_base) {
1499           addr2 = addr2->clone();
1500           addr2->set_req(AddPNode::Base, base);
1501           addr2->set_req(AddPNode::Address, base);
1502           phase->register_new_node(addr2, ctrl);
1503           addr = addr->clone();
1504           addr->set_req(AddPNode::Base, base);
1505           addr->set_req(AddPNode::Address, addr2);
1506           phase->register_new_node(addr, ctrl);
1507         }
1508       }
1509     }
1510     call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, false, phase);
1511     region->init_req(_evac_path, ctrl);
1512     val_phi->init_req(_evac_path, fwd);
1513     raw_mem_phi->init_req(_evac_path, result_mem);
1514 
1515     phase->register_control(region, loop, heap_stable_iff);
1516     Node* out_val = val_phi;
1517     phase->register_new_node(val_phi, region);
1518     phase->register_new_node(raw_mem_phi, region);
1519 
1520     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1521 
1522     ctrl = orig_ctrl;
1523 
1524     if (unc != NULL) {
1525       for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1526         Node* u = val->fast_out(i);
1527         Node* c = phase->ctrl_or_self(u);
1528         if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
1529           phase->igvn().rehash_node_delayed(u);
1530           int nb = u->replace_edge(val, out_val);
1531           --i; imax -= nb;
1532         }
1533       }
1534       if (val->outcnt() == 0) {
1535         phase->igvn()._worklist.push(val);
1536       }
1537     }
1538     phase->igvn().replace_node(lrb, out_val);
1539 
1540     follow_barrier_uses(out_val, ctrl, uses, phase);
1541 
1542     for (uint next = 0; next < uses.size(); next++) {
1543       Node *n = uses.at(next);
1544       assert(phase->get_ctrl(n) == ctrl, "bad control");
1545       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1546       phase->set_ctrl(n, region);
1547       follow_barrier_uses(n, ctrl, uses, phase);
1548     }
1549 
1550     // The slow path call produces memory: hook the raw memory phi
1551     // from the expanded load reference barrier with the rest of the graph
1552     // which may require adding memory phis at every post dominated
1553     // region and at enclosing loop heads. Use the memory state
1554     // collected in memory_nodes to fix the memory graph. Update that
1555     // memory state as we go.
1556     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1557   }
1558   // Done expanding load-reference-barriers.
1559   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1560 
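  // Expand SATB enqueue barriers: test the heap state, then the thread-local
  // SATB queue index; if the queue has room, store the pre-value into the
  // buffer and decrement the index on the fast path, otherwise call the
  // write_ref_field_pre_entry runtime on the slow path.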
1561   for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
1562     Node* barrier = state->enqueue_barrier(i);
1563     Node* pre_val = barrier->in(1);
1564 
1565     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1566       ShouldNotReachHere();
1567       continue;
1568     }
1569 
1570     Node* ctrl = phase->get_ctrl(barrier);
1571 
1572     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1573       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1574       ctrl = ctrl->in(0)->in(0);
1575       phase->set_ctrl(barrier, ctrl);
1576     } else if (ctrl->is_CallRuntime()) {
1577       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1578       ctrl = ctrl->in(0);
1579       phase->set_ctrl(barrier, ctrl);
1580     }
1581 
1582     Node* init_ctrl = ctrl;
1583     IdealLoopTree* loop = phase->get_loop(ctrl);
1584     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1585     Node* init_raw_mem = raw_mem;
1586     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1587     Node* heap_stable_ctrl = NULL;
1588     Node* null_ctrl = NULL;
1589     uint last = phase->C->unique();
1590 
1591     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1592     Node* region = new RegionNode(PATH_LIMIT);
1593     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1594 
1595     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1596     Node* region2 = new RegionNode(PATH_LIMIT2);
1597     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1598 
1599     // Stable path.
1600     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1601     region->init_req(_heap_stable, heap_stable_ctrl);
1602     phi->init_req(_heap_stable, raw_mem);
1603 
1604     // Null path
1605     Node* reg2_ctrl = NULL;
1606     test_null(ctrl, pre_val, null_ctrl, phase);
1607     if (null_ctrl != NULL) {
1608       reg2_ctrl = null_ctrl->in(0);
1609       region2->init_req(_null_path, null_ctrl);
1610       phi2->init_req(_null_path, raw_mem);
1611     } else {
1612       region2->del_req(_null_path);
1613       phi2->del_req(_null_path);
1614     }
1615 
1616     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1617     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1618     Node* thread = new ThreadLocalNode();
1619     phase->register_new_node(thread, ctrl);
1620     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1621     phase->register_new_node(buffer_adr, ctrl);
1622     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1623     phase->register_new_node(index_adr, ctrl);
1624 
1625     BasicType index_bt = TypeX_X->basic_type();
1626     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
1627     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1628     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1629     phase->register_new_node(index, ctrl);
1630     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1631     phase->register_new_node(index_cmp, ctrl);
1632     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1633     phase->register_new_node(index_test, ctrl);
1634     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1635     if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1636     phase->register_control(queue_full_iff, loop, ctrl);
1637     Node* not_full = new IfTrueNode(queue_full_iff);
1638     phase->register_control(not_full, loop, queue_full_iff);
1639     Node* full = new IfFalseNode(queue_full_iff);
1640     phase->register_control(full, loop, queue_full_iff);
1641 
1642     ctrl = not_full;
1643 
1644     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1645     phase->register_new_node(next_index, ctrl);
1646 
1647     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1648     phase->register_new_node(buffer, ctrl);
1649     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1650     phase->register_new_node(log_addr, ctrl);
1651     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1652     phase->register_new_node(log_store, ctrl);
1653     // update the index
1654     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1655     phase->register_new_node(index_update, ctrl);
1656 
1657     // Fast-path case
1658     region2->init_req(_fast_path, ctrl);
1659     phi2->init_req(_fast_path, index_update);
1660 
1661     ctrl = full;
1662 
1663     Node* base = find_bottom_mem(ctrl, phase);
1664 
1665     MergeMemNode* mm = MergeMemNode::make(base);
1666     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1667     phase->register_new_node(mm, ctrl);
1668 
1669     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1670     call->init_req(TypeFunc::Control, ctrl);
1671     call->init_req(TypeFunc::I_O, phase->C->top());
1672     call->init_req(TypeFunc::Memory, mm);
1673     call->init_req(TypeFunc::FramePtr, phase->C->top());
1674     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1675     call->init_req(TypeFunc::Parms, pre_val);
1676     call->init_req(TypeFunc::Parms+1, thread);
1677     phase->register_control(call, loop, ctrl);
1678 
1679     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1680     phase->register_control(ctrl_proj, loop, call);
1681     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1682     phase->register_new_node(mem_proj, call);
1683 
1684     // Slow-path case
1685     region2->init_req(_slow_path, ctrl_proj);
1686     phi2->init_req(_slow_path, mem_proj);
1687 
1688     phase->register_control(region2, loop, reg2_ctrl);
1689     phase->register_new_node(phi2, region2);
1690 
1691     region->init_req(_heap_unstable, region2);
1692     phi->init_req(_heap_unstable, phi2);
1693 
1694     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1695     phase->register_new_node(phi, region);
1696 
1697     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1698     for (uint next = 0; next < uses.size(); next++) {
1699       Node *n = uses.at(next);
1700       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1701       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1702       phase->set_ctrl(n, region);
1703       follow_barrier_uses(n, init_ctrl, uses, phase);
1704     }
1705     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1706 
1707     phase->igvn().replace_node(barrier, pre_val);
1708   }
1709   assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1710 
1711 }
1712 
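// Walk back from the barrier's value to the load that produced it and return
// that load's address so the self-fixing stub can update the field. Returns a
// zero constant when no single address can be determined (constants, call
// results, atomic updates, ambiguous phis or cmoves).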
1713 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1714   if (visited.test_set(in->_idx)) {
1715     return NULL;
1716   }
1717   switch (in->Opcode()) {
1718     case Op_Proj:
1719       return get_load_addr(phase, visited, in->in(0));
1720     case Op_CastPP:
1721     case Op_CheckCastPP:
1722     case Op_DecodeN:
1723     case Op_EncodeP:
1724       return get_load_addr(phase, visited, in->in(1));
1725     case Op_LoadN:
1726     case Op_LoadP:
1727       return in->in(MemNode::Address);
1728     case Op_CompareAndExchangeN:
1729     case Op_CompareAndExchangeP:
1730     case Op_GetAndSetN:
1731     case Op_GetAndSetP:
1732     case Op_ShenandoahCompareAndExchangeP:
1733     case Op_ShenandoahCompareAndExchangeN:
1734       // These instructions would just have stored a different
1735       // value into the field; there is no point trying to fix it up here.
1736       return phase->igvn().zerocon(T_OBJECT);
1737     case Op_CMoveP:
1738     case Op_CMoveN: {
1739       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1740       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1741       // Handle unambiguous cases: single address reported on both branches.
1742       if (t != NULL && f == NULL) return t;
1743       if (t == NULL && f != NULL) return f;
1744       if (t != NULL && t == f)    return t;
1745       // Ambiguity.
1746       return phase->igvn().zerocon(T_OBJECT);
1747     }
1748     case Op_Phi: {
1749       Node* addr = NULL;
1750       for (uint i = 1; i < in->req(); i++) {
1751         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1752         if (addr == NULL) {
1753           addr = addr1;
1754         }
1755         if (addr != addr1) {
1756           return phase->igvn().zerocon(T_OBJECT);
1757         }
1758       }
1759       return addr;
1760     }
1761     case Op_ShenandoahLoadReferenceBarrier:
1762       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1763     case Op_ShenandoahEnqueueBarrier:
1764       return get_load_addr(phase, visited, in->in(1));
1765     case Op_CallDynamicJava:
1766     case Op_CallLeaf:
1767     case Op_CallStaticJava:
1768     case Op_ConN:
1769     case Op_ConP:
1770     case Op_Parm:
1771     case Op_CreateEx:
1772       return phase->igvn().zerocon(T_OBJECT);
1773     default:
1774 #ifdef ASSERT
1775       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1776 #endif
1777       return phase->igvn().zerocon(T_OBJECT);
1778   }
1779 
1780 }
1781 
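// Hoist the gc state load feeding a heap stable test to the loop entry by
// cloning the load/and/cmp/bool chain there, so the test becomes loop
// invariant and the loop can be unswitched on it.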
1782 void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1783   IdealLoopTree *loop = phase->get_loop(iff);
1784   Node* loop_head = loop->_head;
1785   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1786 
1787   Node* bol = iff->in(1);
1788   Node* cmp = bol->in(1);
1789   Node* andi = cmp->in(1);
1790   Node* load = andi->in(1);
1791 
1792   assert(is_gc_state_load(load), "broken");
1793   if (!phase->is_dominator(load->in(0), entry_c)) {
1794     Node* mem_ctrl = NULL;
1795     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1796     load = load->clone();
1797     load->set_req(MemNode::Memory, mem);
1798     load->set_req(0, entry_c);
1799     phase->register_new_node(load, entry_c);
1800     andi = andi->clone();
1801     andi->set_req(1, load);
1802     phase->register_new_node(andi, entry_c);
1803     cmp = cmp->clone();
1804     cmp->set_req(1, andi);
1805     phase->register_new_node(cmp, entry_c);
1806     bol = bol->clone();
1807     bol->set_req(1, cmp);
1808     phase->register_new_node(bol, entry_c);
1809 
1810     Node* old_bol = iff->in(1);
1811     phase->igvn().replace_input_of(iff, 1, bol);
1812   }
1813 }
1814 
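// Returns true when n is a heap stable test whose region only merges paths
// coming out of another, dominating heap stable test, i.e. two back-to-back
// identical tests that merge_back_to_back_tests() can combine.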
1815 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1816   if (!n->is_If() || n->is_CountedLoopEnd()) {
1817     return false;
1818   }
1819   Node* region = n->in(0);
1820 
1821   if (!region->is_Region()) {
1822     return false;
1823   }
1824   Node* dom = phase->idom(region);
1825   if (!dom->is_If()) {
1826     return false;
1827   }
1828 
1829   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1830     return false;
1831   }
1832 
1833   IfNode* dom_if = dom->as_If();
1834   Node* proj_true = dom_if->proj_out(1);
1835   Node* proj_false = dom_if->proj_out(0);
1836 
1837   for (uint i = 1; i < region->req(); i++) {
1838     if (phase->is_dominator(proj_true, region->in(i))) {
1839       continue;
1840     }
1841     if (phase->is_dominator(proj_false, region->in(i))) {
1842       continue;
1843     }
1844     return false;
1845   }
1846 
1847   return true;
1848 }
1849 
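// Replace the condition of the dominated heap stable test with a phi of
// constants keyed on which projection of the dominating test each region
// path comes from, then let split-if eliminate the redundant test.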
1850 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1851   assert(is_heap_stable_test(n), "no other tests");
1852   if (identical_backtoback_ifs(n, phase)) {
1853     Node* n_ctrl = n->in(0);
1854     if (phase->can_split_if(n_ctrl)) {
1855       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1856       if (is_heap_stable_test(n)) {
1857         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1858         assert(is_gc_state_load(gc_state_load), "broken");
1859         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1860         assert(is_gc_state_load(dom_gc_state_load), "broken");
1861         if (gc_state_load != dom_gc_state_load) {
1862           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1863         }
1864       }
1865       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1866       Node* proj_true = dom_if->proj_out(1);
1867       Node* proj_false = dom_if->proj_out(0);
1868       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1869       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1870 
1871       for (uint i = 1; i < n_ctrl->req(); i++) {
1872         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1873           bolphi->init_req(i, con_true);
1874         } else {
1875           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1876           bolphi->init_req(i, con_false);
1877         }
1878       }
1879       phase->register_new_node(bolphi, n_ctrl);
1880       phase->igvn().replace_input_of(n, 1, bolphi);
1881       phase->do_split_if(n);
1882     }
1883   }
1884 }
1885 
1886 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1887   // Find first invariant test that doesn't exit the loop
1888   LoopNode *head = loop->_head->as_Loop();
1889   IfNode* unswitch_iff = NULL;
1890   Node* n = head->in(LoopNode::LoopBackControl);
1891   int loop_has_sfpts = -1;
1892   while (n != head) {
1893     Node* n_dom = phase->idom(n);
1894     if (n->is_Region()) {
1895       if (n_dom->is_If()) {
1896         IfNode* iff = n_dom->as_If();
1897         if (iff->in(1)->is_Bool()) {
1898           BoolNode* bol = iff->in(1)->as_Bool();
1899           if (bol->in(1)->is_Cmp()) {
1900             // If the condition is invariant and not a loop exit,
1901             // then we have found a candidate for unswitching.
1902             if (is_heap_stable_test(iff) &&
1903                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1904               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1905               if (loop_has_sfpts == -1) {
1906                 for (uint i = 0; i < loop->_body.size(); i++) {
1907                   Node *m = loop->_body[i];
1908                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1909                     loop_has_sfpts = 1;
1910                     break;
1911                   }
1912                 }
1913                 if (loop_has_sfpts == -1) {
1914                   loop_has_sfpts = 0;
1915                 }
1916               }
1917               if (!loop_has_sfpts) {
1918                 unswitch_iff = iff;
1919               }
1920             }
1921           }
1922         }
1923       }
1924     }
1925     n = n_dom;
1926   }
1927   return unswitch_iff;
1928 }
1929 
1930 
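// After barrier expansion: collect gc state loads and heap stable tests,
// common up the loads, merge back-to-back tests, and unswitch innermost
// loops on a loop-invariant heap stable test where profitable.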
1931 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1932   Node_List heap_stable_tests;
1933   Node_List gc_state_loads;
1934   stack.push(phase->C->start(), 0);
1935   do {
1936     Node* n = stack.node();
1937     uint i = stack.index();
1938 
1939     if (i < n->outcnt()) {
1940       Node* u = n->raw_out(i);
1941       stack.set_index(i+1);
1942       if (!visited.test_set(u->_idx)) {
1943         stack.push(u, 0);
1944       }
1945     } else {
1946       stack.pop();
1947       if (ShenandoahCommonGCStateLoads && is_gc_state_load(n)) {
1948         gc_state_loads.push(n);
1949       }
1950       if (n->is_If() && is_heap_stable_test(n)) {
1951         heap_stable_tests.push(n);
1952       }
1953     }
1954   } while (stack.size() > 0);
1955 
1956   bool progress;
1957   do {
1958     progress = false;
1959     for (uint i = 0; i < gc_state_loads.size(); i++) {
1960       Node* n = gc_state_loads.at(i);
1961       if (n->outcnt() != 0) {
1962         progress |= try_common_gc_state_load(n, phase);
1963       }
1964     }
1965   } while (progress);
1966 
1967   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1968     Node* n = heap_stable_tests.at(i);
1969     assert(is_heap_stable_test(n), "only evacuation test");
1970     merge_back_to_back_tests(n, phase);
1971   }
1972 
1973   if (!phase->C->major_progress()) {
1974     VectorSet seen(Thread::current()->resource_area());
1975     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1976       Node* n = heap_stable_tests.at(i);
1977       IdealLoopTree* loop = phase->get_loop(n);
1978       if (loop != phase->ltree_root() &&
1979           loop->_child == NULL &&
1980           !loop->_irreducible) {
1981         LoopNode* head = loop->_head->as_Loop();
1982         if ((!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1983             !seen.test_set(head->_idx)) {
1984           IfNode* iff = find_unswitching_candidate(loop, phase);
1985           if (iff != NULL) {
1986             Node* bol = iff->in(1);
1987             if (head->is_strip_mined()) {
1988               head->verify_strip_mined(0);
1989             }
1990             move_heap_stable_test_out_of_loop(iff, phase);
1991             if (loop->policy_unswitching(phase)) {
1992               if (head->is_strip_mined()) {
1993                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
1994                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
1995               }
1996               phase->do_unswitching(loop, old_new);
1997             } else {
1998               // Not proceeding with unswitching. Move load back in
1999               // the loop.
2000               phase->igvn().replace_input_of(iff, 1, bol);
2001             }
2002           }
2003         }
2004       }
2005     }
2006   }
2007 }
2008 
2009 #ifdef ASSERT
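// Debug-only check of the raw memory graph around expanded LRB stub calls:
// collect the control and raw memory nodes reachable from each call and
// verify that every region merging paths from inside and outside that set
// has a matching raw memory phi.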
2010 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
2011   const bool trace = false;
2012   ResourceMark rm;
2013   Unique_Node_List nodes;
2014   Unique_Node_List controls;
2015   Unique_Node_List memories;
2016 
2017   nodes.push(root);
2018   for (uint next = 0; next < nodes.size(); next++) {
2019     Node *n  = nodes.at(next);
2020     if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
2021       controls.push(n);
2022       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
2023       for (uint next2 = 0; next2 < controls.size(); next2++) {
2024         Node *m = controls.at(next2);
2025         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2026           Node* u = m->fast_out(i);
2027           if (u->is_CFG() && !u->is_Root() &&
2028               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
2029               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
2030             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
2031             controls.push(u);
2032           }
2033         }
2034       }
2035       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
2036       for (uint next2 = 0; next2 < memories.size(); next2++) {
2037         Node *m = memories.at(next2);
2038         assert(m->bottom_type() == Type::MEMORY, "");
2039         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2040           Node* u = m->fast_out(i);
2041           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
2042             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2043             memories.push(u);
2044           } else if (u->is_LoadStore()) {
2045             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
2046             memories.push(u->find_out_with(Op_SCMemProj));
2047           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
2048             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2049             memories.push(u);
2050           } else if (u->is_Phi()) {
2051             assert(u->bottom_type() == Type::MEMORY, "");
2052             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
2053               assert(controls.member(u->in(0)), "");
2054               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2055               memories.push(u);
2056             }
2057           } else if (u->is_SafePoint() || u->is_MemBar()) {
2058             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2059               Node* uu = u->fast_out(j);
2060               if (uu->bottom_type() == Type::MEMORY) {
2061                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
2062                 memories.push(uu);
2063               }
2064             }
2065           }
2066         }
2067       }
2068       for (uint next2 = 0; next2 < controls.size(); next2++) {
2069         Node *m = controls.at(next2);
2070         if (m->is_Region()) {
2071           bool all_in = true;
2072           for (uint i = 1; i < m->req(); i++) {
2073             if (!controls.member(m->in(i))) {
2074               all_in = false;
2075               break;
2076             }
2077           }
2078           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
2079           bool found_phi = false;
2080           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
2081             Node* u = m->fast_out(j);
2082             if (u->is_Phi() && memories.member(u)) {
2083               found_phi = true;
2084               for (uint i = 1; i < u->req() && found_phi; i++) {
2085                 Node* k = u->in(i);
2086                 if (memories.member(k) != controls.member(m->in(i))) {
2087                   found_phi = false;
2088                 }
2089               }
2090             }
2091           }
2092           assert(found_phi || all_in, "");
2093         }
2094       }
2095       controls.clear();
2096       memories.clear();
2097     }
2098     for (uint i = 0; i < n->len(); ++i) {
2099       Node *m = n->in(i);
2100       if (m != NULL) {
2101         nodes.push(m);
2102       }
2103     }
2104   }
2105 }
2106 #endif
2107 
2108 ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
2109   ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
2110 }
2111 
2112 const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
2113   if (in(1) == NULL || in(1)->is_top()) {
2114     return Type::TOP;
2115   }
2116   const Type* t = in(1)->bottom_type();
2117   if (t == TypePtr::NULL_PTR) {
2118     return t;
2119   }
2120   return t->is_oopptr();
2121 }
2122 
2123 const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
2124   if (in(1) == NULL) {
2125     return Type::TOP;
2126   }
2127   const Type* t = phase->type(in(1));
2128   if (t == Type::TOP) {
2129     return Type::TOP;
2130   }
2131   if (t == TypePtr::NULL_PTR) {
2132     return t;
2133   }
2134   return t->is_oopptr();
2135 }
2136 
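// Classify whether a value needs an enqueue barrier: allocations, nulls,
// constants and values that already went through an enqueue barrier do not;
// phis and cmoves need their inputs examined.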
2137 int ShenandoahEnqueueBarrierNode::needed(Node* n) {
2138   if (n == NULL ||
2139       n->is_Allocate() ||
2140       n->Opcode() == Op_ShenandoahEnqueueBarrier ||
2141       n->bottom_type() == TypePtr::NULL_PTR ||
2142       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2143     return NotNeeded;
2144   }
2145   if (n->is_Phi() ||
2146       n->is_CMove()) {
2147     return MaybeNeeded;
2148   }
2149   return Needed;
2150 }
2151 
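// Skip over casts, encode/decode and projections to the node that actually
// produces the value; used by Identity() to decide whether this enqueue
// barrier can be removed.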
2152 Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
2153   for (;;) {
2154     if (n == NULL) {
2155       return n;
2156     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2157       return n;
2158     } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2159       return n;
2160     } else if (n->is_ConstraintCast() ||
2161                n->Opcode() == Op_DecodeN ||
2162                n->Opcode() == Op_EncodeP) {
2163       n = n->in(1);
2164     } else if (n->is_Proj()) {
2165       n = n->in(0);
2166     } else {
2167       return n;
2168     }
2169   }
2170   ShouldNotReachHere();
2171   return NULL;
2172 }
2173 
2174 Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
2175   PhaseIterGVN* igvn = phase->is_IterGVN();
2176 
2177   Node* n = next(in(1));
2178 
2179   int cont = needed(n);
2180 
2181   if (cont == NotNeeded) {
2182     return in(1);
2183   } else if (cont == MaybeNeeded) {
2184     if (igvn == NULL) {
2185       phase->record_for_igvn(this);
2186       return this;
2187     } else {
2188       ResourceMark rm;
2189       Unique_Node_List wq;
2190       uint wq_i = 0;
2191 
2192       for (;;) {
2193         if (n->is_Phi()) {
2194           for (uint i = 1; i < n->req(); i++) {
2195             Node* m = n->in(i);
2196             if (m != NULL) {
2197               wq.push(m);
2198             }
2199           }
2200         } else {
2201           assert(n->is_CMove(), "nothing else here");
2202           Node* m = n->in(CMoveNode::IfFalse);
2203           wq.push(m);
2204           m = n->in(CMoveNode::IfTrue);
2205           wq.push(m);
2206         }
2207         Node* orig_n = NULL;
2208         do {
2209           if (wq_i >= wq.size()) {
2210             return in(1);
2211           }
2212           n = wq.at(wq_i);
2213           wq_i++;
2214           orig_n = n;
2215           n = next(n);
2216           cont = needed(n);
2217           if (cont == Needed) {
2218             return this;
2219           }
2220         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2221       }
2222     }
2223   }
2224 
2225   return this;
2226 }
2227 
2228 #ifdef ASSERT
2229 static bool has_never_branch(Node* root) {
2230   for (uint i = 1; i < root->req(); i++) {
2231     Node* in = root->in(i);
2232     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2233       return true;
2234     }
2235   }
2236   return false;
2237 }
2238 #endif
2239 
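// Build a per-CFG-node view of the memory state for the alias class being
// tracked: walk the memory graph from the root to map each control node to
// the memory it defines, then propagate the states over the CFG in reverse
// post order, creating or reusing memory phis at regions until a fixed point
// is reached.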
2240 void MemoryGraphFixer::collect_memory_nodes() {
2241   Node_Stack stack(0);
2242   VectorSet visited(Thread::current()->resource_area());
2243   Node_List regions;
2244 
2245   // Walk the raw memory graph and create a mapping from CFG node to
2246   // memory node. Exclude phis for now.
2247   stack.push(_phase->C->root(), 1);
2248   do {
2249     Node* n = stack.node();
2250     int opc = n->Opcode();
2251     uint i = stack.index();
2252     if (i < n->req()) {
2253       Node* mem = NULL;
2254       if (opc == Op_Root) {
2255         Node* in = n->in(i);
2256         int in_opc = in->Opcode();
2257         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2258           mem = in->in(TypeFunc::Memory);
2259         } else if (in_opc == Op_Halt) {
2260           if (!in->in(0)->is_Region()) {
2261             Node* proj = in->in(0);
2262             assert(proj->is_Proj(), "");
2263             Node* in = proj->in(0);
2264             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2265             if (in->is_CallStaticJava()) {
2266               mem = in->in(TypeFunc::Memory);
2267             } else if (in->Opcode() == Op_Catch) {
2268               Node* call = in->in(0)->in(0);
2269               assert(call->is_Call(), "");
2270               mem = call->in(TypeFunc::Memory);
2271             } else if (in->Opcode() == Op_NeverBranch) {
2272               ResourceMark rm;
2273               Unique_Node_List wq;
2274               wq.push(in);
2275               wq.push(in->as_Multi()->proj_out(0));
2276               for (uint j = 1; j < wq.size(); j++) {
2277                 Node* c = wq.at(j);
2278                 assert(!c->is_Root(), "shouldn't leave loop");
2279                 if (c->is_SafePoint()) {
2280                   assert(mem == NULL, "only one safepoint");
2281                   mem = c->in(TypeFunc::Memory);
2282                 }
2283                 for (DUIterator_Fast kmax, k = c->fast_outs(kmax); k < kmax; k++) {
2284                   Node* u = c->fast_out(k);
2285                   if (u->is_CFG()) {
2286                     wq.push(u);
2287                   }
2288                 }
2289               }
2290               assert(mem != NULL, "should have found safepoint");
2291             }
2292           }
2293         } else {
2294 #ifdef ASSERT
2295           n->dump();
2296           in->dump();
2297 #endif
2298           ShouldNotReachHere();
2299         }
2300       } else {
2301         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2302         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2303         mem = n->in(i);
2304       }
2305       i++;
2306       stack.set_index(i);
2307       if (mem == NULL) {
2308         continue;
2309       }
2310       for (;;) {
2311         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2312           break;
2313         }
2314         if (mem->is_Phi()) {
2315           stack.push(mem, 2);
2316           mem = mem->in(1);
2317         } else if (mem->is_Proj()) {
2318           stack.push(mem, mem->req());
2319           mem = mem->in(0);
2320         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2321           mem = mem->in(TypeFunc::Memory);
2322         } else if (mem->is_MergeMem()) {
2323           MergeMemNode* mm = mem->as_MergeMem();
2324           mem = mm->memory_at(_alias);
2325         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2326           assert(_alias == Compile::AliasIdxRaw, "");
2327           stack.push(mem, mem->req());
2328           mem = mem->in(MemNode::Memory);
2329         } else {
2330 #ifdef ASSERT
2331           mem->dump();
2332 #endif
2333           ShouldNotReachHere();
2334         }
2335       }
2336     } else {
2337       if (n->is_Phi()) {
2338         // Nothing
2339       } else if (!n->is_Root()) {
2340         Node* c = get_ctrl(n);
2341         _memory_nodes.map(c->_idx, n);
2342       }
2343       stack.pop();
2344     }
2345   } while (stack.is_nonempty());
2346 
2347   // Iterate over CFG nodes in rpo and propagate memory state to
2348   // compute memory state at regions, creating new phis if needed.
2349   Node_List rpo_list;
2350   visited.Clear();
2351   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2352   Node* root = rpo_list.pop();
2353   assert(root == _phase->C->root(), "");
2354 
2355   const bool trace = false;
2356 #ifdef ASSERT
2357   if (trace) {
2358     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2359       Node* c = rpo_list.at(i);
2360       if (_memory_nodes[c->_idx] != NULL) {
2361         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2362       }
2363     }
2364   }
2365 #endif
2366   uint last = _phase->C->unique();
2367 
2368 #ifdef ASSERT
2369   uint8_t max_depth = 0;
2370   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2371     IdealLoopTree* lpt = iter.current();
2372     max_depth = MAX2(max_depth, lpt->_nest);
2373   }
2374 #endif
2375 
2376   bool progress = true;
2377   int iteration = 0;
2378   Node_List dead_phis;
2379   while (progress) {
2380     progress = false;
2381     iteration++;
2382     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2383     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2384     IdealLoopTree* last_updated_ilt = NULL;
2385     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2386       Node* c = rpo_list.at(i);
2387 
2388       Node* prev_mem = _memory_nodes[c->_idx];
2389       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2390         Node* prev_region = regions[c->_idx];
2391         Node* unique = NULL;
2392         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2393           Node* m = _memory_nodes[c->in(j)->_idx];
2394           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2395           if (m != NULL) {
2396             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2397               assert((c->is_Loop() && j == LoopNode::LoopBackControl) || _phase->C->has_irreducible_loop(), "");
2398               // continue
2399             } else if (unique == NULL) {
2400               unique = m;
2401             } else if (m == unique) {
2402               // continue
2403             } else {
2404               unique = NodeSentinel;
2405             }
2406           }
2407         }
2408         assert(unique != NULL, "empty phi???");
2409         if (unique != NodeSentinel) {
2410           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2411             dead_phis.push(prev_region);
2412           }
2413           regions.map(c->_idx, unique);
2414         } else {
2415           Node* phi = NULL;
2416           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2417             phi = prev_region;
2418             for (uint k = 1; k < c->req(); k++) {
2419               Node* m = _memory_nodes[c->in(k)->_idx];
2420               assert(m != NULL, "expect memory state");
2421               phi->set_req(k, m);
2422             }
2423           } else {
2424             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2425               Node* u = c->fast_out(j);
2426               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2427                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2428                 phi = u;
2429                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2430                   Node* m = _memory_nodes[c->in(k)->_idx];
2431                   assert(m != NULL, "expect memory state");
2432                   if (u->in(k) != m) {
2433                     phi = NULL;
2434                   }
2435                 }
2436               }
2437             }
2438             if (phi == NULL) {
2439               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2440               for (uint k = 1; k < c->req(); k++) {
2441                 Node* m = _memory_nodes[c->in(k)->_idx];
2442                 assert(m != NULL, "expect memory state");
2443                 phi->init_req(k, m);
2444               }
2445             }
2446           }
2447           assert(phi != NULL, "");
2448           regions.map(c->_idx, phi);
2449         }
2450         Node* current_region = regions[c->_idx];
2451         if (current_region != prev_region) {
2452           progress = true;
2453           if (prev_region == prev_mem) {
2454             _memory_nodes.map(c->_idx, current_region);
2455           }
2456         }
2457       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2458         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2459         assert(m != NULL, "expect memory state");
2460         if (m != prev_mem) {
2461           _memory_nodes.map(c->_idx, m);
2462           progress = true;
2463         }
2464       }
2465 #ifdef ASSERT
2466       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2467 #endif
2468     }
2469   }
2470 
2471   // Replace existing phi with computed memory state for that region
2472   // if different (could be a new phi or a dominating memory node if
2473   // that phi was found to be useless).
2474   while (dead_phis.size() > 0) {
2475     Node* n = dead_phis.pop();
2476     n->replace_by(_phase->C->top());
2477     n->destruct();
2478   }
2479   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2480     Node* c = rpo_list.at(i);
2481     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2482       Node* n = regions[c->_idx];
2483       if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2484         _phase->register_new_node(n, c);
2485       }
2486     }
2487   }
2488   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2489     Node* c = rpo_list.at(i);
2490     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2491       Node* n = regions[c->_idx];
2492       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2493         Node* u = c->fast_out(i);
2494         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2495             u != n) {
2496           if (u->adr_type() == TypePtr::BOTTOM) {
2497             fix_memory_uses(u, n, n, c);
2498           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2499             _phase->lazy_replace(u, n);
2500             --i; --imax;
2501           }
2502         }
2503       }
2504     }
2505   }
2506 }
2507 
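// Control for a memory node, with a twist for call projections: the memory
// projections of a call with an exception edge are attributed to the matching
// catch projection (fall-through or catch-all) rather than to the call itself.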
2508 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2509   Node* c = _phase->get_ctrl(n);
2510   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2511     assert(c == n->in(0), "");
2512     CallNode* call = c->as_Call();
2513     CallProjections projs;
2514     call->extract_projections(&projs, true, false);
2515     if (projs.catchall_memproj != NULL) {
2516       if (projs.fallthrough_memproj == n) {
2517         c = projs.fallthrough_catchproj;
2518       } else {
2519         assert(projs.catchall_memproj == n, "");
2520         c = projs.catchall_catchproj;
2521       }
2522     }
2523   }
2524   return c;
2525 }
2526 
2527 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2528   if (_phase->has_ctrl(n)) {
2529     return get_ctrl(n);
2530   } else {
2531     assert(n->is_CFG(), "must be a CFG node");
2532     return n;
2533   }
2534 }
2535 
2536 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2537   return m != NULL && get_ctrl(m) == c;
2538 }
2539 
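// Find the memory state valid at ctrl: walk up the dominator tree until a
// memory node controlled by the current block is found, and when several
// memory nodes share that control, step past the ones n does not depend on.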
2540 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2541   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
2542   Node* mem = _memory_nodes[ctrl->_idx];
2543   Node* c = ctrl;
2544   while (!mem_is_valid(mem, c) &&
2545          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2546     c = _phase->idom(c);
2547     mem = _memory_nodes[c->_idx];
2548   }
2549   if (n != NULL && mem_is_valid(mem, c)) {
2550     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2551       mem = next_mem(mem, _alias);
2552     }
2553     if (mem->is_MergeMem()) {
2554       mem = mem->as_MergeMem()->memory_at(_alias);
2555     }
2556     if (!mem_is_valid(mem, c)) {
2557       do {
2558         c = _phase->idom(c);
2559         mem = _memory_nodes[c->_idx];
2560       } while (!mem_is_valid(mem, c) &&
2561                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2562     }
2563   }
2564   assert(mem->bottom_type() == Type::MEMORY, "");
2565   return mem;
2566 }
2567 
2568 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2569   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2570     Node* use = region->fast_out(i);
2571     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2572         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2573       return true;
2574     }
2575   }
2576   return false;
2577 }
2578 
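// Hook new_mem (the memory produced at new_ctrl, e.g. by an expanded barrier)
// into the graph. If the memory states at ctrl and mem_for_ctrl already
// differ, splice new_mem into that chain of raw memory side effects;
// otherwise walk the CFG below the barrier and create or patch memory phis so
// every control sees the memory state it should.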
2579 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2580   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2581   const bool trace = false;
2582   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2583   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2584   GrowableArray<Node*> phis;
2585   if (mem_for_ctrl != mem) {
2586     Node* old = mem_for_ctrl;
2587     Node* prev = NULL;
2588     while (old != mem) {
2589       prev = old;
2590       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2591         assert(_alias == Compile::AliasIdxRaw, "");
2592         old = old->in(MemNode::Memory);
2593       } else if (old->Opcode() == Op_SCMemProj) {
2594         assert(_alias == Compile::AliasIdxRaw, "");
2595         old = old->in(0);
2596       } else {
2597         ShouldNotReachHere();
2598       }
2599     }
2600     assert(prev != NULL, "");
2601     if (new_ctrl != ctrl) {
2602       _memory_nodes.map(ctrl->_idx, mem);
2603       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2604     }
2605     uint input = (uint)MemNode::Memory;
2606     _phase->igvn().replace_input_of(prev, input, new_mem);
2607   } else {
2608     uses.clear();
2609     _memory_nodes.map(new_ctrl->_idx, new_mem);
2610     uses.push(new_ctrl);
2611     for (uint next = 0; next < uses.size(); next++) {
2612       Node *n = uses.at(next);
2613       assert(n->is_CFG(), "");
2614       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2615       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2616         Node* u = n->fast_out(i);
2617         if (!u->is_Root() && u->is_CFG() && u != n) {
2618           Node* m = _memory_nodes[u->_idx];
2619           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2620               !has_mem_phi(u) &&
2621               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2622             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2623             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2624 
2625             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2626               bool push = true;
2627               bool create_phi = true;
2628               if (_phase->is_dominator(new_ctrl, u)) {
2629                 create_phi = false;
2630               } else if (!_phase->C->has_irreducible_loop()) {
2631                 IdealLoopTree* loop = _phase->get_loop(ctrl);
2632                 bool do_check = true;
2633                 IdealLoopTree* l = loop;
2634                 create_phi = false;
2635                 while (l != _phase->ltree_root()) {
2636                   Node* head = l->_head;
2637                   if (head->in(0) == NULL) {
2638                     head = _phase->get_ctrl(head);
2639                   }
2640                   if (_phase->is_dominator(head, u) && _phase->is_dominator(_phase->idom(u), head)) {
2641                     create_phi = true;
2642                     do_check = false;
2643                     break;
2644                   }
2645                   l = l->_parent;
2646                 }
2647 
2648                 if (do_check) {
2649                   assert(!create_phi, "");
2650                   IdealLoopTree* u_loop = _phase->get_loop(u);
2651                   if (u_loop != _phase->ltree_root() && u_loop->is_member(loop)) {
2652                     Node* c = ctrl;
2653                     while (!_phase->is_dominator(c, u_loop->tail())) {
2654                       c = _phase->idom(c);
2655                     }
2656                     if (!_phase->is_dominator(c, u)) {
2657                       do_check = false;
2658                     }
2659                   }
2660                 }
2661 
2662                 if (do_check && _phase->is_dominator(_phase->idom(u), new_ctrl)) {
2663                   create_phi = true;
2664                 }
2665               }
2666               if (create_phi) {
2667                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2668                 _phase->register_new_node(phi, u);
2669                 phis.push(phi);
2670                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2671                 if (!mem_is_valid(m, u)) {
2672                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2673                   _memory_nodes.map(u->_idx, phi);
2674                 } else {
2675                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2676                   for (;;) {
2677                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2678                     Node* next = NULL;
2679                     if (m->is_Proj()) {
2680                       next = m->in(0);
2681                     } else {
2682                       assert(m->is_Mem() || m->is_LoadStore(), "");
2683                       assert(_alias == Compile::AliasIdxRaw, "");
2684                       next = m->in(MemNode::Memory);
2685                     }
2686                     if (_phase->get_ctrl(next) != u) {
2687                       break;
2688                     }
2689                     if (next->is_MergeMem()) {
2690                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2691                       break;
2692                     }
2693                     if (next->is_Phi()) {
2694                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2695                       break;
2696                     }
2697                     m = next;
2698                   }
2699 
2700                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2701                   assert(m->is_Mem() || m->is_LoadStore(), "");
2702                   uint input = (uint)MemNode::Memory;
2703                   _phase->igvn().replace_input_of(m, input, phi);
2704                   push = false;
2705                 }
2706               } else {
2707                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2708               }
2709               if (push) {
2710                 uses.push(u);
2711               }
2712             }
2713           } else if (!mem_is_valid(m, u) &&
2714                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2715             uses.push(u);
2716           }
2717         }
2718       }
2719     }
2720     for (int i = 0; i < phis.length(); i++) {
2721       Node* n = phis.at(i);
2722       Node* r = n->in(0);
2723       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2724       for (uint j = 1; j < n->req(); j++) {
2725         Node* m = find_mem(r->in(j), NULL);
2726         _phase->igvn().replace_input_of(n, j, m);
2727         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2728       }
2729     }
2730   }
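       // Now fix the pre-existing users of mem on the tracked alias (loads, stores,
       // MergeMems, memory Phis, calls, ...) so that each one consumes the memory state
       // that is live at its control: new_mem, a freshly created Phi or a cloned MergeMem.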
2731   uint last = _phase->C->unique();
2732   MergeMemNode* mm = NULL;
2733   int alias = _alias;
2734   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2735   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2736     Node* u = mem->out(i);
2737     if (u->_idx < last) {
2738       if (u->is_Mem()) {
2739         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2740           Node* m = find_mem(_phase->get_ctrl(u), u);
2741           if (m != mem) {
2742             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2743             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2744             --i;
2745           }
2746         }
2747       } else if (u->is_MergeMem()) {
2748         MergeMemNode* u_mm = u->as_MergeMem();
2749         if (u_mm->memory_at(alias) == mem) {
2750           MergeMemNode* newmm = NULL;
2751           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2752             Node* uu = u->fast_out(j);
2753             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2754             if (uu->is_Phi()) {
2755               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2756               Node* region = uu->in(0);
2757               int nb = 0;
2758               for (uint k = 1; k < uu->req(); k++) {
2759                 if (uu->in(k) == u) {
2760                   Node* m = find_mem(region->in(k), NULL);
2761                   if (m != mem) {
2762                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2763                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2764                     if (newmm != u) {
2765                       _phase->igvn().replace_input_of(uu, k, newmm);
2766                       nb++;
2767                       --jmax;
2768                     }
2769                   }
2770                 }
2771               }
2772               if (nb > 0) {
2773                 --j;
2774               }
2775             } else {
2776               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2777               if (m != mem) {
2778                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2779                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2780                 if (newmm != u) {
2781                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2782                   --j, --jmax;
2783                 }
2784               }
2785             }
2786           }
2787         }
2788       } else if (u->is_Phi()) {
2789         assert(u->bottom_type() == Type::MEMORY, "what else?");
2790         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2791           Node* region = u->in(0);
2792           bool replaced = false;
2793           for (uint j = 1; j < u->req(); j++) {
2794             if (u->in(j) == mem) {
2795               Node* m = find_mem(region->in(j), NULL);
2796               Node* nnew = m;
2797               if (m != mem) {
2798                 if (u->adr_type() == TypePtr::BOTTOM) {
2799                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2800                   nnew = mm;
2801                 }
2802                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2803                 _phase->igvn().replace_input_of(u, j, nnew);
2804                 replaced = true;
2805               }
2806             }
2807           }
2808           if (replaced) {
2809             --i;
2810           }
2811         }
2812       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2813                  u->adr_type() == NULL) {
2814         assert(u->adr_type() != NULL ||
2815                u->Opcode() == Op_Rethrow ||
2816                u->Opcode() == Op_Return ||
2817                u->Opcode() == Op_SafePoint ||
2818                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2819                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2820                u->Opcode() == Op_CallLeaf, "");
2821         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2822         if (m != mem) {
2823           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2824           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2825           --i;
2826         }
2827       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2828         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2829         if (m != mem) {
2830           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2831           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2832           --i;
2833         }
2834       } else if (u->adr_type() != TypePtr::BOTTOM &&
2835                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2836         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2837         assert(m != mem, "");
2838         // u is on the wrong slice...
2839         assert(u->is_ClearArray(), "");
2840         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2841         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2842         --i;
2843       }
2844     }
2845   }
2846 #ifdef ASSERT
2847   assert(new_mem->outcnt() > 0, "");
2848   for (int i = 0; i < phis.length(); i++) {
2849     Node* n = phis.at(i);
2850     assert(n->outcnt() > 0, "new phi must have uses now");
2851   }
2852 #endif
2853 }
2854 
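     // Build a MergeMem based on mem with the tracked alias slice replaced by rep_proj,
     // registered at rep_ctrl.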
2855 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2856   MergeMemNode* mm = MergeMemNode::make(mem);
2857   mm->set_memory_at(_alias, rep_proj);
2858   _phase->register_new_node(mm, rep_ctrl);
2859   return mm;
2860 }
2861 
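     // Return a MergeMem equivalent to u but with the tracked alias slice set to rep_proj.
     // If u has a single user it is updated in place; otherwise a fresh MergeMem is built.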
2862 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2863   MergeMemNode* newmm = NULL;
2864   MergeMemNode* u_mm = u->as_MergeMem();
2865   Node* c = _phase->get_ctrl(u);
2866   if (_phase->is_dominator(c, rep_ctrl)) {
2867     c = rep_ctrl;
2868   } else {
2869     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2870   }
2871   if (u->outcnt() == 1) {
2872     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2873       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2874       --i;
2875     } else {
2876       _phase->igvn().rehash_node_delayed(u);
2877       u_mm->set_memory_at(_alias, rep_proj);
2878     }
2879     newmm = u_mm;
2880     _phase->set_ctrl_and_loop(u, c);
2881   } else {
2882     // We can't simply clone u and then change one of its inputs, because
2883     // that would add and then remove an edge, which messes with the
2884     // DUIterator.
2885     newmm = MergeMemNode::make(u_mm->base_memory());
2886     for (uint j = 0; j < u->req(); j++) {
2887       if (j < newmm->req()) {
2888         if (j == (uint)_alias) {
2889           newmm->set_req(j, rep_proj);
2890         } else if (newmm->in(j) != u->in(j)) {
2891           newmm->set_req(j, u->in(j));
2892         }
2893       } else if (j == (uint)_alias) {
2894         newmm->add_req(rep_proj);
2895       } else {
2896         newmm->add_req(u->in(j));
2897       }
2898     }
2899     if ((uint)_alias >= u->req()) {
2900       newmm->set_memory_at(_alias, rep_proj);
2901     }
2902     _phase->register_new_node(newmm, c);
2903   }
2904   return newmm;
2905 }
2906 
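     // A memory Phi is relevant to this fixer if it is on the tracked alias slice, or if it
     // is a bottom (all slices) memory Phi whose region has no dedicated Phi for that slice.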
2907 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2908   if (phi->adr_type() == TypePtr::BOTTOM) {
2909     Node* region = phi->in(0);
2910     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2911       Node* uu = region->fast_out(j);
2912       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2913         return false;
2914       }
2915     }
2916     return true;
2917   }
2918   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2919 }
2920 
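     // Redirect the users of mem that are dominated by rep_ctrl so they consume rep_proj
     // (wrapped in a MergeMem where the user expects all memory slices) instead of mem,
     // making replacement the memory state they observe.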
2921 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2922   uint last = _phase->C->unique();
2923   MergeMemNode* mm = NULL;
2924   assert(mem->bottom_type() == Type::MEMORY, "");
2925   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2926     Node* u = mem->out(i);
2927     if (u != replacement && u->_idx < last) {
2928       if (u->is_MergeMem()) {
2929         MergeMemNode* u_mm = u->as_MergeMem();
2930         if (u_mm->memory_at(_alias) == mem) {
2931           MergeMemNode* newmm = NULL;
2932           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2933             Node* uu = u->fast_out(j);
2934             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2935             if (uu->is_Phi()) {
2936               if (should_process_phi(uu)) {
2937                 Node* region = uu->in(0);
2938                 int nb = 0;
2939                 for (uint k = 1; k < uu->req(); k++) {
2940                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2941                     if (newmm == NULL) {
2942                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2943                     }
2944                     if (newmm != u) {
2945                       _phase->igvn().replace_input_of(uu, k, newmm);
2946                       nb++;
2947                       --jmax;
2948                     }
2949                   }
2950                 }
2951                 if (nb > 0) {
2952                   --j;
2953                 }
2954               }
2955             } else {
2956               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2957                 if (newmm == NULL) {
2958                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2959                 }
2960                 if (newmm != u) {
2961                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2962                   --j, --jmax;
2963                 }
2964               }
2965             }
2966           }
2967         }
2968       } else if (u->is_Phi()) {
2969         assert(u->bottom_type() == Type::MEMORY, "what else?");
2970         Node* region = u->in(0);
2971         if (should_process_phi(u)) {
2972           bool replaced = false;
2973           for (uint j = 1; j < u->req(); j++) {
2974             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2975               Node* nnew = rep_proj;
2976               if (u->adr_type() == TypePtr::BOTTOM) {
2977                 if (mm == NULL) {
2978                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2979                 }
2980                 nnew = mm;
2981               }
2982               _phase->igvn().replace_input_of(u, j, nnew);
2983               replaced = true;
2984             }
2985           }
2986           if (replaced) {
2987             --i;
2988           }
2989 
2990         }
2991       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2992                  u->adr_type() == NULL) {
2993         assert(u->adr_type() != NULL ||
2994                u->Opcode() == Op_Rethrow ||
2995                u->Opcode() == Op_Return ||
2996                u->Opcode() == Op_SafePoint ||
2997                u->Opcode() == Op_StoreIConditional ||
2998                u->Opcode() == Op_StoreLConditional ||
2999                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
3000                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
3001                u->Opcode() == Op_CallLeaf, "%s", u->Name());
3002         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3003           if (mm == NULL) {
3004             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
3005           }
3006           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
3007           --i;
3008         }
3009       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
3010         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3011           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
3012           --i;
3013         }
3014       }
3015     }
3016   }
3017 }
3018 
3019 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj)
3020 : Node(ctrl, obj) {
3021   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
3022 }
3023 
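     // The barrier produces the same oop as its input: the type is the input's type,
     // with TOP and the null pointer passed through unchanged.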
3024 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
3025   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
3026     return Type::TOP;
3027   }
3028   const Type* t = in(ValueIn)->bottom_type();
3029   if (t == TypePtr::NULL_PTR) {
3030     return t;
3031   }
3032   return t->is_oopptr();
3033 }
3034 
3035 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
3036   // If the input is TOP, the result is TOP.
3037   const Type* t2 = phase->type(in(ValueIn));
3038   if (t2 == Type::TOP) return Type::TOP;
3039 
3040   if (t2 == TypePtr::NULL_PTR) {
3041     return t2;
3042   }
3043 
3044   const Type* type = t2->is_oopptr();
3045   return type;
3046 }
3047 
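     // The barrier is redundant when its input provably never needs one (see needs_barrier);
     // in that case the node is the identity of its input.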
3048 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
3049   Node* value = in(ValueIn);
3050   if (!needs_barrier(phase, value)) {
3051     return value;
3052   }
3053   return this;
3054 }
3055 
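     // Conservatively decide whether value n may refer to an object that still needs a load
     // reference barrier. The walk looks through Phis, casts, projections and encodings;
     // anything it does not recognize is reported as needing a barrier.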
3056 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
3057   Unique_Node_List visited;
3058   return needs_barrier_impl(phase, n, visited);
3059 }
3060 
3061 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
3062   if (n == NULL) return false;
3063   if (visited.member(n)) {
3064     return false; // Been there.
3065   }
3066   visited.push(n);
3067 
3068   if (n->is_Allocate()) {
3069     // tty->print_cr("optimize barrier on alloc");
3070     return false;
3071   }
3072   if (n->is_Call()) {
3073     // tty->print_cr("optimize barrier on call");
3074     return false;
3075   }
3076 
3077   const Type* type = phase->type(n);
3078   if (type == Type::TOP) {
3079     return false;
3080   }
3081   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
3082     // tty->print_cr("optimize barrier on null");
3083     return false;
3084   }
3085   if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
3086     // tty->print_cr("optimize barrier on constant");
3087     return false;
3088   }
3089 
3090   switch (n->Opcode()) {
3091     case Op_AddP:
3092       return true; // TODO: Can refine?
3093     case Op_LoadP:
3094     case Op_ShenandoahCompareAndExchangeN:
3095     case Op_ShenandoahCompareAndExchangeP:
3096     case Op_CompareAndExchangeN:
3097     case Op_CompareAndExchangeP:
3098     case Op_GetAndSetN:
3099     case Op_GetAndSetP:
3100       return true;
3101     case Op_Phi: {
3102       for (uint i = 1; i < n->req(); i++) {
3103         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
3104       }
3105       return false;
3106     }
3107     case Op_CheckCastPP:
3108     case Op_CastPP:
3109       return needs_barrier_impl(phase, n->in(1), visited);
3110     case Op_Proj:
3111       return needs_barrier_impl(phase, n->in(0), visited);
3112     case Op_ShenandoahLoadReferenceBarrier:
3113       // tty->print_cr("optimize barrier on barrier");
3114       return false;
3115     case Op_Parm:
3116       // tty->print_cr("optimize barrier on input arg");
3117       return false;
3118     case Op_DecodeN:
3119     case Op_EncodeP:
3120       return needs_barrier_impl(phase, n->in(1), visited);
3121     case Op_LoadN:
3122       return true;
3123     case Op_CMoveN:
3124     case Op_CMoveP:
3125       return needs_barrier_impl(phase, n->in(2), visited) ||
3126              needs_barrier_impl(phase, n->in(3), visited);
3127     case Op_ShenandoahEnqueueBarrier:
3128       return needs_barrier_impl(phase, n->in(1), visited);
3129     case Op_CreateEx:
3130       return false;
3131     default:
3132       break;
3133   }
3134 #ifdef ASSERT
3135   tty->print("need barrier on?: ");
3136   tty->print_cr("ins:");
3137   n->dump(2);
3138   tty->print_cr("outs:");
3139   n->dump(-2);
3140   ShouldNotReachHere();
3141 #endif
3142   return true;
3143 }
3144 
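     // Classify this barrier by its transitive users: STRONG as soon as one user is known to
     // require a barrier, NONE if no such user is reachable. Address computations, casts,
     // CMoves and Phis are looked through, so the decision is made at the eventual consumers.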
3145 ShenandoahLoadReferenceBarrierNode::Strength ShenandoahLoadReferenceBarrierNode::get_barrier_strength() {
3146   Unique_Node_List visited;
3147   Node_Stack stack(0);
3148   stack.push(this, 0);
3149 
3150   // Look for the strongest strength: go over the users looking for STRONG ones.
3151   // Stop as soon as we encounter STRONG. Otherwise, walk until we run out of nodes,
3152   // in which case the overall strength is NONE.
3153   Strength strength = NONE;
3154   while (strength != STRONG && stack.size() > 0) {
3155     Node* n = stack.node();
3156     if (visited.member(n)) {
3157       stack.pop();
3158       continue;
3159     }
3160     visited.push(n);
3161     bool visit_users = false;
3162     switch (n->Opcode()) {
3163       case Op_CallStaticJava:
3164       case Op_CallDynamicJava:
3165       case Op_CallLeaf:
3166       case Op_CallLeafNoFP:
3167       case Op_CompareAndSwapL:
3168       case Op_CompareAndSwapI:
3169       case Op_CompareAndSwapB:
3170       case Op_CompareAndSwapS:
3171       case Op_CompareAndSwapN:
3172       case Op_CompareAndSwapP:
3173       case Op_CompareAndExchangeL:
3174       case Op_CompareAndExchangeI:
3175       case Op_CompareAndExchangeB:
3176       case Op_CompareAndExchangeS:
3177       case Op_CompareAndExchangeN:
3178       case Op_CompareAndExchangeP:
3179       case Op_WeakCompareAndSwapL:
3180       case Op_WeakCompareAndSwapI:
3181       case Op_WeakCompareAndSwapB:
3182       case Op_WeakCompareAndSwapS:
3183       case Op_WeakCompareAndSwapN:
3184       case Op_WeakCompareAndSwapP:
3185       case Op_ShenandoahCompareAndSwapN:
3186       case Op_ShenandoahCompareAndSwapP:
3187       case Op_ShenandoahWeakCompareAndSwapN:
3188       case Op_ShenandoahWeakCompareAndSwapP:
3189       case Op_ShenandoahCompareAndExchangeN:
3190       case Op_ShenandoahCompareAndExchangeP:
3191       case Op_GetAndSetL:
3192       case Op_GetAndSetI:
3193       case Op_GetAndSetB:
3194       case Op_GetAndSetS:
3195       case Op_GetAndSetP:
3196       case Op_GetAndSetN:
3197       case Op_GetAndAddL:
3198       case Op_GetAndAddI:
3199       case Op_GetAndAddB:
3200       case Op_GetAndAddS:
3201       case Op_ShenandoahEnqueueBarrier:
3202       case Op_FastLock:
3203       case Op_FastUnlock:
3204       case Op_Rethrow:
3205       case Op_Return:
3206       case Op_StoreB:
3207       case Op_StoreC:
3208       case Op_StoreD:
3209       case Op_StoreF:
3210       case Op_StoreL:
3211       case Op_StoreLConditional:
3212       case Op_StoreI:
3213       case Op_StoreIConditional:
3214       case Op_StoreN:
3215       case Op_StoreP:
3216       case Op_StoreVector:
3217       case Op_StrInflatedCopy:
3218       case Op_StrCompressedCopy:
3219       case Op_EncodeP:
3220       case Op_CastP2X:
3221       case Op_SafePoint:
3222       case Op_EncodeISOArray:
3223       case Op_AryEq:
3224       case Op_StrEquals:
3225       case Op_StrComp:
3226       case Op_StrIndexOf:
3227       case Op_StrIndexOfChar:
3228       case Op_HasNegatives:
3229         // Known to require barriers
3230         strength = STRONG;
3231         break;
3232       case Op_CmpP: {
3233         if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) ||
3234             n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
3235           // One of the sides is known to be null; no barrier is needed.
3236         } else {
3237           strength = STRONG;
3238         }
3239         break;
3240       }
3241       case Op_LoadB:
3242       case Op_LoadUB:
3243       case Op_LoadUS:
3244       case Op_LoadD:
3245       case Op_LoadF:
3246       case Op_LoadL:
3247       case Op_LoadI:
3248       case Op_LoadS:
3249       case Op_LoadN:
3250       case Op_LoadP:
3251       case Op_LoadVector: {
3252         const TypePtr* adr_type = n->adr_type();
3253         int alias_idx = Compile::current()->get_alias_index(adr_type);
3254         Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
3255         ciField* field = alias_type->field();
3256         bool is_static = field != NULL && field->is_static();
3257         bool is_final = field != NULL && field->is_final();
3258 
3259         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
3260           // Loading the constant does not require barriers: it should be handled
3261           // as part of GC roots already.
3262         } else {
3263           strength = STRONG;
3264         }
3265         break;
3266       }
3267       case Op_Conv2B:
3268       case Op_LoadRange:
3269       case Op_LoadKlass:
3270       case Op_LoadNKlass:
3271         // Do not require barriers
3272         break;
3273       case Op_AddP:
3274       case Op_CheckCastPP:
3275       case Op_CastPP:
3276       case Op_CMoveP:
3277       case Op_Phi:
3278       case Op_ShenandoahLoadReferenceBarrier:
3279         // Whether these need barriers depends on their users
3280         visit_users = true;
3281         break;
3282       default: {
3283 #ifdef ASSERT
3284         fatal("Unknown node in get_barrier_strength: %s", NodeClassNames[n->Opcode()]);
3285 #else
3286         // Default to STRONG: better to emit excess barriers than to miss one.
3287         strength = STRONG;
3288 #endif
3289       }
3290     }
3291 
3292     stack.pop();
3293     if (visit_users) {
3294       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3295         Node* user = n->fast_out(i);
3296         if (user != NULL) {
3297           stack.push(user, 0);
3298         }
3299       }
3300     }
3301   }
3302   return strength;
3303 }
3304 
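     // If this barrier's value is the CastPP produced by a null check (an If on
     // 'value != NULL' whose failing path is an uncommon trap), return that uncommon
     // trap call; otherwise return NULL.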
3305 CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
3306   Node* val = in(ValueIn);
3307 
3308   const Type* val_t = igvn.type(val);
3309 
3310   if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
3311       val->Opcode() == Op_CastPP &&
3312       val->in(0) != NULL &&
3313       val->in(0)->Opcode() == Op_IfTrue &&
3314       val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
3315       val->in(0)->in(0)->is_If() &&
3316       val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
3317       val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
3318       val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
3319       val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
3320       val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
3321     assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
3322     CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
3323     return unc;
3324   }
3325   return NULL;
3326 }