/*
 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/movenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

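// Entry point for barrier expansion: if any enqueue or load-reference
// barriers were recorded, run a loop-optimization pass that expands them
// and, optionally, another round of loop opts over the expanded graph.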
bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  if ((state->enqueue_barriers_count() +
       state->load_reference_barriers_count()) > 0) {
    bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
    C->clear_major_progress();
    PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
    if (C->failing()) return false;
    PhaseIdealLoop::verify(igvn);
    DEBUG_ONLY(verify_raw_mem(C->root());)
    if (attempt_more_loopopts) {
      C->set_major_progress();
      if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
        return false;
      }
      C->clear_major_progress();
    }
  }
  return true;
}

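// Recognize the heap-state test shape produced by barrier expansion:
// If -> Bool(ne) -> CmpI(AndI(<gc_state load>, mask), 0).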
bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) {
  if (!UseShenandoahGC) {
    return false;
  }
  assert(iff->is_If(), "bad input");
  if (iff->Opcode() != Op_If) {
    return false;
  }
  Node* bol = iff->in(1);
  if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
    return false;
  }
  Node* cmp = bol->in(1);
  if (cmp->Opcode() != Op_CmpI) {
    return false;
  }
  Node* in1 = cmp->in(1);
  Node* in2 = cmp->in(2);
  if (in2->find_int_con(-1) != 0) {
    return false;
  }
  if (in1->Opcode() != Op_AndI) {
    return false;
  }
  in2 = in1->in(2);
  if (in2->find_int_con(-1) != mask) {
    return false;
  }
  in1 = in1->in(1);

  return is_gc_state_load(in1);
}

bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}

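// Recognize a LoadB/LoadUB of the thread-local gc_state byte,
// i.e. a load from ThreadLocal + gc_state_offset.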
bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
  if (!UseShenandoahGC) {
    return false;
  }
  if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
    return false;
  }
  Node* addp = n->in(MemNode::Address);
  if (!addp->is_AddP()) {
    return false;
  }
  Node* base = addp->in(AddPNode::Address);
  Node* off = addp->in(AddPNode::Offset);
  if (base->Opcode() != Op_ThreadLocal) {
    return false;
  }
  if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
    return false;
  }
  return true;
}

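// Walk control flow up from start towards the dominating stop node and
// report whether any path may pass through a (non leaf-call) safepoint.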
bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
  assert(phase->is_dominator(stop, start), "bad inputs");
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(start);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == stop) {
      continue;
    }
    if (m->is_SafePoint() && !m->is_CallLeaf()) {
      return true;
    }
    if (m->is_Region()) {
      for (uint i = 1; i < m->req(); i++) {
        wq.push(m->in(i));
      }
    } else {
      wq.push(m->in(0));
    }
  }
  return false;
}

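// Try to replace this gc_state load with an earlier load of the same
// thread-local field that dominates it, provided no safepoint may be
// reached in between.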
bool ShenandoahBarrierC2Support::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) {
  assert(is_gc_state_load(n), "inconsistent");
  Node* addp = n->in(MemNode::Address);
  Node* dominator = NULL;
  for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
    Node* u = addp->fast_out(i);
    assert(is_gc_state_load(u), "inconsistent");
    if (u != n && phase->is_dominator(u->in(0), n->in(0))) {
      if (dominator == NULL) {
        dominator = u;
      } else {
        if (phase->dom_depth(u->in(0)) < phase->dom_depth(dominator->in(0))) {
          dominator = u;
        }
      }
    }
  }
  if (dominator == NULL || has_safepoint_between(n->in(0), dominator->in(0), phase)) {
    return false;
  }
  phase->igvn().replace_node(n, dominator);

  return true;
}

#ifdef ASSERT
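// Follow the value graph from 'in' upwards and check that every oop input
// is either covered by the expected barrier or trivially safe (null,
// constant, new allocation, parameter, ...). Phis and CMoves are walked
// through the 'phis' stack.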
bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
  assert(phis.size() == 0, "");

  while (true) {
    if (in->bottom_type() == TypePtr::NULL_PTR) {
      if (trace) {tty->print_cr("NULL");}
    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
      if (trace) {tty->print_cr("Non oop");}
    } else {
      if (in->is_ConstraintCast()) {
        in = in->in(1);
        continue;
      } else if (in->is_AddP()) {
        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
        in = in->in(AddPNode::Address);
        continue;
      } else if (in->is_Con()) {
        if (trace) {
          tty->print("Found constant");
          in->dump();
        }
      } else if (in->Opcode() == Op_Parm) {
        if (trace) {
          tty->print("Found argument");
        }
      } else if (in->Opcode() == Op_CreateEx) {
        if (trace) {
          tty->print("Found create-exception");
        }
      } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
        if (trace) {
          tty->print("Found raw LoadP (OSR argument?)");
        }
      } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        if (t == ShenandoahOopStore) {
          uint i = 0;
          for (; i < phis.size(); i++) {
            Node* n = phis.node_at(i);
            if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
              break;
            }
          }
          if (i == phis.size()) {
            return false;
          }
        }
        barriers_used.push(in);
        if (trace) {tty->print("Found barrier"); in->dump();}
      } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
        if (t != ShenandoahOopStore) {
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Found enqueue barrier"); in->dump();}
        phis.push(in, in->req());
        in = in->in(1);
        continue;
      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
        if (trace) {
          tty->print("Found alloc");
          in->in(0)->dump();
        }
      } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
        if (trace) {
          tty->print("Found Java call");
        }
      } else if (in->is_Phi()) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed phi:"); in->dump();}
          phis.push(in, 2);
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Already seen phi:"); in->dump();}
      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
          phis.push(in, CMoveNode::IfTrue);
          in = in->in(CMoveNode::IfFalse);
          continue;
        }
        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
        in = in->in(1);
        continue;
      } else {
        return false;
      }
    }
    bool cont = false;
    while (phis.is_nonempty()) {
      uint idx = phis.index();
      Node* phi = phis.node();
      if (idx >= phi->req()) {
        if (trace) {tty->print("Popped phi:"); phi->dump();}
        phis.pop();
        continue;
      }
      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
      in = phi->in(idx);
      phis.set_index(idx+1);
      cont = true;
      break;
    }
    if (!cont) {
      break;
    }
  }
  return true;
}

void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
  if (n1 != NULL) {
    n1->dump(+10);
  }
  if (n2 != NULL) {
    n2->dump(+10);
  }
  fatal("%s", msg);
}

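// Debug-only whole-graph verification: every oop load, store, compare,
// atomic access and runtime-call argument must be covered by the
// appropriate Shenandoah barrier.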
void ShenandoahBarrierC2Support::verify(RootNode* root) {
  ResourceMark rm;
  Unique_Node_List wq;
  GrowableArray<Node*> barriers;
  Unique_Node_List barriers_used;
  Node_Stack phis(0);
  VectorSet visited(Thread::current()->resource_area());
  const bool trace = false;
  const bool verify_no_useless_barrier = false;

  wq.push(root);
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    if (n->is_Load()) {
      const bool trace = false;
      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
        if (trace) {tty->print_cr("Load range/klass");}
      } else {
        const TypePtr* adr_type = n->as_Load()->adr_type();

        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
          if (trace) {tty->print_cr("Mark load");}
        } else if (adr_type->isa_instptr() &&
                   adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
          if (trace) {tty->print_cr("Reference.get()");}
        } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Load should have barriers", n);
        }
      }
    } else if (n->is_Store()) {
      const bool trace = false;

      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
        Node* adr = n->in(MemNode::Address);
        bool verify = true;

        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
          adr = adr->in(AddPNode::Address);
          if (adr->is_AddP()) {
            assert(adr->in(AddPNode::Base)->is_top(), "");
            adr = adr->in(AddPNode::Address);
            if (adr->Opcode() == Op_LoadP &&
                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
              if (trace) {tty->print_cr("SATB prebarrier");}
              verify = false;
            }
          }
        }

        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Store should have barriers", n);
        }
      }
      if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CmpP) {
      const bool trace = false;

      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (in1->bottom_type()->isa_oopptr()) {
        if (trace) {tty->print("Verifying"); n->dump();}

        bool mark_inputs = false;
        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
            (in1->is_Con() || in2->is_Con())) {
          if (trace) {tty->print_cr("Comparison against a constant");}
          mark_inputs = true;
        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
          mark_inputs = true;
        } else {
          assert(in2->bottom_type()->isa_oopptr(), "");

          if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
              !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
          }
        }
        if (verify_no_useless_barrier &&
            mark_inputs &&
            (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
             !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
          phis.clear();
          visited.reset();
        }
      }
    } else if (n->is_LoadStore()) {
      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
          !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
      }

      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
      CallNode* call = n->as_Call();

      static struct {
        const char* name;
        struct {
          int pos;
          verify_type t;
        } args[6];
      } calls[] = {
        "aescrypt_encryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "aescrypt_decryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "multiplyToLen",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "squareToLen",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "montgomery_multiply",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "montgomery_square",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "mulAdd",
        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "vectorizedMismatch",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesCRC32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesAdler32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesCRC32C",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "counterMode_AESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
        "cipherBlockChaining_encryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "cipherBlockChaining_decryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "shenandoah_clone_barrier",
        { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "ghash_processBlocks",
        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha1_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha256_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha512_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha1_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha256_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha512_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "encodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
      };

      if (call->is_call_to_arraycopystub()) {
        Node* dest = NULL;
        const TypeTuple* args = n->as_Call()->_tf->domain();
        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
          if (args->field_at(i)->isa_ptr()) {
            j++;
            if (j == 2) {
              dest = n->in(i);
              break;
            }
          }
        }
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
            !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
        }
      } else if (strlen(call->_name) > 5 &&
                 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
        }
      } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
        // skip
      } else {
        const int calls_len = sizeof(calls) / sizeof(calls[0]);
        int i = 0;
        for (; i < calls_len; i++) {
          if (!strcmp(calls[i].name, call->_name)) {
            break;
          }
        }
        if (i != calls_len) {
          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
          for (uint j = 0; j < args_len; j++) {
            int pos = calls[i].args[j].pos;
            if (pos == -1) {
              break;
            }
            if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
            }
          }
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              uint k = 0;
              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
              if (k == args_len) {
                fatal("arg %d for call %s not covered", j, call->_name);
              }
            }
          }
        } else {
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              fatal("%s not covered", call->_name);
            }
          }
        }
      }
    } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      // skip
    } else if (n->is_AddP()
               || n->is_Phi()
               || n->is_ConstraintCast()
               || n->Opcode() == Op_Return
               || n->Opcode() == Op_CMoveP
               || n->Opcode() == Op_CMoveN
               || n->Opcode() == Op_Rethrow
               || n->is_MemBar()
               || n->Opcode() == Op_Conv2B
               || n->Opcode() == Op_SafePoint
               || n->is_CallJava()
               || n->Opcode() == Op_Unlock
               || n->Opcode() == Op_EncodeP
               || n->Opcode() == Op_DecodeN) {
      // nothing to do
    } else {
      static struct {
        int opcode;
        struct {
          int pos;
          verify_type t;
        } inputs[2];
      } others[] = {
        Op_FastLock,
        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_Lock,
        { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
        Op_ArrayCopy,
        { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
        Op_StrCompressedCopy,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_StrInflatedCopy,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_AryEq,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
        Op_StrIndexOf,
        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
        Op_StrComp,
        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
        Op_StrEquals,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
        Op_EncodeISOArray,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_HasNegatives,
        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_CastP2X,
        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_StrIndexOfChar,
        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
      };

      const int others_len = sizeof(others) / sizeof(others[0]);
      int i = 0;
      for (; i < others_len; i++) {
        if (others[i].opcode == n->Opcode()) {
          break;
        }
      }
      uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
      if (i != others_len) {
        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
        for (uint j = 0; j < inputs_len; j++) {
          int pos = others[i].inputs[j].pos;
          if (pos == -1) {
            break;
          }
          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
          }
        }
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            uint k = 0;
            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
            if (k == inputs_len) {
              fatal("arg %d for node %s not covered", j, n->Name());
            }
          }
        }
      } else {
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            fatal("%s not covered", n->Name());
          }
        }
      }
    }

    if (n->is_SafePoint()) {
      SafePointNode* sfpt = n->as_SafePoint();
      if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            phis.clear();
            visited.reset();
          }
        }
      }
    }
  }

  if (verify_no_useless_barrier) {
    for (int i = 0; i < barriers.length(); i++) {
      Node* n = barriers.at(i);
      if (!barriers_used.member(n)) {
        tty->print("XXX useless barrier"); n->dump(-2);
        ShouldNotReachHere();
      }
    }
  }
}
#endif

bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
  // That both nodes have the same control is not sufficient to prove
  // domination, verify that there's no path from d to n
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(d);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == n) {
      return false;
    }
    if (m->is_Phi() && m->in(0)->is_Loop()) {
      assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
    } else {
      for (uint i = 0; i < m->req(); i++) {
        if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
          wq.push(m->in(i));
        }
      }
    }
  }
  return true;
}

bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
  if (d_c != n_c) {
    return phase->is_dominator(d_c, n_c);
  }
  return is_dominator_same_ctrl(d_c, d, n, phase);
}

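// Step from a memory state to its input memory state for the given alias.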
Node* next_mem(Node* mem, int alias) {
  Node* res = NULL;
  if (mem->is_Proj()) {
    res = mem->in(0);
  } else if (mem->is_SafePoint() || mem->is_MemBar()) {
    res = mem->in(TypeFunc::Memory);
  } else if (mem->is_Phi()) {
    res = mem->in(1);
  } else if (mem->is_MergeMem()) {
    res = mem->as_MergeMem()->memory_at(alias);
  } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
    res = mem->in(MemNode::Memory);
  } else {
#ifdef ASSERT
    mem->dump();
#endif
    ShouldNotReachHere();
  }
  return res;
}

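// Walk control flow from c up to dom. Returns NULL if no branch is found,
// the single non-trap If projection if exactly one is allowed and found,
// or NodeSentinel if an unsupported control flow shape is encountered.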
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
  Node* iffproj = NULL;
  while (c != dom) {
    Node* next = phase->idom(c);
    assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
    if (c->is_Region()) {
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(c);
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        if (n == next) {
          continue;
        }
        if (n->is_Region()) {
          for (uint j = 1; j < n->req(); j++) {
            wq.push(n->in(j));
          }
        } else {
          wq.push(n->in(0));
        }
      }
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        assert(n->is_CFG(), "");
        if (n->is_Multi()) {
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* u = n->fast_out(j);
            if (u->is_CFG()) {
              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
                return NodeSentinel;
              }
            }
          }
        }
      }
    } else if (c->is_Proj()) {
      if (c->is_IfProj()) {
        if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
          // continue;
        } else {
          if (!allow_one_proj) {
            return NodeSentinel;
          }
          if (iffproj == NULL) {
            iffproj = c;
          } else {
            return NodeSentinel;
          }
        }
      } else if (c->Opcode() == Op_JumpProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CatchProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
        return NodeSentinel; // unsupported
      } else {
        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
      }
    }
    c = next;
  }
  return iffproj;
}

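// Walk the memory graph up from mem until a memory state whose control
// strictly dominates ctrl is found; returns NULL if a cycle is hit.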
Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
  ResourceMark rm;
  VectorSet wq(Thread::current()->resource_area());
  wq.set(mem->_idx);
  mem_ctrl = phase->ctrl_or_self(mem);
  while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
    mem = next_mem(mem, alias);
    if (wq.test_set(mem->_idx)) {
      return NULL;
    }
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  if (mem->is_MergeMem()) {
    mem = mem->as_MergeMem()->memory_at(alias);
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  return mem;
}

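// Find the bottom (TypePtr::BOTTOM) memory state live at ctrl by walking
// up the dominator tree.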
Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
  Node* mem = NULL;
  Node* c = ctrl;
  do {
    if (c->is_Region()) {
      Node* phi_bottom = NULL;
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            mem = u;
          }
        }
      }
    } else {
      if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
        CallProjections projs;
        c->as_Call()->extract_projections(&projs, true, false);
        if (projs.fallthrough_memproj != NULL) {
          if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
            if (projs.catchall_memproj == NULL) {
              mem = projs.fallthrough_memproj;
            } else {
              if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
                mem = projs.fallthrough_memproj;
              } else {
                assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
                mem = projs.catchall_memproj;
              }
            }
          }
        } else {
          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
          if (proj != NULL &&
              proj->adr_type() == TypePtr::BOTTOM) {
            mem = proj;
          }
        }
      } else {
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
              assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
              assert(mem == NULL, "only one proj");
              mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
      }
    }
    c = phase->idom(c);
  } while (mem == NULL);
  return mem;
}

void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
      uses.push(u);
    }
  }
}

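// Replace the outer strip-mined loop head and loop end with plain
// LoopNode/IfNode equivalents so the loop nest no longer appears strip
// mined to verification.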
static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
  OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
  Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
  phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
  Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
  phase->register_control(new_le, phase->get_loop(le), le->in(0));
  phase->lazy_replace(outer, new_outer);
  phase->lazy_replace(le, new_le);
  inner->clear_strip_mined();
}

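// Emit the "(gc_state & HAS_FORWARDED) != 0" test. On return, ctrl is the
// path taken when the heap has forwarded objects and heap_stable_ctrl the
// path taken when the heap is stable.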
void ShenandoahBarrierC2Support::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
                                                  PhaseIdealLoop* phase) {
  IdealLoopTree* loop = phase->get_loop(ctrl);
  Node* thread = new ThreadLocalNode();
  phase->register_new_node(thread, ctrl);
  Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  phase->set_ctrl(offset, phase->C->root());
  Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
  phase->register_new_node(gc_state_addr, ctrl);
  uint gc_state_idx = Compile::AliasIdxRaw;
  const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
  debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));

  Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
  phase->register_new_node(gc_state, ctrl);
  Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED));
  phase->register_new_node(heap_stable_and, ctrl);
  Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
  phase->register_new_node(heap_stable_cmp, ctrl);
  Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
  phase->register_new_node(heap_stable_test, ctrl);
  IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  phase->register_control(heap_stable_iff, loop, ctrl);

  heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
  phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
  ctrl = new IfTrueNode(heap_stable_iff);
  phase->register_control(ctrl, loop, heap_stable_iff);

  assert(is_heap_stable_test(heap_stable_iff), "Should match the shape");
}

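// If val may be null, emit an explicit null check. On return, ctrl is the
// not-null path and null_ctrl the null path (left untouched otherwise).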
void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
  const Type* val_t = phase->igvn().type(val);
  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
    IdealLoopTree* loop = phase->get_loop(ctrl);
    Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
    phase->register_new_node(null_cmp, ctrl);
    Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
    phase->register_new_node(null_test, ctrl);
    IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    phase->register_control(null_iff, loop, ctrl);
    ctrl = new IfTrueNode(null_iff);
    phase->register_control(ctrl, loop, null_iff);
    null_ctrl = new IfFalseNode(null_iff);
    phase->register_control(null_ctrl, loop, null_iff);
  }
}

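// Clone the null check guarding the uncommon trap at unc_ctrl onto control
// c and return a new CastPP of the value pinned below the cloned check.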
Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(c);
  Node* iff = unc_ctrl->in(0);
  assert(iff->is_If(), "broken");
  Node* new_iff = iff->clone();
  new_iff->set_req(0, c);
  phase->register_control(new_iff, loop, c);
  Node* iffalse = new IfFalseNode(new_iff->as_If());
  phase->register_control(iffalse, loop, new_iff);
  Node* iftrue = new IfTrueNode(new_iff->as_If());
  phase->register_control(iftrue, loop, new_iff);
  c = iftrue;
  const Type *t = phase->igvn().type(val);
  assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
  Node* uncasted_val = val->in(1);
  val = new CastPPNode(uncasted_val, t);
  val->init_req(0, c);
  phase->register_new_node(val, c);
  return val;
}

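// After the null check has been cloned, rewire the uses that were control
// dependent on the original check's other projection so that they depend
// on new_unc_ctrl instead.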
void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
                                                Unique_Node_List& uses, PhaseIdealLoop* phase) {
  IfNode* iff = unc_ctrl->in(0)->as_If();
  Node* proj = iff->proj_out(0);
  assert(proj != unc_ctrl, "bad projection");
  Node* use = proj->unique_ctrl_out();

  assert(use == unc || use->is_Region(), "what else?");

  uses.clear();
  if (use == unc) {
    phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
    for (uint i = 1; i < unc->req(); i++) {
      Node* n = unc->in(i);
      if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
        uses.push(n);
      }
    }
  } else {
    assert(use->is_Region(), "what else?");
    uint idx = 1;
    for (; use->in(idx) != proj; idx++);
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* u = use->fast_out(i);
      if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
        uses.push(u->in(idx));
      }
    }
  }
  for (uint next = 0; next < uses.size(); next++) {
    Node *n = uses.at(next);
    assert(phase->get_ctrl(n) == proj, "bad control");
    phase->set_ctrl_and_loop(n, new_unc_ctrl);
    if (n->in(0) == proj) {
      phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
    }
    for (uint i = 0; i < n->req(); i++) {
      Node* m = n->in(i);
      if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
        uses.push(m);
      }
    }
  }

  phase->igvn().rehash_node_delayed(use);
  int nb = use->replace_edge(proj, new_unc_ctrl);
  assert(nb == 1, "only use expected");
}

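// Emit the collection-set membership test: index the in-cset fast-test
// table with the candidate address. On return, ctrl is the in-cset path
// and not_cset_ctrl the not-in-cset path.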
void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(ctrl);
  Node* raw_rbtrue = new CastP2XNode(ctrl, val);
  phase->register_new_node(raw_rbtrue, ctrl);
  Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
  phase->register_new_node(cset_offset, ctrl);
  Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
  Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
  phase->register_new_node(in_cset_fast_test_adr, ctrl);
  uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
  const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
  debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
  Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
  phase->register_new_node(in_cset_fast_test_load, ctrl);
  Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
  phase->register_new_node(in_cset_fast_test_cmp, ctrl);
  Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
  phase->register_new_node(in_cset_fast_test_test, ctrl);
  IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  phase->register_control(in_cset_fast_test_iff, loop, ctrl);

  not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
  phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);

  ctrl = new IfFalseNode(in_cset_fast_test_iff);
  phase->register_control(ctrl, loop, in_cset_fast_test_iff);
}

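// Emit the call to the load-reference-barrier runtime stub and rewire
// ctrl, val and result_mem to the call's projections.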
void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) {
  IdealLoopTree* loop = phase->get_loop(ctrl);
  const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();

  // The slow path stub consumes and produces raw memory in addition
  // to the existing memory edges
  Node* base = find_bottom_mem(ctrl, phase);
  MergeMemNode* mm = MergeMemNode::make(base);
  mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
  phase->register_new_node(mm, ctrl);

  address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
          CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) :
          CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier);

  address calladdr = is_native ? CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native)
                               : target;
  const char* name = is_native ? "load_reference_barrier_native" : "load_reference_barrier";
  Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);

  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, phase->C->top());
  call->init_req(TypeFunc::Memory, mm);
  call->init_req(TypeFunc::FramePtr, phase->C->top());
  call->init_req(TypeFunc::ReturnAdr, phase->C->top());
  call->init_req(TypeFunc::Parms, val);
  call->init_req(TypeFunc::Parms+1, load_addr);
  phase->register_control(call, loop, ctrl);
  ctrl = new ProjNode(call, TypeFunc::Control);
  phase->register_control(ctrl, loop, call);
  result_mem = new ProjNode(call, TypeFunc::Memory);
  phase->register_new_node(result_mem, call);
  val = new ProjNode(call, TypeFunc::Parms);
  phase->register_new_node(val, call);
  val = new CheckCastPPNode(ctrl, val, obj_type);
  phase->register_new_node(val, ctrl);
}

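// After a barrier has been expanded, move the nodes that were control
// dependent on the barrier's original control onto the new merge region,
// keeping the input raw memory state above the barrier.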
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // it is control dependent on the barrier's input control) must
  // stay above the barrier.
  uses_to_ignore.clear();
  if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
    uses_to_ignore.push(init_raw_mem);
  }
  for (uint next = 0; next < uses_to_ignore.size(); next++) {
    Node *n = uses_to_ignore.at(next);
    for (uint i = 0; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
        uses_to_ignore.push(in);
      }
    }
  }
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    if (u->_idx < last &&
        u != barrier &&
        !uses_to_ignore.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      Node* c = old_c;
      if (c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        phase->igvn().rehash_node_delayed(u);
        int nb = u->replace_edge(ctrl, region);
        if (u->is_CFG()) {
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        --i, imax -= nb;
      }
    }
  }
}

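// Build phis that merge n (reached through the call's fallthrough path)
// with n_clone (reached through the exception path) at every region
// between c and ctrl.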
static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
  Node* region = NULL;
  while (c != ctrl) {
    if (c->is_Region()) {
      region = c;
    }
    c = phase->idom(c);
  }
  assert(region != NULL, "");
  Node* phi = new PhiNode(region, n->bottom_type());
  for (uint j = 1; j < region->req(); j++) {
    Node* in = region->in(j);
    if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
      phi->init_req(j, n);
    } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
      phi->init_req(j, n_clone);
    } else {
      phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
    }
  }
  phase->register_new_node(phi, region);
  return phi;
}

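// Main expansion driver: pin each load reference barrier at a control
// where expansion is legal, then expand it into explicit heap-state and
// in-cset tests plus a slow-path runtime call.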
void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();

  Unique_Node_List uses;
  for (int i = 0; i < state->enqueue_barriers_count(); i++) {
    Node* barrier = state->enqueue_barrier(i);
    Node* ctrl = phase->get_ctrl(barrier);
    IdealLoopTree* loop = phase->get_loop(ctrl);
    if (loop->_head->is_OuterStripMinedLoop()) {
      // Expanding a barrier here will break loop strip mining
      // verification. Transform the loop so the loop nest doesn't
      // appear as strip mined.
      OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
      hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
    }
  }

  Node_Stack stack(0);
  Node_List clones;
  for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
    if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
      continue;
    }

    Node* ctrl = phase->get_ctrl(lrb);
    Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);

    CallStaticJavaNode* unc = NULL;
    Node* unc_ctrl = NULL;
    Node* uncasted_val = val;

    for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
      Node* u = lrb->fast_out(i);
      if (u->Opcode() == Op_CastPP &&
          u->in(0) != NULL &&
          phase->is_dominator(u->in(0), ctrl)) {
        const Type* u_t = phase->igvn().type(u);

        if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
            u->in(0)->Opcode() == Op_IfTrue &&
            u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
            u->in(0)->in(0)->is_If() &&
            u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
            u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
            u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
            u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
            u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
          IdealLoopTree* loop = phase->get_loop(ctrl);
          IdealLoopTree* unc_loop = phase->get_loop(u->in(0));

          if (!unc_loop->is_member(loop)) {
            continue;
          }

          Node* branch = no_branches(ctrl, u->in(0), false, phase);
          assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
          if (branch == NodeSentinel) {
            continue;
          }

          phase->igvn().replace_input_of(u, 1, val);
          phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
          phase->set_ctrl(u, u->in(0));
          phase->set_ctrl(lrb, u->in(0));
          unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
          unc_ctrl = u->in(0);
          val = u;

          for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
            Node* u = val->fast_out(j);
            if (u == lrb) continue;
            phase->igvn().rehash_node_delayed(u);
            int nb = u->replace_edge(val, lrb);
            --j; jmax -= nb;
          }

          RegionNode* r = new RegionNode(3);
          IfNode* iff = unc_ctrl->in(0)->as_If();

          Node* ctrl_use = unc_ctrl->unique_ctrl_out();
          Node* unc_ctrl_clone = unc_ctrl->clone();
          phase->register_control(unc_ctrl_clone, loop, iff);
          Node* c = unc_ctrl_clone;
          Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
          r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));

          phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
          phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
          phase->lazy_replace(c, unc_ctrl);
          c = NULL;
1225           phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
1226           phase->set_ctrl(val, unc_ctrl_clone);
1227 
1228           IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
1229           fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
1230           Node* iff_proj = iff->proj_out(0);
1231           r->init_req(2, iff_proj);
1232           phase->register_control(r, phase->ltree_root(), iff);
1233 
1234           Node* new_bol = new_iff->in(1)->clone();
1235           Node* new_cmp = new_bol->in(1)->clone();
1236           assert(new_cmp->Opcode() == Op_CmpP, "broken");
1237           assert(new_cmp->in(1) == val->in(1), "broken");
1238           new_bol->set_req(1, new_cmp);
1239           new_cmp->set_req(1, lrb);
1240           phase->register_new_node(new_bol, new_iff->in(0));
1241           phase->register_new_node(new_cmp, new_iff->in(0));
1242           phase->igvn().replace_input_of(new_iff, 1, new_bol);
1243           phase->igvn().replace_input_of(new_cast, 1, lrb);
1244 
1245           for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1246             Node* u = lrb->fast_out(i);
1247             if (u == new_cast || u == new_cmp) {
1248               continue;
1249             }
1250             phase->igvn().rehash_node_delayed(u);
1251             int nb = u->replace_edge(lrb, new_cast);
1252             assert(nb > 0, "no update?");
1253             --i; imax -= nb;
1254           }
1255 
1256           for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1257             Node* u = val->fast_out(i);
1258             if (u == lrb) {
1259               continue;
1260             }
1261             phase->igvn().rehash_node_delayed(u);
1262             int nb = u->replace_edge(val, new_cast);
1263             assert(nb > 0, "no update?");
1264             --i; imax -= nb;
1265           }
1266 
1267           ctrl = unc_ctrl_clone;
1268           phase->set_ctrl_and_loop(lrb, ctrl);
1269           break;
1270         }
1271       }
1272     }
1273     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1274       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1275       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1276         // The rethrow call may have too many projections to be
1277         // properly handled here. Given there's no reason for a
1278         // barrier to depend on the call, move it above the call
1279         stack.push(lrb, 0);
1280         do {
1281           Node* n = stack.node();
1282           uint idx = stack.index();
1283           if (idx < n->req()) {
1284             Node* in = n->in(idx);
1285             stack.set_index(idx+1);
1286             if (in != NULL) {
1287               if (phase->has_ctrl(in)) {
1288                 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1289 #ifdef ASSERT
1290                   for (uint i = 0; i < stack.size(); i++) {
1291                     assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1292                   }
1293 #endif
1294                   stack.push(in, 0);
1295                 }
1296               } else {
1297                 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1298               }
1299             }
1300           } else {
1301             phase->set_ctrl(n, call->in(0));
1302             stack.pop();
1303           }
1304         } while (stack.size() > 0);
1305         continue;
1306       }
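           // The barrier is pinned at a Java call: clone it, along with any
           // nodes squeezed between the call and their uses, so one copy is
           // controlled by the call's fallthrough projection and the other by
           // its exception (catch-all) projection.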
1307       CallProjections projs;
1308       call->extract_projections(&projs, false, false);
1309 
1310       Node* lrb_clone = lrb->clone();
1311       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1312       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1313 
1314       stack.push(lrb, 0);
1315       clones.push(lrb_clone);
1316 
1317       do {
1318         assert(stack.size() == clones.size(), "");
1319         Node* n = stack.node();
1320 #ifdef ASSERT
1321         if (n->is_Load()) {
1322           Node* mem = n->in(MemNode::Memory);
1323           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1324             Node* u = mem->fast_out(j);
1325             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1326           }
1327         }
1328 #endif
1329         uint idx = stack.index();
1330         Node* n_clone = clones.at(clones.size()-1);
1331         if (idx < n->outcnt()) {
1332           Node* u = n->raw_out(idx);
1333           Node* c = phase->ctrl_or_self(u);
1334           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1335             stack.set_index(idx+1);
1336             assert(!u->is_CFG(), "");
1337             stack.push(u, 0);
1338             Node* u_clone = u->clone();
1339             int nb = u_clone->replace_edge(n, n_clone);
1340             assert(nb > 0, "should have replaced some uses");
1341             phase->register_new_node(u_clone, projs.catchall_catchproj);
1342             clones.push(u_clone);
1343             phase->set_ctrl(u, projs.fallthrough_catchproj);
1344           } else {
1345             bool replaced = false;
1346             if (u->is_Phi()) {
1347               for (uint k = 1; k < u->req(); k++) {
1348                 if (u->in(k) == n) {
1349                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1350                     phase->igvn().replace_input_of(u, k, n_clone);
1351                     replaced = true;
1352                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1353                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1354                     replaced = true;
1355                   }
1356                 }
1357               }
1358             } else {
1359               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1360                 phase->igvn().rehash_node_delayed(u);
1361                 int nb = u->replace_edge(n, n_clone);
1362                 assert(nb > 0, "should have replaced some uses");
1363                 replaced = true;
1364               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1365                 phase->igvn().rehash_node_delayed(u);
1366                 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1367                 assert(nb > 0, "should have replaced some uses");
1368                 replaced = true;
1369               }
1370             }
1371             if (!replaced) {
1372               stack.set_index(idx+1);
1373             }
1374           }
1375         } else {
1376           stack.pop();
1377           clones.pop();
1378         }
1379       } while (stack.size() > 0);
1380       assert(stack.size() == 0 && clones.size() == 0, "");
1381     }
1382   }
1383 
1384   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1385     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1386     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1387       continue;
1388     }
1389     Node* ctrl = phase->get_ctrl(lrb);
1390     IdealLoopTree* loop = phase->get_loop(ctrl);
1391     if (loop->_head->is_OuterStripMinedLoop()) {
1392       // Expanding a barrier here will break loop strip mining
1393       // verification. Transform the loop so the loop nest doesn't
1394       // appear as strip mined.
1395       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1396       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1397     }
1398   }
1399 
1400   // Expand load-reference-barriers
1401   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1402   Unique_Node_List uses_to_ignore;
1403   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1404     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1405     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1406       phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1407       continue;
1408     }
1409     uint last = phase->C->unique();
1410     Node* ctrl = phase->get_ctrl(lrb);
1411     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1412 
1413 
1414     Node* orig_ctrl = ctrl;
1415 
1416     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1417     Node* init_raw_mem = raw_mem;
1418     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1419 
1420     IdealLoopTree *loop = phase->get_loop(ctrl);
1421     CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
1422     Node* unc_ctrl = NULL;
1423     if (unc != NULL) {
1424       if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
1425         unc = NULL;
1426       } else {
1427         unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
1428       }
1429     }
1430 
1431     Node* uncasted_val = val;
1432     if (unc != NULL) {
1433       uncasted_val = val->in(1);
1434     }
1435 
1436     Node* heap_stable_ctrl = NULL;
1437     Node* null_ctrl = NULL;
1438 
1439     assert(val->bottom_type()->make_oopptr(), "need oop");
1440     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1441 
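         // The expansion gives the barrier roughly this shape (a sketch; the
         // null and cset tests may be skipped when statically redundant):
         //
         //   if (heap is stable)               result = val;  // _heap_stable
         //   else if (val == NULL)             result = val;  // _null_path
         //   else if (!in_collection_set(val)) result = val;  // _not_cset
         //   else result = call lrb_stub(val, load_addr);     // _evac_path
         //
         // The region and phis built below merge those paths.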
1442     enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT };
1443     Node* region = new RegionNode(PATH_LIMIT);
1444     Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
1445     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1446 
1447     // Stable path.
1448     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1449     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1450 
1451     // Heap stable case
1452     region->init_req(_heap_stable, heap_stable_ctrl);
1453     val_phi->init_req(_heap_stable, uncasted_val);
1454     raw_mem_phi->init_req(_heap_stable, raw_mem);
1455 
1456     Node* reg2_ctrl = NULL;
1457     // Null case
1458     test_null(ctrl, val, null_ctrl, phase);
1459     if (null_ctrl != NULL) {
1460       reg2_ctrl = null_ctrl->in(0);
1461       region->init_req(_null_path, null_ctrl);
1462       val_phi->init_req(_null_path, uncasted_val);
1463       raw_mem_phi->init_req(_null_path, raw_mem);
1464     } else {
1465       region->del_req(_null_path);
1466       val_phi->del_req(_null_path);
1467       raw_mem_phi->del_req(_null_path);
1468     }
1469 
1470     // Test for in-cset.
1471     // Wires !in_cset(obj) to slot 2 of region and phis
1472     Node* not_cset_ctrl = NULL;
1473     in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
1474     if (not_cset_ctrl != NULL) {
1475       if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
1476       region->init_req(_not_cset, not_cset_ctrl);
1477       val_phi->init_req(_not_cset, uncasted_val);
1478       raw_mem_phi->init_req(_not_cset, raw_mem);
1479     }
1480 
1481     // Resolve object when orig-value is in cset.
1482     // Make the unconditional resolve for fwdptr.
1483     Node* new_val = uncasted_val;
1484     if (unc_ctrl != NULL) {
1485       // Clone the null check in this branch to allow an implicit null check.
1486       new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
1487       fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
1488 
1489       IfNode* iff = unc_ctrl->in(0)->as_If();
1490       phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
1491     }
1492 
1493     // Call the LRB stub and wire up that path into the _evac_path slot.
1494     Node* result_mem = NULL;
1495 
1496     Node* fwd = new_val;
1497     Node* addr;
1498     if (ShenandoahSelfFixing) {
1499       VectorSet visited(Thread::current()->resource_area());
1500       addr = get_load_addr(phase, visited, lrb);
1501     } else {
1502       addr = phase->igvn().zerocon(T_OBJECT);
1503     }
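         // If the load address is an AddP (possibly an AddP of an AddP),
         // rebase it on a control-dependent CheckCastPP of its base so the
         // address computation cannot float above the barrier's control flow.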
1504     if (addr->Opcode() == Op_AddP) {
1505       Node* orig_base = addr->in(AddPNode::Base);
1506       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), true);
1507       phase->register_new_node(base, ctrl);
1508     if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1509         // Field access
1510         addr = addr->clone();
1511         addr->set_req(AddPNode::Base, base);
1512         addr->set_req(AddPNode::Address, base);
1513         phase->register_new_node(addr, ctrl);
1514       } else {
1515         Node* addr2 = addr->in(AddPNode::Address);
1516         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1517               addr2->in(AddPNode::Base) == orig_base) {
1518           addr2 = addr2->clone();
1519           addr2->set_req(AddPNode::Base, base);
1520           addr2->set_req(AddPNode::Address, base);
1521           phase->register_new_node(addr2, ctrl);
1522           addr = addr->clone();
1523           addr->set_req(AddPNode::Base, base);
1524           addr->set_req(AddPNode::Address, addr2);
1525           phase->register_new_node(addr, ctrl);
1526         }
1527       }
1528     }
1529     call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, lrb->is_native(), phase);
1530     region->init_req(_evac_path, ctrl);
1531     val_phi->init_req(_evac_path, fwd);
1532     raw_mem_phi->init_req(_evac_path, result_mem);
1533 
1534     phase->register_control(region, loop, heap_stable_iff);
1535     Node* out_val = val_phi;
1536     phase->register_new_node(val_phi, region);
1537     phase->register_new_node(raw_mem_phi, region);
1538 
1539     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1540 
1541     ctrl = orig_ctrl;
1542 
1543     if (unc != NULL) {
1544       for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1545         Node* u = val->fast_out(i);
1546         Node* c = phase->ctrl_or_self(u);
1547         if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
1548           phase->igvn().rehash_node_delayed(u);
1549           int nb = u->replace_edge(val, out_val);
1550           --i; imax -= nb;
1551         }
1552       }
1553       if (val->outcnt() == 0) {
1554         phase->igvn()._worklist.push(val);
1555       }
1556     }
1557     phase->igvn().replace_node(lrb, out_val);
1558 
1559     follow_barrier_uses(out_val, ctrl, uses, phase);
1560 
1561     for (uint next = 0; next < uses.size(); next++) {
1562       Node *n = uses.at(next);
1563       assert(phase->get_ctrl(n) == ctrl, "bad control");
1564       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1565       phase->set_ctrl(n, region);
1566       follow_barrier_uses(n, ctrl, uses, phase);
1567     }
1568 
1569     // The slow-path call produces memory: hook the raw memory phi of
1570     // the expanded load reference barrier into the rest of the graph,
1571     // which may require adding memory phis at every post-dominated
1572     // region and at enclosing loop heads. Use the memory state
1573     // collected in memory_nodes to fix the memory graph, and update
1574     // that memory state as we go.
1575     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1576   }
1577   // Done expanding load-reference-barriers.
1578   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1579 
1580   for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
1581     Node* barrier = state->enqueue_barrier(i);
1582     Node* pre_val = barrier->in(1);
1583 
1584     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1585       ShouldNotReachHere();
1586       continue;
1587     }
1588 
1589     Node* ctrl = phase->get_ctrl(barrier);
1590 
1591     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1592       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1593       ctrl = ctrl->in(0)->in(0);
1594       phase->set_ctrl(barrier, ctrl);
1595     } else if (ctrl->is_CallRuntime()) {
1596       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1597       ctrl = ctrl->in(0);
1598       phase->set_ctrl(barrier, ctrl);
1599     }
1600 
1601     Node* init_ctrl = ctrl;
1602     IdealLoopTree* loop = phase->get_loop(ctrl);
1603     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1604     Node* init_raw_mem = raw_mem;
1605     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1606     Node* heap_stable_ctrl = NULL;
1607     Node* null_ctrl = NULL;
1608     uint last = phase->C->unique();
1609 
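         // The expansion gives the pre-barrier roughly this shape (a sketch):
         //
         //   if (heap is not stable) {          // marking may be in progress
         //     if (pre_val != NULL) {
         //       index = thread->satb_index;
         //       if (index != 0) {              // room left in the SATB queue
         //         index -= sizeof(intptr_t);
         //         thread->satb_buffer[index] = pre_val;
         //         thread->satb_index = index;
         //       } else {
         //         call write_ref_field_pre_entry(pre_val, thread);
         //       }
         //     }
         //   }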
1610     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1611     Node* region = new RegionNode(PATH_LIMIT);
1612     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1613 
1614     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1615     Node* region2 = new RegionNode(PATH_LIMIT2);
1616     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1617 
1618     // Stable path.
1619     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1620     region->init_req(_heap_stable, heap_stable_ctrl);
1621     phi->init_req(_heap_stable, raw_mem);
1622 
1623     // Null path
1624     Node* reg2_ctrl = NULL;
1625     test_null(ctrl, pre_val, null_ctrl, phase);
1626     if (null_ctrl != NULL) {
1627       reg2_ctrl = null_ctrl->in(0);
1628       region2->init_req(_null_path, null_ctrl);
1629       phi2->init_req(_null_path, raw_mem);
1630     } else {
1631       region2->del_req(_null_path);
1632       phi2->del_req(_null_path);
1633     }
1634 
1635     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1636     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1637     Node* thread = new ThreadLocalNode();
1638     phase->register_new_node(thread, ctrl);
1639     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1640     phase->register_new_node(buffer_adr, ctrl);
1641     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1642     phase->register_new_node(index_adr, ctrl);
1643 
1644     BasicType index_bt = TypeX_X->basic_type();
1645     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading SATBMarkQueue::_index with wrong size.");
1646     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1647     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1648     phase->register_new_node(index, ctrl);
1649     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1650     phase->register_new_node(index_cmp, ctrl);
1651     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1652     phase->register_new_node(index_test, ctrl);
1653     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1654     if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1655     phase->register_control(queue_full_iff, loop, ctrl);
1656     Node* not_full = new IfTrueNode(queue_full_iff);
1657     phase->register_control(not_full, loop, queue_full_iff);
1658     Node* full = new IfFalseNode(queue_full_iff);
1659     phase->register_control(full, loop, queue_full_iff);
1660 
1661     ctrl = not_full;
1662 
1663     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1664     phase->register_new_node(next_index, ctrl);
1665 
1666     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1667     phase->register_new_node(buffer, ctrl);
1668     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1669     phase->register_new_node(log_addr, ctrl);
1670     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1671     phase->register_new_node(log_store, ctrl);
1672     // update the index
1673     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1674     phase->register_new_node(index_update, ctrl);
1675 
1676     // Fast-path case
1677     region2->init_req(_fast_path, ctrl);
1678     phi2->init_req(_fast_path, index_update);
1679 
1680     ctrl = full;
1681 
1682     Node* base = find_bottom_mem(ctrl, phase);
1683 
1684     MergeMemNode* mm = MergeMemNode::make(base);
1685     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1686     phase->register_new_node(mm, ctrl);
1687 
1688     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1689     call->init_req(TypeFunc::Control, ctrl);
1690     call->init_req(TypeFunc::I_O, phase->C->top());
1691     call->init_req(TypeFunc::Memory, mm);
1692     call->init_req(TypeFunc::FramePtr, phase->C->top());
1693     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1694     call->init_req(TypeFunc::Parms, pre_val);
1695     call->init_req(TypeFunc::Parms+1, thread);
1696     phase->register_control(call, loop, ctrl);
1697 
1698     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1699     phase->register_control(ctrl_proj, loop, call);
1700     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1701     phase->register_new_node(mem_proj, call);
1702 
1703     // Slow-path case
1704     region2->init_req(_slow_path, ctrl_proj);
1705     phi2->init_req(_slow_path, mem_proj);
1706 
1707     phase->register_control(region2, loop, reg2_ctrl);
1708     phase->register_new_node(phi2, region2);
1709 
1710     region->init_req(_heap_unstable, region2);
1711     phi->init_req(_heap_unstable, phi2);
1712 
1713     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1714     phase->register_new_node(phi, region);
1715 
1716     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1717     for (uint next = 0; next < uses.size(); next++) {
1718       Node *n = uses.at(next);
1719       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1720       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1721       phase->set_ctrl(n, region);
1722       follow_barrier_uses(n, init_ctrl, uses, phase);
1723     }
1724     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1725 
1726     phase->igvn().replace_node(barrier, pre_val);
1727   }
1728   assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1729 
1730 }
1731 
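     // Walk back from the barrier's input value to the load it came from and
     // return that load's address, so the LRB stub can self-fix the location
     // the value was loaded from. A zero constant is returned when no single
     // address can be determined.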
1732 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1733   if (visited.test_set(in->_idx)) {
1734     return NULL;
1735   }
1736   switch (in->Opcode()) {
1737     case Op_Proj:
1738       return get_load_addr(phase, visited, in->in(0));
1739     case Op_CastPP:
1740     case Op_CheckCastPP:
1741     case Op_DecodeN:
1742     case Op_EncodeP:
1743       return get_load_addr(phase, visited, in->in(1));
1744     case Op_LoadN:
1745     case Op_LoadP:
1746       return in->in(MemNode::Address);
1747     case Op_CompareAndExchangeN:
1748     case Op_CompareAndExchangeP:
1749     case Op_GetAndSetN:
1750     case Op_GetAndSetP:
1751     case Op_ShenandoahCompareAndExchangeP:
1752     case Op_ShenandoahCompareAndExchangeN:
1753       // These instructions would just have stored a different value
1754       // into the field; there is no use in attempting to fix it here.
1755       return phase->igvn().zerocon(T_OBJECT);
1756     case Op_CMoveP:
1757     case Op_CMoveN: {
1758       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1759       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1760       // Handle unambiguous cases: single address reported on both branches.
1761       if (t != NULL && f == NULL) return t;
1762       if (t == NULL && f != NULL) return f;
1763       if (t != NULL && t == f)    return t;
1764       // Ambiguity.
1765       return phase->igvn().zerocon(T_OBJECT);
1766     }
1767     case Op_Phi: {
1768       Node* addr = NULL;
1769       for (uint i = 1; i < in->req(); i++) {
1770         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1771         if (addr == NULL) {
1772           addr = addr1;
1773         }
1774         if (addr != addr1) {
1775           return phase->igvn().zerocon(T_OBJECT);
1776         }
1777       }
1778       return addr;
1779     }
1780     case Op_ShenandoahLoadReferenceBarrier:
1781       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1782     case Op_ShenandoahEnqueueBarrier:
1783       return get_load_addr(phase, visited, in->in(1));
1784     case Op_CallDynamicJava:
1785     case Op_CallLeaf:
1786     case Op_CallStaticJava:
1787     case Op_ConN:
1788     case Op_ConP:
1789     case Op_Parm:
1790       return phase->igvn().zerocon(T_OBJECT);
1791     default:
1792 #ifdef ASSERT
1793       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1794 #endif
1795       return phase->igvn().zerocon(T_OBJECT);
1796   }
1797 
1798 }
1799 
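     // Hoist the gc state load feeding a heap-stable test to the loop entry so
     // the test becomes loop invariant and the loop can be unswitched on it.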
1800 void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1801   IdealLoopTree *loop = phase->get_loop(iff);
1802   Node* loop_head = loop->_head;
1803   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1804 
1805   Node* bol = iff->in(1);
1806   Node* cmp = bol->in(1);
1807   Node* andi = cmp->in(1);
1808   Node* load = andi->in(1);
1809 
1810   assert(is_gc_state_load(load), "broken");
1811   if (!phase->is_dominator(load->in(0), entry_c)) {
1812     Node* mem_ctrl = NULL;
1813     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1814     load = load->clone();
1815     load->set_req(MemNode::Memory, mem);
1816     load->set_req(0, entry_c);
1817     phase->register_new_node(load, entry_c);
1818     andi = andi->clone();
1819     andi->set_req(1, load);
1820     phase->register_new_node(andi, entry_c);
1821     cmp = cmp->clone();
1822     cmp->set_req(1, andi);
1823     phase->register_new_node(cmp, entry_c);
1824     bol = bol->clone();
1825     bol->set_req(1, cmp);
1826     phase->register_new_node(bol, entry_c);
1827 
1828     Node* old_bol = iff->in(1);
1829     phase->igvn().replace_input_of(iff, 1, bol);
1830   }
1831 }
1832 
1833 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1834   if (!n->is_If() || n->is_CountedLoopEnd()) {
1835     return false;
1836   }
1837   Node* region = n->in(0);
1838 
1839   if (!region->is_Region()) {
1840     return false;
1841   }
1842   Node* dom = phase->idom(region);
1843   if (!dom->is_If()) {
1844     return false;
1845   }
1846 
1847   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1848     return false;
1849   }
1850 
1851   IfNode* dom_if = dom->as_If();
1852   Node* proj_true = dom_if->proj_out(1);
1853   Node* proj_false = dom_if->proj_out(0);
1854 
1855   for (uint i = 1; i < region->req(); i++) {
1856     if (phase->is_dominator(proj_true, region->in(i))) {
1857       continue;
1858     }
1859     if (phase->is_dominator(proj_false, region->in(i))) {
1860       continue;
1861     }
1862     return false;
1863   }
1864 
1865   return true;
1866 }
1867 
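     // When a heap-stable test is dominated by an identical test, replace its
     // condition with a phi of constants derived from the dominating test's
     // projections and let split-if remove the now-redundant check.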
1868 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1869   assert(is_heap_stable_test(n), "no other tests");
1870   if (identical_backtoback_ifs(n, phase)) {
1871     Node* n_ctrl = n->in(0);
1872     if (phase->can_split_if(n_ctrl)) {
1873       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1874       if (is_heap_stable_test(n)) {
1875         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1876         assert(is_gc_state_load(gc_state_load), "broken");
1877         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1878         assert(is_gc_state_load(dom_gc_state_load), "broken");
1879         if (gc_state_load != dom_gc_state_load) {
1880           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1881         }
1882       }
1883       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1884       Node* proj_true = dom_if->proj_out(1);
1885       Node* proj_false = dom_if->proj_out(0);
1886       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1887       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1888 
1889       for (uint i = 1; i < n_ctrl->req(); i++) {
1890         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1891           bolphi->init_req(i, con_true);
1892         } else {
1893           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1894           bolphi->init_req(i, con_false);
1895         }
1896       }
1897       phase->register_new_node(bolphi, n_ctrl);
1898       phase->igvn().replace_input_of(n, 1, bolphi);
1899       phase->do_split_if(n);
1900     }
1901   }
1902 }
1903 
1904 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1905   // Find first invariant test that doesn't exit the loop
1906   LoopNode *head = loop->_head->as_Loop();
1907   IfNode* unswitch_iff = NULL;
1908   Node* n = head->in(LoopNode::LoopBackControl);
1909   int loop_has_sfpts = -1;
1910   while (n != head) {
1911     Node* n_dom = phase->idom(n);
1912     if (n->is_Region()) {
1913       if (n_dom->is_If()) {
1914         IfNode* iff = n_dom->as_If();
1915         if (iff->in(1)->is_Bool()) {
1916           BoolNode* bol = iff->in(1)->as_Bool();
1917           if (bol->in(1)->is_Cmp()) {
1918             // If the condition is invariant and not a loop exit,
1919             // then we have found a reason to unswitch.
1920             if (is_heap_stable_test(iff) &&
1921                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1922               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1923               if (loop_has_sfpts == -1) {
1924                 for(uint i = 0; i < loop->_body.size(); i++) {
1925                   Node *m = loop->_body[i];
1926                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1927                     loop_has_sfpts = 1;
1928                     break;
1929                   }
1930                 }
1931                 if (loop_has_sfpts == -1) {
1932                   loop_has_sfpts = 0;
1933                 }
1934               }
1935               if (!loop_has_sfpts) {
1936                 unswitch_iff = iff;
1937               }
1938             }
1939           }
1940         }
1941       }
1942     }
1943     n = n_dom;
1944   }
1945   return unswitch_iff;
1946 }
1947 
1948 
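     // Post-expansion cleanups: common up gc state loads, merge back-to-back
     // heap-stable tests, and unswitch loops on an invariant heap-stable test.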
1949 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1950   Node_List heap_stable_tests;
1951   Node_List gc_state_loads;
1952   stack.push(phase->C->start(), 0);
1953   do {
1954     Node* n = stack.node();
1955     uint i = stack.index();
1956 
1957     if (i < n->outcnt()) {
1958       Node* u = n->raw_out(i);
1959       stack.set_index(i+1);
1960       if (!visited.test_set(u->_idx)) {
1961         stack.push(u, 0);
1962       }
1963     } else {
1964       stack.pop();
1965       if (ShenandoahCommonGCStateLoads && is_gc_state_load(n)) {
1966         gc_state_loads.push(n);
1967       }
1968       if (n->is_If() && is_heap_stable_test(n)) {
1969         heap_stable_tests.push(n);
1970       }
1971     }
1972   } while (stack.size() > 0);
1973 
1974   bool progress;
1975   do {
1976     progress = false;
1977     for (uint i = 0; i < gc_state_loads.size(); i++) {
1978       Node* n = gc_state_loads.at(i);
1979       if (n->outcnt() != 0) {
1980         progress |= try_common_gc_state_load(n, phase);
1981       }
1982     }
1983   } while (progress);
1984 
1985   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1986     Node* n = heap_stable_tests.at(i);
1987     assert(is_heap_stable_test(n), "only evacuation test");
1988     merge_back_to_back_tests(n, phase);
1989   }
1990 
1991   if (!phase->C->major_progress()) {
1992     VectorSet seen(Thread::current()->resource_area());
1993     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1994       Node* n = heap_stable_tests.at(i);
1995       IdealLoopTree* loop = phase->get_loop(n);
1996       if (loop != phase->ltree_root() &&
1997           loop->_child == NULL &&
1998           !loop->_irreducible) {
1999         LoopNode* head = loop->_head->as_Loop();
2000         if ((!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
2001             !seen.test_set(head->_idx)) {
2002           IfNode* iff = find_unswitching_candidate(loop, phase);
2003           if (iff != NULL) {
2004             Node* bol = iff->in(1);
2005             if (head->is_strip_mined()) {
2006               head->verify_strip_mined(0);
2007             }
2008             move_heap_stable_test_out_of_loop(iff, phase);
2009 
2010             AutoNodeBudget node_budget(phase);
2011 
2012             if (loop->policy_unswitching(phase)) {
2013               if (head->is_strip_mined()) {
2014                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
2015                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
2016               }
2017               phase->do_unswitching(loop, old_new);
2018             } else {
2019               // Not proceeding with unswitching. Move the load back
2020               // into the loop.
2021               phase->igvn().replace_input_of(iff, 1, bol);
2022             }
2023           }
2024         }
2025       }
2026     }
2027   }
2028 }
2029 
2030 #ifdef ASSERT
2031 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
2032   const bool trace = false;
2033   ResourceMark rm;
2034   Unique_Node_List nodes;
2035   Unique_Node_List controls;
2036   Unique_Node_List memories;
2037 
2038   nodes.push(root);
2039   for (uint next = 0; next < nodes.size(); next++) {
2040     Node *n  = nodes.at(next);
2041     if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
2042       controls.push(n);
2043       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
2044       for (uint next2 = 0; next2 < controls.size(); next2++) {
2045         Node *m = controls.at(next2);
2046         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2047           Node* u = m->fast_out(i);
2048           if (u->is_CFG() && !u->is_Root() &&
2049               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
2050               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
2051             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
2052             controls.push(u);
2053           }
2054         }
2055       }
2056       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
2057       for (uint next2 = 0; next2 < memories.size(); next2++) {
2058         Node *m = memories.at(next2);
2059         assert(m->bottom_type() == Type::MEMORY, "");
2060         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2061           Node* u = m->fast_out(i);
2062           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
2063             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2064             memories.push(u);
2065           } else if (u->is_LoadStore()) {
2066             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
2067             memories.push(u->find_out_with(Op_SCMemProj));
2068           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
2069             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2070             memories.push(u);
2071           } else if (u->is_Phi()) {
2072             assert(u->bottom_type() == Type::MEMORY, "");
2073             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
2074               assert(controls.member(u->in(0)), "");
2075               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2076               memories.push(u);
2077             }
2078           } else if (u->is_SafePoint() || u->is_MemBar()) {
2079             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2080               Node* uu = u->fast_out(j);
2081               if (uu->bottom_type() == Type::MEMORY) {
2082                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
2083                 memories.push(uu);
2084               }
2085             }
2086           }
2087         }
2088       }
2089       for (uint next2 = 0; next2 < controls.size(); next2++) {
2090         Node *m = controls.at(next2);
2091         if (m->is_Region()) {
2092           bool all_in = true;
2093           for (uint i = 1; i < m->req(); i++) {
2094             if (!controls.member(m->in(i))) {
2095               all_in = false;
2096               break;
2097             }
2098           }
2099           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
2100           bool found_phi = false;
2101           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
2102             Node* u = m->fast_out(j);
2103             if (u->is_Phi() && memories.member(u)) {
2104               found_phi = true;
2105               for (uint i = 1; i < u->req() && found_phi; i++) {
2106                 Node* k = u->in(i);
2107                 if (memories.member(k) != controls.member(m->in(i))) {
2108                   found_phi = false;
2109                 }
2110               }
2111             }
2112           }
2113           assert(found_phi || all_in, "");
2114         }
2115       }
2116       controls.clear();
2117       memories.clear();
2118     }
2119     for (uint i = 0; i < n->len(); ++i) {
2120       Node *m = n->in(i);
2121       if (m != NULL) {
2122         nodes.push(m);
2123       }
2124     }
2125   }
2126 }
2127 #endif
2128 
2129 ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
2130   ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
2131 }
2132 
2133 const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
2134   if (in(1) == NULL || in(1)->is_top()) {
2135     return Type::TOP;
2136   }
2137   const Type* t = in(1)->bottom_type();
2138   if (t == TypePtr::NULL_PTR) {
2139     return t;
2140   }
2141   return t->is_oopptr();
2142 }
2143 
2144 const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
2145   if (in(1) == NULL) {
2146     return Type::TOP;
2147   }
2148   const Type* t = phase->type(in(1));
2149   if (t == Type::TOP) {
2150     return Type::TOP;
2151   }
2152   if (t == TypePtr::NULL_PTR) {
2153     return t;
2154   }
2155   return t->is_oopptr();
2156 }
2157 
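     // Classify whether value n still needs an enqueue (SATB) barrier: freshly
     // allocated objects, already-enqueued values, null and constant oops never
     // do; phis and cmoves depend on their inputs; anything else does.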
2158 int ShenandoahEnqueueBarrierNode::needed(Node* n) {
2159   if (n == NULL ||
2160       n->is_Allocate() ||
2161       n->Opcode() == Op_ShenandoahEnqueueBarrier ||
2162       n->bottom_type() == TypePtr::NULL_PTR ||
2163       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2164     return NotNeeded;
2165   }
2166   if (n->is_Phi() ||
2167       n->is_CMove()) {
2168     return MaybeNeeded;
2169   }
2170   return Needed;
2171 }
2172 
2173 Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
2174   for (;;) {
2175     if (n == NULL) {
2176       return n;
2177     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2178       return n;
2179     } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2180       return n;
2181     } else if (n->is_ConstraintCast() ||
2182                n->Opcode() == Op_DecodeN ||
2183                n->Opcode() == Op_EncodeP) {
2184       n = n->in(1);
2185     } else if (n->is_Proj()) {
2186       n = n->in(0);
2187     } else {
2188       return n;
2189     }
2190   }
2191   ShouldNotReachHere();
2192   return NULL;
2193 }
2194 
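     // The barrier is redundant if, after skipping casts and projections, every
     // value reachable through phis and cmoves never needs enqueueing; in that
     // case the barrier is replaced by its input.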
2195 Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
2196   PhaseIterGVN* igvn = phase->is_IterGVN();
2197 
2198   Node* n = next(in(1));
2199 
2200   int cont = needed(n);
2201 
2202   if (cont == NotNeeded) {
2203     return in(1);
2204   } else if (cont == MaybeNeeded) {
2205     if (igvn == NULL) {
2206       phase->record_for_igvn(this);
2207       return this;
2208     } else {
2209       ResourceMark rm;
2210       Unique_Node_List wq;
2211       uint wq_i = 0;
2212 
2213       for (;;) {
2214         if (n->is_Phi()) {
2215           for (uint i = 1; i < n->req(); i++) {
2216             Node* m = n->in(i);
2217             if (m != NULL) {
2218               wq.push(m);
2219             }
2220           }
2221         } else {
2222           assert(n->is_CMove(), "nothing else here");
2223           Node* m = n->in(CMoveNode::IfFalse);
2224           wq.push(m);
2225           m = n->in(CMoveNode::IfTrue);
2226           wq.push(m);
2227         }
2228         Node* orig_n = NULL;
2229         do {
2230           if (wq_i >= wq.size()) {
2231             return in(1);
2232           }
2233           n = wq.at(wq_i);
2234           wq_i++;
2235           orig_n = n;
2236           n = next(n);
2237           cont = needed(n);
2238           if (cont == Needed) {
2239             return this;
2240           }
2241         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2242       }
2243     }
2244   }
2245 
2246   return this;
2247 }
2248 
2249 #ifdef ASSERT
2250 static bool has_never_branch(Node* root) {
2251   for (uint i = 1; i < root->req(); i++) {
2252     Node* in = root->in(i);
2253     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2254       return true;
2255     }
2256   }
2257   return false;
2258 }
2259 #endif
2260 
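     // Build a map from CFG node to the raw memory state valid at that point.
     // First record memory nodes at the control they are pinned at, then
     // propagate the state over the CFG in reverse post order, creating memory
     // phis at regions where incoming states differ.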
2261 void MemoryGraphFixer::collect_memory_nodes() {
2262   Node_Stack stack(0);
2263   VectorSet visited(Thread::current()->resource_area());
2264   Node_List regions;
2265 
2266   // Walk the raw memory graph and create a mapping from CFG node to
2267   // memory node. Exclude phis for now.
2268   stack.push(_phase->C->root(), 1);
2269   do {
2270     Node* n = stack.node();
2271     int opc = n->Opcode();
2272     uint i = stack.index();
2273     if (i < n->req()) {
2274       Node* mem = NULL;
2275       if (opc == Op_Root) {
2276         Node* in = n->in(i);
2277         int in_opc = in->Opcode();
2278         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2279           mem = in->in(TypeFunc::Memory);
2280         } else if (in_opc == Op_Halt) {
2281           if (!in->in(0)->is_Region()) {
2282             Node* proj = in->in(0);
2283             assert(proj->is_Proj(), "");
2284             Node* in = proj->in(0);
2285             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2286             if (in->is_CallStaticJava()) {
2287               mem = in->in(TypeFunc::Memory);
2288             } else if (in->Opcode() == Op_Catch) {
2289               Node* call = in->in(0)->in(0);
2290               assert(call->is_Call(), "");
2291               mem = call->in(TypeFunc::Memory);
2292             } else if (in->Opcode() == Op_NeverBranch) {
2293               ResourceMark rm;
2294               Unique_Node_List wq;
2295               wq.push(in);
2296               wq.push(in->as_Multi()->proj_out(0));
2297               for (uint j = 1; j < wq.size(); j++) {
2298                 Node* c = wq.at(j);
2299                 assert(!c->is_Root(), "shouldn't leave loop");
2300                 if (c->is_SafePoint()) {
2301                   assert(mem == NULL, "only one safepoint");
2302                   mem = c->in(TypeFunc::Memory);
2303                 }
2304                 for (DUIterator_Fast kmax, k = c->fast_outs(kmax); k < kmax; k++) {
2305                   Node* u = c->fast_out(k);
2306                   if (u->is_CFG()) {
2307                     wq.push(u);
2308                   }
2309                 }
2310               }
2311               assert(mem != NULL, "should have found safepoint");
2312             }
2313           }
2314         } else {
2315 #ifdef ASSERT
2316           n->dump();
2317           in->dump();
2318 #endif
2319           ShouldNotReachHere();
2320         }
2321       } else {
2322         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2323         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2324         mem = n->in(i);
2325       }
2326       i++;
2327       stack.set_index(i);
2328       if (mem == NULL) {
2329         continue;
2330       }
2331       for (;;) {
2332         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2333           break;
2334         }
2335         if (mem->is_Phi()) {
2336           stack.push(mem, 2);
2337           mem = mem->in(1);
2338         } else if (mem->is_Proj()) {
2339           stack.push(mem, mem->req());
2340           mem = mem->in(0);
2341         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2342           mem = mem->in(TypeFunc::Memory);
2343         } else if (mem->is_MergeMem()) {
2344           MergeMemNode* mm = mem->as_MergeMem();
2345           mem = mm->memory_at(_alias);
2346         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2347           assert(_alias == Compile::AliasIdxRaw, "");
2348           stack.push(mem, mem->req());
2349           mem = mem->in(MemNode::Memory);
2350         } else {
2351 #ifdef ASSERT
2352           mem->dump();
2353 #endif
2354           ShouldNotReachHere();
2355         }
2356       }
2357     } else {
2358       if (n->is_Phi()) {
2359         // Nothing
2360       } else if (!n->is_Root()) {
2361         Node* c = get_ctrl(n);
2362         _memory_nodes.map(c->_idx, n);
2363       }
2364       stack.pop();
2365     }
2366   } while (stack.is_nonempty());
2367 
2368   // Iterate over CFG nodes in reverse post order and propagate the memory
2369   // state to compute the memory state at regions, creating new phis if needed.
2370   Node_List rpo_list;
2371   visited.clear();
2372   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2373   Node* root = rpo_list.pop();
2374   assert(root == _phase->C->root(), "");
2375 
2376   const bool trace = false;
2377 #ifdef ASSERT
2378   if (trace) {
2379     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2380       Node* c = rpo_list.at(i);
2381       if (_memory_nodes[c->_idx] != NULL) {
2382         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2383       }
2384     }
2385   }
2386 #endif
2387   uint last = _phase->C->unique();
2388 
2389 #ifdef ASSERT
2390   uint8_t max_depth = 0;
2391   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2392     IdealLoopTree* lpt = iter.current();
2393     max_depth = MAX2(max_depth, lpt->_nest);
2394   }
2395 #endif
2396 
2397   bool progress = true;
2398   int iteration = 0;
2399   Node_List dead_phis;
2400   while (progress) {
2401     progress = false;
2402     iteration++;
2403     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2404     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2405     IdealLoopTree* last_updated_ilt = NULL;
2406     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2407       Node* c = rpo_list.at(i);
2408 
2409       Node* prev_mem = _memory_nodes[c->_idx];
2410       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2411         Node* prev_region = regions[c->_idx];
2412         Node* unique = NULL;
2413         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2414           Node* m = _memory_nodes[c->in(j)->_idx];
2415           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2416           if (m != NULL) {
2417             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2418               assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), "");
2419               // continue
2420             } else if (unique == NULL) {
2421               unique = m;
2422             } else if (m == unique) {
2423               // continue
2424             } else {
2425               unique = NodeSentinel;
2426             }
2427           }
2428         }
2429         assert(unique != NULL, "empty phi???");
2430         if (unique != NodeSentinel) {
2431           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2432             dead_phis.push(prev_region);
2433           }
2434           regions.map(c->_idx, unique);
2435         } else {
2436           Node* phi = NULL;
2437           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2438             phi = prev_region;
2439             for (uint k = 1; k < c->req(); k++) {
2440               Node* m = _memory_nodes[c->in(k)->_idx];
2441               assert(m != NULL, "expect memory state");
2442               phi->set_req(k, m);
2443             }
2444           } else {
2445             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2446               Node* u = c->fast_out(j);
2447               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2448                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2449                 phi = u;
2450                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2451                   Node* m = _memory_nodes[c->in(k)->_idx];
2452                   assert(m != NULL, "expect memory state");
2453                   if (u->in(k) != m) {
2454                     phi = NULL;
2455                   }
2456                 }
2457               }
2458             }
2459             if (phi == NULL) {
2460               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2461               for (uint k = 1; k < c->req(); k++) {
2462                 Node* m = _memory_nodes[c->in(k)->_idx];
2463                 assert(m != NULL, "expect memory state");
2464                 phi->init_req(k, m);
2465               }
2466             }
2467           }
2468           assert(phi != NULL, "");
2469           regions.map(c->_idx, phi);
2470         }
2471         Node* current_region = regions[c->_idx];
2472         if (current_region != prev_region) {
2473           progress = true;
2474           if (prev_region == prev_mem) {
2475             _memory_nodes.map(c->_idx, current_region);
2476           }
2477         }
2478       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2479         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2480         assert(m != NULL, "expect memory state");
2481         if (m != prev_mem) {
2482           _memory_nodes.map(c->_idx, m);
2483           progress = true;
2484         }
2485       }
2486 #ifdef ASSERT
2487       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2488 #endif
2489     }
2490   }
2491 
2492   // Replace existing phi with computed memory state for that region
2493   // if different (could be a new phi or a dominating memory node if
2494   // that phi was found to be useless).
2495   while (dead_phis.size() > 0) {
2496     Node* n = dead_phis.pop();
2497     n->replace_by(_phase->C->top());
2498     n->destruct();
2499   }
2500   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2501     Node* c = rpo_list.at(i);
2502     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2503       Node* n = regions[c->_idx];
2504       if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2505         _phase->register_new_node(n, c);
2506       }
2507     }
2508   }
2509   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2510     Node* c = rpo_list.at(i);
2511     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2512       Node* n = regions[c->_idx];
2513       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2514         Node* u = c->fast_out(i);
2515         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2516             u != n) {
2517           if (u->adr_type() == TypePtr::BOTTOM) {
2518             fix_memory_uses(u, n, n, c);
2519           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2520             _phase->lazy_replace(u, n);
2521             --i; --imax;
2522           }
2523         }
2524       }
2525     }
2526   }
2527 }
2528 
2529 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2530   Node* c = _phase->get_ctrl(n);
2531   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2532     assert(c == n->in(0), "");
2533     CallNode* call = c->as_Call();
2534     CallProjections projs;
2535     call->extract_projections(&projs, true, false);
2536     if (projs.catchall_memproj != NULL) {
2537       if (projs.fallthrough_memproj == n) {
2538         c = projs.fallthrough_catchproj;
2539       } else {
2540         assert(projs.catchall_memproj == n, "");
2541         c = projs.catchall_catchproj;
2542       }
2543     }
2544   }
2545   return c;
2546 }
2547 
2548 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2549   if (_phase->has_ctrl(n))
2550     return get_ctrl(n);
2551   else {
2552     assert (n->is_CFG(), "must be a CFG node");
2553     return n;
2554   }
2555 }
2556 
2557 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2558   return m != NULL && get_ctrl(m) == c;
2559 }
2560 
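     // Return the memory state valid at ctrl for the alias class being fixed,
     // walking up the dominator tree until a recorded memory node that is
     // valid at that control is found.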
2561 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2562   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
2563   Node* mem = _memory_nodes[ctrl->_idx];
2564   Node* c = ctrl;
2565   while (!mem_is_valid(mem, c) &&
2566          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2567     c = _phase->idom(c);
2568     mem = _memory_nodes[c->_idx];
2569   }
2570   if (n != NULL && mem_is_valid(mem, c)) {
2571     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2572       mem = next_mem(mem, _alias);
2573     }
2574     if (mem->is_MergeMem()) {
2575       mem = mem->as_MergeMem()->memory_at(_alias);
2576     }
2577     if (!mem_is_valid(mem, c)) {
2578       do {
2579         c = _phase->idom(c);
2580         mem = _memory_nodes[c->_idx];
2581       } while (!mem_is_valid(mem, c) &&
2582                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2583     }
2584   }
2585   assert(mem->bottom_type() == Type::MEMORY, "");
2586   return mem;
2587 }
2588 
2589 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2590   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2591     Node* use = region->fast_out(i);
2592     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2593         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2594       return true;
2595     }
2596   }
2597   return false;
2598 }
2599 
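     // Hook new_mem (produced at new_ctrl) into the memory graph: either splice
     // it into the chain of raw memory users between mem and mem_for_ctrl, or,
     // when the memory state at ctrl is unchanged, walk the CFG below new_ctrl
     // and add memory phis at regions that now merge old and new memory states.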
2600 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2601   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2602   const bool trace = false;
2603   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2604   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2605   GrowableArray<Node*> phis;
2606   if (mem_for_ctrl != mem) {
2607     Node* old = mem_for_ctrl;
2608     Node* prev = NULL;
2609     while (old != mem) {
2610       prev = old;
2611       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2612         assert(_alias == Compile::AliasIdxRaw, "");
2613         old = old->in(MemNode::Memory);
2614       } else if (old->Opcode() == Op_SCMemProj) {
2615         assert(_alias == Compile::AliasIdxRaw, "");
2616         old = old->in(0);
2617       } else {
2618         ShouldNotReachHere();
2619       }
2620     }
2621     assert(prev != NULL, "");
2622     if (new_ctrl != ctrl) {
2623       _memory_nodes.map(ctrl->_idx, mem);
2624       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2625     }
2626     uint input = (uint)MemNode::Memory;
2627     _phase->igvn().replace_input_of(prev, input, new_mem);
2628   } else {
2629     uses.clear();
2630     _memory_nodes.map(new_ctrl->_idx, new_mem);
2631     uses.push(new_ctrl);
2632     for (uint next = 0; next < uses.size(); next++) {
2633       Node *n = uses.at(next);
2634       assert(n->is_CFG(), "");
2635       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2636       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2637         Node* u = n->fast_out(i);
2638         if (!u->is_Root() && u->is_CFG() && u != n) {
2639           Node* m = _memory_nodes[u->_idx];
2640           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2641               !has_mem_phi(u) &&
2642               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2643             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2644             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2645 
2646             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2647               bool push = true;
2648               bool create_phi = true;
2649               if (_phase->is_dominator(new_ctrl, u)) {
2650                 create_phi = false;
2651               } else if (!_phase->C->has_irreducible_loop()) {
2652                 IdealLoopTree* loop = _phase->get_loop(ctrl);
2653                 bool do_check = true;
2654                 IdealLoopTree* l = loop;
2655                 create_phi = false;
2656                 while (l != _phase->ltree_root()) {
2657                   Node* head = l->_head;
2658                   if (head->in(0) == NULL) {
2659                     head = _phase->get_ctrl(head);
2660                   }
2661                   if (_phase->is_dominator(head, u) && _phase->is_dominator(_phase->idom(u), head)) {
2662                     create_phi = true;
2663                     do_check = false;
2664                     break;
2665                   }
2666                   l = l->_parent;
2667                 }
2668 
2669                 if (do_check) {
2670                   assert(!create_phi, "");
2671                   IdealLoopTree* u_loop = _phase->get_loop(u);
2672                   if (u_loop != _phase->ltree_root() && u_loop->is_member(loop)) {
2673                     Node* c = ctrl;
2674                     while (!_phase->is_dominator(c, u_loop->tail())) {
2675                       c = _phase->idom(c);
2676                     }
2677                     if (!_phase->is_dominator(c, u)) {
2678                       do_check = false;
2679                     }
2680                   }
2681                 }
2682 
2683                 if (do_check && _phase->is_dominator(_phase->idom(u), new_ctrl)) {
2684                   create_phi = true;
2685                 }
2686               }
2687               if (create_phi) {
2688                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2689                 _phase->register_new_node(phi, u);
2690                 phis.push(phi);
2691                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2692                 if (!mem_is_valid(m, u)) {
2693                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2694                   _memory_nodes.map(u->_idx, phi);
2695                 } else {
2696                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2697                   for (;;) {
2698                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2699                     Node* next = NULL;
2700                     if (m->is_Proj()) {
2701                       next = m->in(0);
2702                     } else {
2703                       assert(m->is_Mem() || m->is_LoadStore(), "");
2704                       assert(_alias == Compile::AliasIdxRaw, "");
2705                       next = m->in(MemNode::Memory);
2706                     }
2707                     if (_phase->get_ctrl(next) != u) {
2708                       break;
2709                     }
2710                     if (next->is_MergeMem()) {
2711                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2712                       break;
2713                     }
2714                     if (next->is_Phi()) {
2715                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2716                       break;
2717                     }
2718                     m = next;
2719                   }
2720 
2721                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2722                   assert(m->is_Mem() || m->is_LoadStore(), "");
2723                   uint input = (uint)MemNode::Memory;
2724                   _phase->igvn().replace_input_of(m, input, phi);
2725                   push = false;
2726                 }
2727               } else {
2728                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2729               }
2730               if (push) {
2731                 uses.push(u);
2732               }
2733             }
2734           } else if (!mem_is_valid(m, u) &&
2735                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2736             uses.push(u);
2737           }
2738         }
2739       }
2740     }
2741     for (int i = 0; i < phis.length(); i++) {
2742       Node* n = phis.at(i);
2743       Node* r = n->in(0);
2744       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2745       for (uint j = 1; j < n->req(); j++) {
2746         Node* m = find_mem(r->in(j), NULL);
2747         _phase->igvn().replace_input_of(n, j, m);
2748         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2749       }
2750     }
2751   }
2752   uint last = _phase->C->unique();
2753   MergeMemNode* mm = NULL;
2754   int alias = _alias;
2755   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2756   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2757     Node* u = mem->out(i);
2758     if (u->_idx < last) {
2759       if (u->is_Mem()) {
2760         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2761           Node* m = find_mem(_phase->get_ctrl(u), u);
2762           if (m != mem) {
2763             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2764             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2765             --i;
2766           }
2767         }
2768       } else if (u->is_MergeMem()) {
2769         MergeMemNode* u_mm = u->as_MergeMem();
2770         if (u_mm->memory_at(alias) == mem) {
2771           MergeMemNode* newmm = NULL;
2772           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2773             Node* uu = u->fast_out(j);
2774             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2775             if (uu->is_Phi()) {
2776               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2777               Node* region = uu->in(0);
2778               int nb = 0;
2779               for (uint k = 1; k < uu->req(); k++) {
2780                 if (uu->in(k) == u) {
2781                   Node* m = find_mem(region->in(k), NULL);
2782                   if (m != mem) {
2783                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2784                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2785                     if (newmm != u) {
2786                       _phase->igvn().replace_input_of(uu, k, newmm);
2787                       nb++;
2788                       --jmax;
2789                     }
2790                   }
2791                 }
2792               }
2793               if (nb > 0) {
2794                 --j;
2795               }
2796             } else {
2797               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2798               if (m != mem) {
2799                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2800                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2801                 if (newmm != u) {
2802                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2803                   --j, --jmax;
2804                 }
2805               }
2806             }
2807           }
2808         }
2809       } else if (u->is_Phi()) {
2810         assert(u->bottom_type() == Type::MEMORY, "what else?");
2811         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2812           Node* region = u->in(0);
2813           bool replaced = false;
2814           for (uint j = 1; j < u->req(); j++) {
2815             if (u->in(j) == mem) {
2816               Node* m = find_mem(region->in(j), NULL);
2817               Node* nnew = m;
2818               if (m != mem) {
2819                 if (u->adr_type() == TypePtr::BOTTOM) {
2820                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2821                   nnew = mm;
2822                 }
2823                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2824                 _phase->igvn().replace_input_of(u, j, nnew);
2825                 replaced = true;
2826               }
2827             }
2828           }
2829           if (replaced) {
2830             --i;
2831           }
2832         }
2833       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2834                  u->adr_type() == NULL) {
2835         assert(u->adr_type() != NULL ||
2836                u->Opcode() == Op_Rethrow ||
2837                u->Opcode() == Op_Return ||
2838                u->Opcode() == Op_SafePoint ||
2839                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2840                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2841                u->Opcode() == Op_CallLeaf, "");
2842         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2843         if (m != mem) {
2844           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2845           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2846           --i;
2847         }
2848       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2849         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2850         if (m != mem) {
2851           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2852           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2853           --i;
2854         }
2855       } else if (u->adr_type() != TypePtr::BOTTOM &&
2856                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2857         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2858         assert(m != mem, "");
2859         // u is on the wrong slice...
2860         assert(u->is_ClearArray(), "");
2861         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2862         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2863         --i;
2864       }
2865     }
2866   }
2867 #ifdef ASSERT
2868   assert(new_mem->outcnt() > 0, "");
2869   for (int i = 0; i < phis.length(); i++) {
2870     Node* n = phis.at(i);
2871     assert(n->outcnt() > 0, "new phi must have uses now");
2872   }
2873 #endif
2874 }
2875 
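     // Build a MergeMem based on mem with this fixer's alias slice replaced by
     // rep_proj, registered at rep_ctrl.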
2876 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2877   MergeMemNode* mm = MergeMemNode::make(mem);
2878   mm->set_memory_at(_alias, rep_proj);
2879   _phase->register_new_node(mm, rep_ctrl);
2880   return mm;
2881 }
2882 
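     // Return a MergeMem identical to u except that this fixer's alias slice is
     // rep_proj. If u has a single use it is updated in place; otherwise a fresh
     // MergeMem is built so the DUIterator over mem's uses stays valid.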
2883 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2884   MergeMemNode* newmm = NULL;
2885   MergeMemNode* u_mm = u->as_MergeMem();
2886   Node* c = _phase->get_ctrl(u);
2887   if (_phase->is_dominator(c, rep_ctrl)) {
2888     c = rep_ctrl;
2889   } else {
2890     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2891   }
2892   if (u->outcnt() == 1) {
2893     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2894       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2895       --i;
2896     } else {
2897       _phase->igvn().rehash_node_delayed(u);
2898       u_mm->set_memory_at(_alias, rep_proj);
2899     }
2900     newmm = u_mm;
2901     _phase->set_ctrl_and_loop(u, c);
2902   } else {
2903     // Can't simply clone u and then change one of its inputs because
2904     // that adds and then removes an edge, which messes with the
2905     // DUIterator.
2906     newmm = MergeMemNode::make(u_mm->base_memory());
2907     for (uint j = 0; j < u->req(); j++) {
2908       if (j < newmm->req()) {
2909         if (j == (uint)_alias) {
2910           newmm->set_req(j, rep_proj);
2911         } else if (newmm->in(j) != u->in(j)) {
2912           newmm->set_req(j, u->in(j));
2913         }
2914       } else if (j == (uint)_alias) {
2915         newmm->add_req(rep_proj);
2916       } else {
2917         newmm->add_req(u->in(j));
2918       }
2919     }
2920     if ((uint)_alias >= u->req()) {
2921       newmm->set_memory_at(_alias, rep_proj);
2922     }
2923     _phase->register_new_node(newmm, c);
2924   }
2925   return newmm;
2926 }
2927 
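     // Process a memory Phi if it is on this fixer's alias slice, or if it is a
     // wide (bottom) memory Phi and its region has no narrower Phi for this slice.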
2928 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2929   if (phi->adr_type() == TypePtr::BOTTOM) {
2930     Node* region = phi->in(0);
2931     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2932       Node* uu = region->fast_out(j);
2933       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2934         return false;
2935       }
2936     }
2937     return true;
2938   }
2939   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2940 }
2941 
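     // Redirect uses of mem that are dominated by rep_ctrl (where the new state
     // rep_proj, produced by replacement, is available) to consume rep_proj instead,
     // wrapping it in a MergeMem for users that expect wide memory.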
2942 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2943   uint last = _phase->C->unique();
2944   MergeMemNode* mm = NULL;
2945   assert(mem->bottom_type() == Type::MEMORY, "");
2946   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2947     Node* u = mem->out(i);
2948     if (u != replacement && u->_idx < last) {
2949       if (u->is_MergeMem()) {
2950         MergeMemNode* u_mm = u->as_MergeMem();
2951         if (u_mm->memory_at(_alias) == mem) {
2952           MergeMemNode* newmm = NULL;
2953           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2954             Node* uu = u->fast_out(j);
2955             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2956             if (uu->is_Phi()) {
2957               if (should_process_phi(uu)) {
2958                 Node* region = uu->in(0);
2959                 int nb = 0;
2960                 for (uint k = 1; k < uu->req(); k++) {
2961                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2962                     if (newmm == NULL) {
2963                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2964                     }
2965                     if (newmm != u) {
2966                       _phase->igvn().replace_input_of(uu, k, newmm);
2967                       nb++;
2968                       --jmax;
2969                     }
2970                   }
2971                 }
2972                 if (nb > 0) {
2973                   --j;
2974                 }
2975               }
2976             } else {
2977               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2978                 if (newmm == NULL) {
2979                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2980                 }
2981                 if (newmm != u) {
2982                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2983                   --j, --jmax;
2984                 }
2985               }
2986             }
2987           }
2988         }
2989       } else if (u->is_Phi()) {
2990         assert(u->bottom_type() == Type::MEMORY, "what else?");
2991         Node* region = u->in(0);
2992         if (should_process_phi(u)) {
2993           bool replaced = false;
2994           for (uint j = 1; j < u->req(); j++) {
2995             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2996               Node* nnew = rep_proj;
2997               if (u->adr_type() == TypePtr::BOTTOM) {
2998                 if (mm == NULL) {
2999                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
3000                 }
3001                 nnew = mm;
3002               }
3003               _phase->igvn().replace_input_of(u, j, nnew);
3004               replaced = true;
3005             }
3006           }
3007           if (replaced) {
3008             --i;
3009           }
3010 
3011         }
3012       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
3013                  u->adr_type() == NULL) {
3014         assert(u->adr_type() != NULL ||
3015                u->Opcode() == Op_Rethrow ||
3016                u->Opcode() == Op_Return ||
3017                u->Opcode() == Op_SafePoint ||
3018                u->Opcode() == Op_StoreIConditional ||
3019                u->Opcode() == Op_StoreLConditional ||
3020                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
3021                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
3022                u->Opcode() == Op_CallLeaf, "%s", u->Name());
3023         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3024           if (mm == NULL) {
3025             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
3026           }
3027           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
3028           --i;
3029         }
3030       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
3031         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3032           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
3033           --i;
3034         }
3035       }
3036     }
3037   }
3038 }
3039 
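     // Newly created load reference barriers register themselves with the barrier
     // set state so they can be located and expanded later.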
3040 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, bool native)
3041 : Node(ctrl, obj), _native(native) {
3042   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
3043 }
3044 
3045 bool ShenandoahLoadReferenceBarrierNode::is_native() const {
3046   return _native;
3047 }
3048 
3049 uint ShenandoahLoadReferenceBarrierNode::size_of() const {
3050   return sizeof(*this);
3051 }
3052 
3053 uint ShenandoahLoadReferenceBarrierNode::hash() const {
3054   return Node::hash() + (_native ? 1 : 0);
3055 }
3056 
3057 bool ShenandoahLoadReferenceBarrierNode::cmp( const Node &n ) const {
3058   return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
3059          _native == ((const ShenandoahLoadReferenceBarrierNode&)n)._native;
3060 }
3061 
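     // The barrier's type follows its ValueIn input: TOP and the null pointer pass
     // through unchanged, anything else is treated as an oop.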
3062 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
3063   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
3064     return Type::TOP;
3065   }
3066   const Type* t = in(ValueIn)->bottom_type();
3067   if (t == TypePtr::NULL_PTR) {
3068     return t;
3069   }
3070   return t->is_oopptr();
3071 }
3072 
3073 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
3074   // Either input is TOP ==> the result is TOP
3075   const Type *t2 = phase->type(in(ValueIn));
3076   if( t2 == Type::TOP ) return Type::TOP;
3077 
3078   if (t2 == TypePtr::NULL_PTR) {
3079     return t2;
3080   }
3081 
3082   const Type* type = t2->is_oopptr();
3083   return type;
3084 }
3085 
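     // The barrier is redundant if its input value provably never needs one.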
3086 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
3087   Node* value = in(ValueIn);
3088   if (!needs_barrier(phase, value)) {
3089     return value;
3090   }
3091   return this;
3092 }
3093 
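     // Conservatively decide whether the value flowing into the barrier may be a
     // reference that still needs a load reference barrier. Allocations, call
     // results, nulls, constant oops and values that already went through a
     // barrier are considered safe.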
3094 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
3095   Unique_Node_List visited;
3096   return needs_barrier_impl(phase, n, visited);
3097 }
3098 
3099 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
3100   if (n == NULL) return false;
3101   if (visited.member(n)) {
3102     return false; // Been there.
3103   }
3104   visited.push(n);
3105 
3106   if (n->is_Allocate()) {
3107     // tty->print_cr("optimize barrier on alloc");
3108     return false;
3109   }
3110   if (n->is_Call()) {
3111     // tty->print_cr("optimize barrier on call");
3112     return false;
3113   }
3114 
3115   const Type* type = phase->type(n);
3116   if (type == Type::TOP) {
3117     return false;
3118   }
3119   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
3120     // tty->print_cr("optimize barrier on null");
3121     return false;
3122   }
3123   if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
3124     // tty->print_cr("optimize barrier on constant");
3125     return false;
3126   }
3127 
3128   switch (n->Opcode()) {
3129     case Op_AddP:
3130       return true; // TODO: Can refine?
3131     case Op_LoadP:
3132     case Op_ShenandoahCompareAndExchangeN:
3133     case Op_ShenandoahCompareAndExchangeP:
3134     case Op_CompareAndExchangeN:
3135     case Op_CompareAndExchangeP:
3136     case Op_GetAndSetN:
3137     case Op_GetAndSetP:
3138       return true;
3139     case Op_Phi: {
3140       for (uint i = 1; i < n->req(); i++) {
3141         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
3142       }
3143       return false;
3144     }
3145     case Op_CheckCastPP:
3146     case Op_CastPP:
3147       return needs_barrier_impl(phase, n->in(1), visited);
3148     case Op_Proj:
3149       return needs_barrier_impl(phase, n->in(0), visited);
3150     case Op_ShenandoahLoadReferenceBarrier:
3151       // tty->print_cr("optimize barrier on barrier");
3152       return false;
3153     case Op_Parm:
3154       // tty->print_cr("optimize barrier on input arg");
3155       return false;
3156     case Op_DecodeN:
3157     case Op_EncodeP:
3158       return needs_barrier_impl(phase, n->in(1), visited);
3159     case Op_LoadN:
3160       return true;
3161     case Op_CMoveN:
3162     case Op_CMoveP:
3163       return needs_barrier_impl(phase, n->in(2), visited) ||
3164              needs_barrier_impl(phase, n->in(3), visited);
3165     case Op_ShenandoahEnqueueBarrier:
3166       return needs_barrier_impl(phase, n->in(1), visited);
3167     case Op_CreateEx:
3168       return false;
3169     default:
3170       break;
3171   }
3172 #ifdef ASSERT
3173   tty->print("need barrier on?: ");
3174   tty->print_cr("ins:");
3175   n->dump(2);
3176   tty->print_cr("outs:");
3177   n->dump(-2);
3178   ShouldNotReachHere();
3179 #endif
3180   return true;
3181 }
3182 
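     // Examine the transitive uses of the barrier (looking through address
     // computation, casts, CMoves and Phis) and return STRONG if any of them
     // requires a barrier, NONE otherwise.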
3183 ShenandoahLoadReferenceBarrierNode::Strength ShenandoahLoadReferenceBarrierNode::get_barrier_strength() {
3184   Unique_Node_List visited;
3185   Node_Stack stack(0);
3186   stack.push(this, 0);
3187 
3188   // Look for the strongest strength: go over nodes looking for STRONG ones.
3189   // Stop once we encounter STRONG. Otherwise, walk until we run out of nodes,
3190   // and then the overall strength is NONE.
3191   Strength strength = NONE;
3192   while (strength != STRONG && stack.size() > 0) {
3193     Node* n = stack.node();
3194     if (visited.member(n)) {
3195       stack.pop();
3196       continue;
3197     }
3198     visited.push(n);
3199     bool visit_users = false;
3200     switch (n->Opcode()) {
3201       case Op_CallStaticJava:
3202       case Op_CallDynamicJava:
3203       case Op_CallLeaf:
3204       case Op_CallLeafNoFP:
3205       case Op_CompareAndSwapL:
3206       case Op_CompareAndSwapI:
3207       case Op_CompareAndSwapB:
3208       case Op_CompareAndSwapS:
3209       case Op_CompareAndSwapN:
3210       case Op_CompareAndSwapP:
3211       case Op_CompareAndExchangeL:
3212       case Op_CompareAndExchangeI:
3213       case Op_CompareAndExchangeB:
3214       case Op_CompareAndExchangeS:
3215       case Op_CompareAndExchangeN:
3216       case Op_CompareAndExchangeP:
3217       case Op_WeakCompareAndSwapL:
3218       case Op_WeakCompareAndSwapI:
3219       case Op_WeakCompareAndSwapB:
3220       case Op_WeakCompareAndSwapS:
3221       case Op_WeakCompareAndSwapN:
3222       case Op_WeakCompareAndSwapP:
3223       case Op_ShenandoahCompareAndSwapN:
3224       case Op_ShenandoahCompareAndSwapP:
3225       case Op_ShenandoahWeakCompareAndSwapN:
3226       case Op_ShenandoahWeakCompareAndSwapP:
3227       case Op_ShenandoahCompareAndExchangeN:
3228       case Op_ShenandoahCompareAndExchangeP:
3229       case Op_GetAndSetL:
3230       case Op_GetAndSetI:
3231       case Op_GetAndSetB:
3232       case Op_GetAndSetS:
3233       case Op_GetAndSetP:
3234       case Op_GetAndSetN:
3235       case Op_GetAndAddL:
3236       case Op_GetAndAddI:
3237       case Op_GetAndAddB:
3238       case Op_GetAndAddS:
3239       case Op_ShenandoahEnqueueBarrier:
3240       case Op_FastLock:
3241       case Op_FastUnlock:
3242       case Op_Rethrow:
3243       case Op_Return:
3244       case Op_StoreB:
3245       case Op_StoreC:
3246       case Op_StoreD:
3247       case Op_StoreF:
3248       case Op_StoreL:
3249       case Op_StoreLConditional:
3250       case Op_StoreI:
3251       case Op_StoreIConditional:
3252       case Op_StoreN:
3253       case Op_StoreP:
3254       case Op_StoreVector:
3255       case Op_StrInflatedCopy:
3256       case Op_StrCompressedCopy:
3257       case Op_EncodeP:
3258       case Op_CastP2X:
3259       case Op_SafePoint:
3260       case Op_EncodeISOArray:
3261       case Op_AryEq:
3262       case Op_StrEquals:
3263       case Op_StrComp:
3264       case Op_StrIndexOf:
3265       case Op_StrIndexOfChar:
3266       case Op_HasNegatives:
3267         // Known to require barriers
3268         strength = STRONG;
3269         break;
3270       case Op_CmpP: {
3271         if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) ||
3272             n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
3273           // One of the sides is known to be null, no need for a barrier.
3274         } else {
3275           strength = STRONG;
3276         }
3277         break;
3278       }
3279       case Op_LoadB:
3280       case Op_LoadUB:
3281       case Op_LoadUS:
3282       case Op_LoadD:
3283       case Op_LoadF:
3284       case Op_LoadL:
3285       case Op_LoadI:
3286       case Op_LoadS:
3287       case Op_LoadN:
3288       case Op_LoadP:
3289       case Op_LoadVector: {
3290         const TypePtr* adr_type = n->adr_type();
3291         int alias_idx = Compile::current()->get_alias_index(adr_type);
3292         Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
3293         ciField* field = alias_type->field();
3294         bool is_static = field != NULL && field->is_static();
3295         bool is_final = field != NULL && field->is_final();
3296 
3297         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
3298           // Loading the constant does not require barriers: it should be handled
3299           // as part of GC roots already.
3300         } else {
3301           strength = STRONG;
3302         }
3303         break;
3304       }
3305       case Op_Conv2B:
3306       case Op_LoadRange:
3307       case Op_LoadKlass:
3308       case Op_LoadNKlass:
3309         // Do not require barriers
3310         break;
3311       case Op_AddP:
3312       case Op_CheckCastPP:
3313       case Op_CastPP:
3314       case Op_CMoveP:
3315       case Op_Phi:
3316       case Op_ShenandoahLoadReferenceBarrier:
3317         // Whether or not these need barriers depends on their users
3318         visit_users = true;
3319         break;
3320       default: {
3321 #ifdef ASSERT
3322         fatal("Unknown node in get_barrier_strength: %s", NodeClassNames[n->Opcode()]);
3323 #else
3324         // Default to strong: better to have excess barriers than to miss some.
3325         strength = STRONG;
3326 #endif
3327       }
3328     }
3329 
3330     stack.pop();
3331     if (visit_users) {
3332       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3333         Node* user = n->fast_out(i);
3334         if (user != NULL) {
3335           stack.push(user, 0);
3336         }
3337       }
3338     }
3339   }
3340   return strength;
3341 }
3342 
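     // Recognize the pattern where the barrier's input is a CastPP pinned on the
     // non-null branch of a null check whose other branch goes to an uncommon trap,
     // and return that uncommon trap call (NULL if the pattern doesn't match).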
3343 CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
3344   Node* val = in(ValueIn);
3345 
3346   const Type* val_t = igvn.type(val);
3347 
3348   if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
3349       val->Opcode() == Op_CastPP &&
3350       val->in(0) != NULL &&
3351       val->in(0)->Opcode() == Op_IfTrue &&
3352       val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
3353       val->in(0)->in(0)->is_If() &&
3354       val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
3355       val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
3356       val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
3357       val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
3358       val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
3359     assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
3360     CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
3361     return unc;
3362   }
3363   return NULL;
3364 }