1 /*
   2  * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  28 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  30 #include "gc/shenandoah/shenandoahForwarding.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  33 #include "gc/shenandoah/shenandoahRuntime.hpp"
  34 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/block.hpp"
  37 #include "opto/callnode.hpp"
  38 #include "opto/castnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/phaseX.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/runtime.hpp"
  43 #include "opto/subnode.hpp"
  44 
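     // Expands all enqueue and load reference barriers recorded by the barrier set,
     // then optionally runs another round of loop optimizations over the expanded graph.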
  45 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  46   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  47   if ((state->enqueue_barriers_count() +
  48        state->load_reference_barriers_count()) > 0) {
  49     bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
  50     C->clear_major_progress();
  51     PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
  52     if (C->failing()) return false;
  53     PhaseIdealLoop::verify(igvn);
  54     DEBUG_ONLY(verify_raw_mem(C->root());)
  55     if (attempt_more_loopopts) {
  56       C->set_major_progress();
  57       if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
  58         return false;
  59       }
  60       C->clear_major_progress();
  61       if (C->range_check_cast_count() > 0) {
  62         // No more loop optimizations. Remove all range check dependent CastIINodes.
  63         C->remove_range_check_casts(igvn);
  64         igvn.optimize();
  65       }
  66     }
  67   }
  68   return true;
  69 }
  70 
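     // Matches the heap state test shape emitted by test_heap_state() below:
     // if ((gc_state & mask) != 0), where gc_state is the thread-local gc state byte.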
  71 bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) {
  72   if (!UseShenandoahGC) {
  73     return false;
  74   }
  75   assert(iff->is_If(), "bad input");
  76   if (iff->Opcode() != Op_If) {
  77     return false;
  78   }
  79   Node* bol = iff->in(1);
  80   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  81     return false;
  82   }
  83   Node* cmp = bol->in(1);
  84   if (cmp->Opcode() != Op_CmpI) {
  85     return false;
  86   }
  87   Node* in1 = cmp->in(1);
  88   Node* in2 = cmp->in(2);
  89   if (in2->find_int_con(-1) != 0) {
  90     return false;
  91   }
  92   if (in1->Opcode() != Op_AndI) {
  93     return false;
  94   }
  95   in2 = in1->in(2);
  96   if (in2->find_int_con(-1) != mask) {
  97     return false;
  98   }
  99   in1 = in1->in(1);
 100 
 101   return is_gc_state_load(in1);
 102 }
 103 
 104 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
 105   return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
 106 }
 107 
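     // Returns true if n loads the gc state byte of the current thread
     // (LoadB/LoadUB at ThreadLocal + ShenandoahThreadLocalData::gc_state_offset()).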
 108 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
 109   if (!UseShenandoahGC) {
 110     return false;
 111   }
 112   if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
 113     return false;
 114   }
 115   Node* addp = n->in(MemNode::Address);
 116   if (!addp->is_AddP()) {
 117     return false;
 118   }
 119   Node* base = addp->in(AddPNode::Address);
 120   Node* off = addp->in(AddPNode::Offset);
 121   if (base->Opcode() != Op_ThreadLocal) {
 122     return false;
 123   }
 124   if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
 125     return false;
 126   }
 127   return true;
 128 }
 129 
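     // Walks control flow backward from start to the dominating stop node and reports
     // whether a safepoint (other than a leaf call) can be encountered on the way.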
 130 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
 131   assert(phase->is_dominator(stop, start), "bad inputs");
 132   ResourceMark rm;
 133   Unique_Node_List wq;
 134   wq.push(start);
 135   for (uint next = 0; next < wq.size(); next++) {
 136     Node *m = wq.at(next);
 137     if (m == stop) {
 138       continue;
 139     }
 140     if (m->is_SafePoint() && !m->is_CallLeaf()) {
 141       return true;
 142     }
 143     if (m->is_Region()) {
 144       for (uint i = 1; i < m->req(); i++) {
 145         wq.push(m->in(i));
 146       }
 147     } else {
 148       wq.push(m->in(0));
 149     }
 150   }
 151   return false;
 152 }
 153 
 154 #ifdef ASSERT
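     // Follows the definition chain of oop value 'in' and checks that it is either a
     // known-safe source (constant, parameter, fresh allocation, raw OSR load, ...) or
     // is covered by a barrier appropriate for access type t. Barriers encountered are
     // recorded in barriers_used. Used by verify() below.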
 155 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
 156   assert(phis.size() == 0, "");
 157 
 158   while (true) {
 159     if (in->bottom_type() == TypePtr::NULL_PTR) {
 160       if (trace) {tty->print_cr("NULL");}
 161     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
 162       if (trace) {tty->print_cr("Non oop");}
 163     } else {
 164       if (in->is_ConstraintCast()) {
 165         in = in->in(1);
 166         continue;
 167       } else if (in->is_AddP()) {
 168         assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
 169         in = in->in(AddPNode::Address);
 170         continue;
 171       } else if (in->is_Con()) {
 172         if (trace) {
 173           tty->print("Found constant");
 174           in->dump();
 175         }
 176       } else if (in->Opcode() == Op_Parm) {
 177         if (trace) {
 178           tty->print("Found argument");
 179         }
 180       } else if (in->Opcode() == Op_CreateEx) {
 181         if (trace) {
 182           tty->print("Found create-exception");
 183         }
 184       } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
 185         if (trace) {
 186           tty->print("Found raw LoadP (OSR argument?)");
 187         }
 188       } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 189         if (t == ShenandoahOopStore) {
 190           uint i = 0;
 191           for (; i < phis.size(); i++) {
 192             Node* n = phis.node_at(i);
 193             if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
 194               break;
 195             }
 196           }
 197           if (i == phis.size()) {
 198             return false;
 199           }
 200         }
 201         barriers_used.push(in);
 202         if (trace) {tty->print("Found barrier"); in->dump();}
 203       } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
 204         if (t != ShenandoahOopStore) {
 205           in = in->in(1);
 206           continue;
 207         }
 208         if (trace) {tty->print("Found enqueue barrier"); in->dump();}
 209         phis.push(in, in->req());
 210         in = in->in(1);
 211         continue;
 212       } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
 213         if (trace) {
 214           tty->print("Found alloc");
 215           in->in(0)->dump();
 216         }
 217       } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
 218         if (trace) {
 219           tty->print("Found Java call");
 220         }
 221       } else if (in->is_Phi()) {
 222         if (!visited.test_set(in->_idx)) {
 223           if (trace) {tty->print("Pushed phi:"); in->dump();}
 224           phis.push(in, 2);
 225           in = in->in(1);
 226           continue;
 227         }
 228         if (trace) {tty->print("Already seen phi:"); in->dump();}
 229       } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
 230         if (!visited.test_set(in->_idx)) {
 231           if (trace) {tty->print("Pushed cmovep:"); in->dump();}
 232           phis.push(in, CMoveNode::IfTrue);
 233           in = in->in(CMoveNode::IfFalse);
 234           continue;
 235         }
 236         if (trace) {tty->print("Already seen cmovep:"); in->dump();}
 237       } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
 238         in = in->in(1);
 239         continue;
 240       } else {
 241         return false;
 242       }
 243     }
 244     bool cont = false;
 245     while (phis.is_nonempty()) {
 246       uint idx = phis.index();
 247       Node* phi = phis.node();
 248       if (idx >= phi->req()) {
 249         if (trace) {tty->print("Popped phi:"); phi->dump();}
 250         phis.pop();
 251         continue;
 252       }
 253       if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
 254       in = phi->in(idx);
 255       phis.set_index(idx+1);
 256       cont = true;
 257       break;
 258     }
 259     if (!cont) {
 260       break;
 261     }
 262   }
 263   return true;
 264 }
 265 
 266 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
 267   if (n1 != NULL) {
 268     n1->dump(+10);
 269   }
 270   if (n2 != NULL) {
 271     n2->dump(+10);
 272   }
 273   fatal("%s", msg);
 274 }
 275 
 276 void ShenandoahBarrierC2Support::verify(RootNode* root) {
 277   ResourceMark rm;
 278   Unique_Node_List wq;
 279   GrowableArray<Node*> barriers;
 280   Unique_Node_List barriers_used;
 281   Node_Stack phis(0);
 282   VectorSet visited(Thread::current()->resource_area());
 283   const bool trace = false;
 284   const bool verify_no_useless_barrier = false;
 285 
 286   wq.push(root);
 287   for (uint next = 0; next < wq.size(); next++) {
 288     Node *n = wq.at(next);
 289     if (n->is_Load()) {
 290       const bool trace = false;
 291       if (trace) {tty->print("Verifying"); n->dump();}
 292       if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
 293         if (trace) {tty->print_cr("Load range/klass");}
 294       } else {
 295         const TypePtr* adr_type = n->as_Load()->adr_type();
 296 
 297         if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
 298           if (trace) {tty->print_cr("Mark load");}
 299         } else if (adr_type->isa_instptr() &&
 300                    adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
 301                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
 302           if (trace) {tty->print_cr("Reference.get()");}
 303         } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 304           report_verify_failure("Shenandoah verification: Load should have barriers", n);
 305         }
 306       }
 307     } else if (n->is_Store()) {
 308       const bool trace = false;
 309 
 310       if (trace) {tty->print("Verifying"); n->dump();}
 311       if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
 312         Node* adr = n->in(MemNode::Address);
 313         bool verify = true;
 314 
 315         if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
 316           adr = adr->in(AddPNode::Address);
 317           if (adr->is_AddP()) {
 318             assert(adr->in(AddPNode::Base)->is_top(), "");
 319             adr = adr->in(AddPNode::Address);
 320             if (adr->Opcode() == Op_LoadP &&
 321                 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
 322                 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 323                 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
 324               if (trace) {tty->print_cr("SATB prebarrier");}
 325               verify = false;
 326             }
 327           }
 328         }
 329 
 330         if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 331           report_verify_failure("Shenandoah verification: Store should have barriers", n);
 332         }
 333       }
 334       if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 335         report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
 336       }
 337     } else if (n->Opcode() == Op_CmpP) {
 338       const bool trace = false;
 339 
 340       Node* in1 = n->in(1);
 341       Node* in2 = n->in(2);
 342       if (in1->bottom_type()->isa_oopptr()) {
 343         if (trace) {tty->print("Verifying"); n->dump();}
 344 
 345         bool mark_inputs = false;
 346         if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
 347             (in1->is_Con() || in2->is_Con())) {
 348           if (trace) {tty->print_cr("Comparison against a constant");}
 349           mark_inputs = true;
 350         } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
 351                    (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
 352           if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
 353           mark_inputs = true;
 354         } else {
 355           assert(in2->bottom_type()->isa_oopptr(), "");
 356 
 357           if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
 358               !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
 359             report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
 360           }
 361         }
 362         if (verify_no_useless_barrier &&
 363             mark_inputs &&
 364             (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
 365              !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
 366           phis.clear();
 367           visited.reset();
 368         }
 369       }
 370     } else if (n->is_LoadStore()) {
 371       if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
 372           !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 373         report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
 374       }
 375 
 376       if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 377         report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
 378       }
 379     } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
 380       CallNode* call = n->as_Call();
 381 
 382       static struct {
 383         const char* name;
 384         struct {
 385           int pos;
 386           verify_type t;
 387         } args[6];
 388       } calls[] = {
 389         "aescrypt_encryptBlock",
 390         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 391           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 392         "aescrypt_decryptBlock",
 393         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 394           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 395         "multiplyToLen",
 396         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
 397           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 398         "squareToLen",
 399         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
 400           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 401         "montgomery_multiply",
 402         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 403           { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 404         "montgomery_square",
 405         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
 406           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 407         "mulAdd",
 408         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 409           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 410         "vectorizedMismatch",
 411         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 412           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 413         "updateBytesCRC32",
 414         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 415           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 416         "updateBytesAdler32",
 417         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 418           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 419         "updateBytesCRC32C",
 420         { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
 421           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 422         "counterMode_AESCrypt",
 423         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 424           { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
 425         "cipherBlockChaining_encryptAESCrypt",
 426         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 427           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 428         "cipherBlockChaining_decryptAESCrypt",
 429         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 430           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 431         "shenandoah_clone_barrier",
 432         { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 433           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 434         "ghash_processBlocks",
 435         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 436           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 437         "sha1_implCompress",
 438         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 439           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 440         "sha256_implCompress",
 441         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 442           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 443         "sha512_implCompress",
 444         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 445           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 446         "sha1_implCompressMB",
 447         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 448           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 449         "sha256_implCompressMB",
 450         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 451           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 452         "sha512_implCompressMB",
 453         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 454           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 455         "encodeBlock",
 456         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 457           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 458       };
 459 
 460       if (call->is_call_to_arraycopystub()) {
 461         Node* dest = NULL;
 462         const TypeTuple* args = n->as_Call()->_tf->domain();
 463         for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 464           if (args->field_at(i)->isa_ptr()) {
 465             j++;
 466             if (j == 2) {
 467               dest = n->in(i);
 468               break;
 469             }
 470           }
 471         }
 472         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
 473             !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
 474           report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
 475         }
 476       } else if (strlen(call->_name) > 5 &&
 477                  !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
 478         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
 479           report_verify_failure("Shenandoah verification: _fill should have barriers", n);
 480         }
 481       } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
 482         // skip
 483       } else {
 484         const int calls_len = sizeof(calls) / sizeof(calls[0]);
 485         int i = 0;
 486         for (; i < calls_len; i++) {
 487           if (!strcmp(calls[i].name, call->_name)) {
 488             break;
 489           }
 490         }
 491         if (i != calls_len) {
 492           const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
 493           for (uint j = 0; j < args_len; j++) {
 494             int pos = calls[i].args[j].pos;
 495             if (pos == -1) {
 496               break;
 497             }
 498             if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
 499               report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 500             }
 501           }
 502           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 503             if (call->in(j)->bottom_type()->make_ptr() &&
 504                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 505               uint k = 0;
 506               for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
 507               if (k == args_len) {
 508                 fatal("arg %d for call %s not covered", j, call->_name);
 509               }
 510             }
 511           }
 512         } else {
 513           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 514             if (call->in(j)->bottom_type()->make_ptr() &&
 515                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 516               fatal("%s not covered", call->_name);
 517             }
 518           }
 519         }
 520       }
 521     } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 522       // skip
 523     } else if (n->is_AddP()
 524                || n->is_Phi()
 525                || n->is_ConstraintCast()
 526                || n->Opcode() == Op_Return
 527                || n->Opcode() == Op_CMoveP
 528                || n->Opcode() == Op_CMoveN
 529                || n->Opcode() == Op_Rethrow
 530                || n->is_MemBar()
 531                || n->Opcode() == Op_Conv2B
 532                || n->Opcode() == Op_SafePoint
 533                || n->is_CallJava()
 534                || n->Opcode() == Op_Unlock
 535                || n->Opcode() == Op_EncodeP
 536                || n->Opcode() == Op_DecodeN) {
 537       // nothing to do
 538     } else {
 539       static struct {
 540         int opcode;
 541         struct {
 542           int pos;
 543           verify_type t;
 544         } inputs[2];
 545       } others[] = {
 546         Op_FastLock,
 547         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 548         Op_Lock,
 549         { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
 550         Op_ArrayCopy,
 551         { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
 552         Op_StrCompressedCopy,
 553         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 554         Op_StrInflatedCopy,
 555         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 556         Op_AryEq,
 557         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 558         Op_StrIndexOf,
 559         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 560         Op_StrComp,
 561         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 562         Op_StrEquals,
 563         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 564         Op_EncodeISOArray,
 565         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 566         Op_HasNegatives,
 567         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
 568         Op_CastP2X,
 569         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 570         Op_StrIndexOfChar,
 571         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 572       };
 573 
 574       const int others_len = sizeof(others) / sizeof(others[0]);
 575       int i = 0;
 576       for (; i < others_len; i++) {
 577         if (others[i].opcode == n->Opcode()) {
 578           break;
 579         }
 580       }
 581       uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
 582       if (i != others_len) {
 583         const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
 584         for (uint j = 0; j < inputs_len; j++) {
 585           int pos = others[i].inputs[j].pos;
 586           if (pos == -1) {
 587             break;
 588           }
 589           if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
 590             report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 591           }
 592         }
 593         for (uint j = 1; j < stop; j++) {
 594           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 595               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 596             uint k = 0;
 597             for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
 598             if (k == inputs_len) {
 599               fatal("arg %d for node %s not covered", j, n->Name());
 600             }
 601           }
 602         }
 603       } else {
 604         for (uint j = 1; j < stop; j++) {
 605           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 606               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 607             fatal("%s not covered", n->Name());
 608           }
 609         }
 610       }
 611     }
 612 
 613     if (n->is_SafePoint()) {
 614       SafePointNode* sfpt = n->as_SafePoint();
 615       if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
 616         for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
 617           if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 618             phis.clear();
 619             visited.reset();
 620           }
 621         }
 622       }
 623     }
 624   }
 625 
 626   if (verify_no_useless_barrier) {
 627     for (int i = 0; i < barriers.length(); i++) {
 628       Node* n = barriers.at(i);
 629       if (!barriers_used.member(n)) {
 630         tty->print("XXX useless barrier"); n->dump(-2);
 631         ShouldNotReachHere();
 632       }
 633     }
 634   }
 635 }
 636 #endif
 637 
 638 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
 639   // That both nodes have the same control is not sufficient to prove
 640   // domination; verify that there is no path from d to n.
 641   ResourceMark rm;
 642   Unique_Node_List wq;
 643   wq.push(d);
 644   for (uint next = 0; next < wq.size(); next++) {
 645     Node *m = wq.at(next);
 646     if (m == n) {
 647       return false;
 648     }
 649     if (m->is_Phi() && m->in(0)->is_Loop()) {
 650       assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
 651     } else {
 652       if (m->is_Store() || m->is_LoadStore()) {
 653         // Take anti-dependencies into account
 654         Node* mem = m->in(MemNode::Memory);
 655         for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
 656           Node* u = mem->fast_out(i);
 657           if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
 658               phase->ctrl_or_self(u) == c) {
 659             wq.push(u);
 660           }
 661         }
 662       }
 663       for (uint i = 0; i < m->req(); i++) {
 664         if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
 665           wq.push(m->in(i));
 666         }
 667       }
 668     }
 669   }
 670   return true;
 671 }
 672 
 673 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
 674   if (d_c != n_c) {
 675     return phase->is_dominator(d_c, n_c);
 676   }
 677   return is_dominator_same_ctrl(d_c, d, n, phase);
 678 }
 679 
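     // Returns the memory state feeding mem on the given alias slice: one step back
     // through projections, safepoints/membars, phis, merge-mems and stores.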
 680 Node* next_mem(Node* mem, int alias) {
 681   Node* res = NULL;
 682   if (mem->is_Proj()) {
 683     res = mem->in(0);
 684   } else if (mem->is_SafePoint() || mem->is_MemBar()) {
 685     res = mem->in(TypeFunc::Memory);
 686   } else if (mem->is_Phi()) {
 687     res = mem->in(1);
 688   } else if (mem->is_MergeMem()) {
 689     res = mem->as_MergeMem()->memory_at(alias);
 690   } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
 691     assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
 692     res = mem->in(MemNode::Memory);
 693   } else {
 694 #ifdef ASSERT
 695     mem->dump();
 696 #endif
 697     ShouldNotReachHere();
 698   }
 699   return res;
 700 }
 701 
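     // Walks the dominator chain from c up to dom. Returns NULL if no conditional branch
     // is found (uncommon trap paths are tolerated), the lone If projection when
     // allow_one_proj permits one, or NodeSentinel for unsupported control flow shapes.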
 702 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
 703   Node* iffproj = NULL;
 704   while (c != dom) {
 705     Node* next = phase->idom(c);
 706     assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
 707     if (c->is_Region()) {
 708       ResourceMark rm;
 709       Unique_Node_List wq;
 710       wq.push(c);
 711       for (uint i = 0; i < wq.size(); i++) {
 712         Node *n = wq.at(i);
 713         if (n == next) {
 714           continue;
 715         }
 716         if (n->is_Region()) {
 717           for (uint j = 1; j < n->req(); j++) {
 718             wq.push(n->in(j));
 719           }
 720         } else {
 721           wq.push(n->in(0));
 722         }
 723       }
 724       for (uint i = 0; i < wq.size(); i++) {
 725         Node *n = wq.at(i);
 726         assert(n->is_CFG(), "");
 727         if (n->is_Multi()) {
 728           for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 729             Node* u = n->fast_out(j);
 730             if (u->is_CFG()) {
 731               if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 732                 return NodeSentinel;
 733               }
 734             }
 735           }
 736         }
 737       }
 738     } else  if (c->is_Proj()) {
 739       if (c->is_IfProj()) {
 740         if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
 741           // continue;
 742         } else {
 743           if (!allow_one_proj) {
 744             return NodeSentinel;
 745           }
 746           if (iffproj == NULL) {
 747             iffproj = c;
 748           } else {
 749             return NodeSentinel;
 750           }
 751         }
 752       } else if (c->Opcode() == Op_JumpProj) {
 753         return NodeSentinel; // unsupported
 754       } else if (c->Opcode() == Op_CatchProj) {
 755         return NodeSentinel; // unsupported
 756       } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
 757         return NodeSentinel; // unsupported
 758       } else {
 759         assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
 760       }
 761     }
 762     c = next;
 763   }
 764   return iffproj;
 765 }
 766 
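     // Walks memory inputs from mem on the given alias slice until reaching a state whose
     // control strictly dominates ctrl (returned in mem_ctrl); returns NULL if a cycle is hit.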
 767 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
 768   ResourceMark rm;
 769   VectorSet wq(Thread::current()->resource_area());
 770   wq.set(mem->_idx);
 771   mem_ctrl = phase->ctrl_or_self(mem);
 772   while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
 773     mem = next_mem(mem, alias);
 774     if (wq.test_set(mem->_idx)) {
 775       return NULL;
 776     }
 777     mem_ctrl = phase->ctrl_or_self(mem);
 778   }
 779   if (mem->is_MergeMem()) {
 780     mem = mem->as_MergeMem()->memory_at(alias);
 781     mem_ctrl = phase->ctrl_or_self(mem);
 782   }
 783   return mem;
 784 }
 785 
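     // Searches up the dominator chain from ctrl for the current memory state on the bottom
     // (TypePtr::BOTTOM) slice: a bottom memory Phi, or the bottom memory projection of a
     // call, safepoint or membar.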
 786 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
 787   Node* mem = NULL;
 788   Node* c = ctrl;
 789   do {
 790     if (c->is_Region()) {
 791       Node* phi_bottom = NULL;
 792       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
 793         Node* u = c->fast_out(i);
 794         if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
 795           if (u->adr_type() == TypePtr::BOTTOM) {
 796             mem = u;
 797           }
 798         }
 799       }
 800     } else {
 801       if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
 802         CallProjections projs;
 803         c->as_Call()->extract_projections(&projs, true, false);
 804         if (projs.fallthrough_memproj != NULL) {
 805           if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
 806             if (projs.catchall_memproj == NULL) {
 807               mem = projs.fallthrough_memproj;
 808             } else {
 809               if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
 810                 mem = projs.fallthrough_memproj;
 811               } else {
 812                 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
 813                 mem = projs.catchall_memproj;
 814               }
 815             }
 816           }
 817         } else {
 818           Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
 819           if (proj != NULL &&
 820               proj->adr_type() == TypePtr::BOTTOM) {
 821             mem = proj;
 822           }
 823         }
 824       } else {
 825         for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
 826           Node* u = c->fast_out(i);
 827           if (u->is_Proj() &&
 828               u->bottom_type() == Type::MEMORY &&
 829               u->adr_type() == TypePtr::BOTTOM) {
 830               assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
 831               assert(mem == NULL, "only one proj");
 832               mem = u;
 833           }
 834         }
 835         assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
 836       }
 837     }
 838     c = phase->idom(c);
 839   } while (mem == NULL);
 840   return mem;
 841 }
 842 
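     // Collects the non-CFG uses of n that have control ctrl (ignoring uses through a loop
     // phi back edge) so they can be revisited after the barrier is expanded.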
 843 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
 844   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 845     Node* u = n->fast_out(i);
 846     if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
 847       uses.push(u);
 848     }
 849   }
 850 }
 851 
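     // Replaces the outer strip-mined loop head and its loop end with a plain LoopNode and
     // IfNode so the loop nest no longer appears strip mined.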
 852 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
 853   OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
 854   Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
 855   phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
 856   Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
 857   phase->register_control(new_le, phase->get_loop(le), le->in(0));
 858   phase->lazy_replace(outer, new_outer);
 859   phase->lazy_replace(le, new_le);
 860   inner->clear_strip_mined();
 861 }
 862 
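     // Emits the runtime gc state check: loads the thread-local gc state byte and branches
     // on (gc_state & flags) != 0. On return, ctrl is the path where the test succeeded
     // (heap not stable) and heap_stable_ctrl the path where it failed.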
 863 void ShenandoahBarrierC2Support::test_heap_state(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
 864                                                  PhaseIdealLoop* phase, int flags) {
 865   IdealLoopTree* loop = phase->get_loop(ctrl);
 866   Node* thread = new ThreadLocalNode();
 867   phase->register_new_node(thread, ctrl);
 868   Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 869   phase->set_ctrl(offset, phase->C->root());
 870   Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
 871   phase->register_new_node(gc_state_addr, ctrl);
 872   uint gc_state_idx = Compile::AliasIdxRaw;
 873   const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
 874   debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
 875 
 876   Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
 877   phase->register_new_node(gc_state, ctrl);
 878   Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(flags));
 879   phase->register_new_node(heap_stable_and, ctrl);
 880   Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
 881   phase->register_new_node(heap_stable_cmp, ctrl);
 882   Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
 883   phase->register_new_node(heap_stable_test, ctrl);
 884   IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 885   phase->register_control(heap_stable_iff, loop, ctrl);
 886 
 887   heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
 888   phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
 889   ctrl = new IfTrueNode(heap_stable_iff);
 890   phase->register_control(ctrl, loop, heap_stable_iff);
 891 
 892   assert(is_heap_state_test(heap_stable_iff, flags), "Should match the shape");
 893 }
 894 
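     // If val may be NULL, emits an explicit null check. On return, ctrl is the non-null
     // path and null_ctrl the NULL path; nothing is emitted when val is known non-null.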
 895 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
 896   const Type* val_t = phase->igvn().type(val);
 897   if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
 898     IdealLoopTree* loop = phase->get_loop(ctrl);
 899     Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
 900     phase->register_new_node(null_cmp, ctrl);
 901     Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
 902     phase->register_new_node(null_test, ctrl);
 903     IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
 904     phase->register_control(null_iff, loop, ctrl);
 905     ctrl = new IfTrueNode(null_iff);
 906     phase->register_control(ctrl, loop, null_iff);
 907     null_ctrl = new IfFalseNode(null_iff);
 908     phase->register_control(null_ctrl, loop, null_iff);
 909   }
 910 }
 911 
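     // Clones the null-check If that guards projection unc_ctrl at control c and returns a
     // new CastPP of the unchecked value pinned on the cloned non-null (IfTrue) projection;
     // c is updated to that projection.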
 912 Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
 913   IdealLoopTree *loop = phase->get_loop(c);
 914   Node* iff = unc_ctrl->in(0);
 915   assert(iff->is_If(), "broken");
 916   Node* new_iff = iff->clone();
 917   new_iff->set_req(0, c);
 918   phase->register_control(new_iff, loop, c);
 919   Node* iffalse = new IfFalseNode(new_iff->as_If());
 920   phase->register_control(iffalse, loop, new_iff);
 921   Node* iftrue = new IfTrueNode(new_iff->as_If());
 922   phase->register_control(iftrue, loop, new_iff);
 923   c = iftrue;
 924   const Type *t = phase->igvn().type(val);
 925   assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
 926   Node* uncasted_val = val->in(1);
 927   val = new CastPPNode(uncasted_val, t);
 928   val->init_req(0, c);
 929   phase->register_new_node(val, c);
 930   return val;
 931 }
 932 
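     // After the null check guarding uncommon trap unc has been cloned, reroutes the trap
     // path: nodes controlled by the old trap projection are moved to new_unc_ctrl and the
     // trap call (or its merging region) is rewired to it.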
 933 void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
 934                                                 Unique_Node_List& uses, PhaseIdealLoop* phase) {
 935   IfNode* iff = unc_ctrl->in(0)->as_If();
 936   Node* proj = iff->proj_out(0);
 937   assert(proj != unc_ctrl, "bad projection");
 938   Node* use = proj->unique_ctrl_out();
 939 
 940   assert(use == unc || use->is_Region(), "what else?");
 941 
 942   uses.clear();
 943   if (use == unc) {
 944     phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
 945     for (uint i = 1; i < unc->req(); i++) {
 946       Node* n = unc->in(i);
 947       if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
 948         uses.push(n);
 949       }
 950     }
 951   } else {
 952     assert(use->is_Region(), "what else?");
 953     uint idx = 1;
 954     for (; use->in(idx) != proj; idx++);
 955     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
 956       Node* u = use->fast_out(i);
 957       if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
 958         uses.push(u->in(idx));
 959       }
 960     }
 961   }
 962   for(uint next = 0; next < uses.size(); next++ ) {
 963     Node *n = uses.at(next);
 964     assert(phase->get_ctrl(n) == proj, "bad control");
 965     phase->set_ctrl_and_loop(n, new_unc_ctrl);
 966     if (n->in(0) == proj) {
 967       phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
 968     }
 969     for (uint i = 0; i < n->req(); i++) {
 970       Node* m = n->in(i);
 971       if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
 972         uses.push(m);
 973       }
 974     }
 975   }
 976 
 977   phase->igvn().rehash_node_delayed(use);
 978   int nb = use->replace_edge(proj, new_unc_ctrl);
 979   assert(nb == 1, "only use expected");
 980 }
 981 
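     // Emits the collection set membership test: indexes the in-cset fast-test table with
     // the region index of val and branches on the loaded byte. On return, ctrl is the
     // in-collection-set path and not_cset_ctrl the path that can skip the barrier.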
 982 void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
 983   IdealLoopTree *loop = phase->get_loop(ctrl);
 984   Node* raw_rbtrue = new CastP2XNode(ctrl, val);
 985   phase->register_new_node(raw_rbtrue, ctrl);
 986   Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
 987   phase->register_new_node(cset_offset, ctrl);
 988   Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
 989   phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
 990   Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
 991   phase->register_new_node(in_cset_fast_test_adr, ctrl);
 992   uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
 993   const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
 994   debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
 995   Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
 996   phase->register_new_node(in_cset_fast_test_load, ctrl);
 997   Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
 998   phase->register_new_node(in_cset_fast_test_cmp, ctrl);
 999   Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
1000   phase->register_new_node(in_cset_fast_test_test, ctrl);
1001   IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
1002   phase->register_control(in_cset_fast_test_iff, loop, ctrl);
1003 
1004   not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
1005   phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);
1006 
1007   ctrl = new IfFalseNode(in_cset_fast_test_iff);
1008   phase->register_control(ctrl, loop, in_cset_fast_test_iff);
1009 }
1010 
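     // Emits the load reference barrier slow path as a leaf call into the Shenandoah
     // runtime (native, narrow or regular variant). On return, ctrl, val and result_mem are
     // the call's control, result (cast back to the object type) and raw memory projections.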
1011 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) {
1012   IdealLoopTree* loop = phase->get_loop(ctrl);
1013   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
1014 
1015   // The slow path stub consumes and produces raw memory in addition
1016   // to the existing memory edges
1017   Node* base = find_bottom_mem(ctrl, phase);
1018   MergeMemNode* mm = MergeMemNode::make(base);
1019   mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1020   phase->register_new_node(mm, ctrl);
1021 
1022   address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
1023           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) :
1024           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier);
1025 
1026   address calladdr = is_native ? CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native)
1027                                : target;
1028   const char* name = is_native ? "load_reference_barrier_native" : "load_reference_barrier";
1029   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
1030 
1031   call->init_req(TypeFunc::Control, ctrl);
1032   call->init_req(TypeFunc::I_O, phase->C->top());
1033   call->init_req(TypeFunc::Memory, mm);
1034   call->init_req(TypeFunc::FramePtr, phase->C->top());
1035   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1036   call->init_req(TypeFunc::Parms, val);
1037   call->init_req(TypeFunc::Parms+1, load_addr);
1038   phase->register_control(call, loop, ctrl);
1039   ctrl = new ProjNode(call, TypeFunc::Control);
1040   phase->register_control(ctrl, loop, call);
1041   result_mem = new ProjNode(call, TypeFunc::Memory);
1042   phase->register_new_node(result_mem, call);
1043   val = new ProjNode(call, TypeFunc::Parms);
1044   phase->register_new_node(val, call);
1045   val = new CheckCastPPNode(ctrl, val, obj_type);
1046   phase->register_new_node(val, ctrl);
1047 }
1048 
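     // After a barrier has been expanded, reroutes nodes that were control dependent on the
     // barrier's original control to region, the control that ends the expanded barrier.
     // The input raw memory state is left above the barrier.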
1049 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
1050   Node* ctrl = phase->get_ctrl(barrier);
1051   Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
1052 
1053   // Update the control of all nodes that should be after the
1054   // barrier control flow
1055   uses.clear();
1056   // Every node that is control dependent on the barrier's input
1057   // control will be after the expanded barrier. The raw memory (if
1058   // it is control dependent on the barrier's input control)
1059   // must stay above the barrier.
1060   uses_to_ignore.clear();
1061   if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1062     uses_to_ignore.push(init_raw_mem);
1063   }
1064   for (uint next = 0; next < uses_to_ignore.size(); next++) {
1065     Node *n = uses_to_ignore.at(next);
1066     for (uint i = 0; i < n->req(); i++) {
1067       Node* in = n->in(i);
1068       if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
1069         uses_to_ignore.push(in);
1070       }
1071     }
1072   }
1073   for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
1074     Node* u = ctrl->fast_out(i);
1075     if (u->_idx < last &&
1076         u != barrier &&
1077         !uses_to_ignore.member(u) &&
1078         (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
1079         (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
1080       Node* old_c = phase->ctrl_or_self(u);
1081       Node* c = old_c;
1082       if (c != ctrl ||
1083           is_dominator_same_ctrl(old_c, barrier, u, phase) ||
1084           ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
1085         phase->igvn().rehash_node_delayed(u);
1086         int nb = u->replace_edge(ctrl, region);
1087         if (u->is_CFG()) {
1088           if (phase->idom(u) == ctrl) {
1089             phase->set_idom(u, region, phase->dom_depth(region));
1090           }
1091         } else if (phase->get_ctrl(u) == ctrl) {
1092           assert(u != init_raw_mem, "should leave input raw mem above the barrier");
1093           uses.push(u);
1094         }
1095         assert(nb == 1, "more than 1 ctrl input?");
1096         --i, imax -= nb;
1097       }
1098     }
1099   }
1100 }
1101 
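     // Builds phis that merge n (reachable from the call's fallthrough catch projection)
     // with n_clone (reachable from the catchall projection) at the regions between c and ctrl.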
1102 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1103   Node* region = NULL;
1104   while (c != ctrl) {
1105     if (c->is_Region()) {
1106       region = c;
1107     }
1108     c = phase->idom(c);
1109   }
1110   assert(region != NULL, "");
1111   Node* phi = new PhiNode(region, n->bottom_type());
1112   for (uint j = 1; j < region->req(); j++) {
1113     Node* in = region->in(j);
1114     if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1115       phi->init_req(j, n);
1116     } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1117       phi->init_req(j, n_clone);
1118     } else {
1119       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1120     }
1121   }
1122   phase->register_new_node(phi, region);
1123   return phi;
1124 }
1125 
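     // Main expansion pass: gives each barrier a control where it can legally be expanded
     // (un-strip-mining loops, moving barriers across calls as needed) and then expands it
     // using the helpers above.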
1126 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1127   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1128 
1129   Unique_Node_List uses;
1130   for (int i = 0; i < state->enqueue_barriers_count(); i++) {
1131     Node* barrier = state->enqueue_barrier(i);
1132     Node* ctrl = phase->get_ctrl(barrier);
1133     IdealLoopTree* loop = phase->get_loop(ctrl);
1134     if (loop->_head->is_OuterStripMinedLoop()) {
1135       // Expanding a barrier here will break loop strip mining
1136       // verification. Transform the loop so the loop nest doesn't
1137       // appear as strip mined.
1138       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1139       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1140     }
1141   }
1142 
1143   Node_Stack stack(0);
1144   Node_List clones;
1145   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1146     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1147     if (lrb->is_redundant()) {
1148       continue;
1149     }
1150 
1151     Node* ctrl = phase->get_ctrl(lrb);
1152     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1153 
1154     CallStaticJavaNode* unc = NULL;
1155     Node* unc_ctrl = NULL;
1156     Node* uncasted_val = val;
1157 
1158     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1159       Node* u = lrb->fast_out(i);
1160       if (u->Opcode() == Op_CastPP &&
1161           u->in(0) != NULL &&
1162           phase->is_dominator(u->in(0), ctrl)) {
1163         const Type* u_t = phase->igvn().type(u);
1164 
1165         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1166             u->in(0)->Opcode() == Op_IfTrue &&
1167             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1168             u->in(0)->in(0)->is_If() &&
1169             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1170             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1171             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1172             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1173             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1174           IdealLoopTree* loop = phase->get_loop(ctrl);
1175           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1176 
1177           if (!unc_loop->is_member(loop)) {
1178             continue;
1179           }
1180 
1181           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1182           assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1183           if (branch == NodeSentinel) {
1184             continue;
1185           }
1186 
1187           phase->igvn().replace_input_of(u, 1, val);
1188           phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
1189           phase->set_ctrl(u, u->in(0));
1190           phase->set_ctrl(lrb, u->in(0));
1191           unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1192           unc_ctrl = u->in(0);
1193           val = u;
1194 
1195           for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
1196             Node* u = val->fast_out(j);
1197             if (u == lrb) continue;
1198             phase->igvn().rehash_node_delayed(u);
1199             int nb = u->replace_edge(val, lrb);
1200             --j; jmax -= nb;
1201           }
1202 
1203           RegionNode* r = new RegionNode(3);
1204           IfNode* iff = unc_ctrl->in(0)->as_If();
1205 
1206           Node* ctrl_use = unc_ctrl->unique_ctrl_out();
1207           Node* unc_ctrl_clone = unc_ctrl->clone();
1208           phase->register_control(unc_ctrl_clone, loop, iff);
1209           Node* c = unc_ctrl_clone;
1210           Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
1211           r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
1212 
1213           phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
1214           phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
1215           phase->lazy_replace(c, unc_ctrl);
1216           c = NULL;
1217           phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
1218           phase->set_ctrl(val, unc_ctrl_clone);
1219 
1220           IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
1221           fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
1222           Node* iff_proj = iff->proj_out(0);
1223           r->init_req(2, iff_proj);
1224           phase->register_control(r, phase->ltree_root(), iff);
1225 
1226           Node* new_bol = new_iff->in(1)->clone();
1227           Node* new_cmp = new_bol->in(1)->clone();
1228           assert(new_cmp->Opcode() == Op_CmpP, "broken");
1229           assert(new_cmp->in(1) == val->in(1), "broken");
1230           new_bol->set_req(1, new_cmp);
1231           new_cmp->set_req(1, lrb);
1232           phase->register_new_node(new_bol, new_iff->in(0));
1233           phase->register_new_node(new_cmp, new_iff->in(0));
1234           phase->igvn().replace_input_of(new_iff, 1, new_bol);
1235           phase->igvn().replace_input_of(new_cast, 1, lrb);
1236 
1237           for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1238             Node* u = lrb->fast_out(i);
1239             if (u == new_cast || u == new_cmp) {
1240               continue;
1241             }
1242             phase->igvn().rehash_node_delayed(u);
1243             int nb = u->replace_edge(lrb, new_cast);
1244             assert(nb > 0, "no update?");
1245             --i; imax -= nb;
1246           }
1247 
1248           for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1249             Node* u = val->fast_out(i);
1250             if (u == lrb) {
1251               continue;
1252             }
1253             phase->igvn().rehash_node_delayed(u);
1254             int nb = u->replace_edge(val, new_cast);
1255             assert(nb > 0, "no update?");
1256             --i; imax -= nb;
1257           }
1258 
1259           ctrl = unc_ctrl_clone;
1260           phase->set_ctrl_and_loop(lrb, ctrl);
1261           break;
1262         }
1263       }
1264     }
1265     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1266       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1267       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1268         // The rethrow call may have too many projections to be
1269         // properly handled here. Given there's no reason for a
1270         // barrier to depend on the call, move it above the call.
1271         stack.push(lrb, 0);
1272         do {
1273           Node* n = stack.node();
1274           uint idx = stack.index();
1275           if (idx < n->req()) {
1276             Node* in = n->in(idx);
1277             stack.set_index(idx+1);
1278             if (in != NULL) {
1279               if (phase->has_ctrl(in)) {
1280                 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1281 #ifdef ASSERT
1282                   for (uint i = 0; i < stack.size(); i++) {
1283                     assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1284                   }
1285 #endif
1286                   stack.push(in, 0);
1287                 }
1288               } else {
1289                 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1290               }
1291             }
1292           } else {
1293             phase->set_ctrl(n, call->in(0));
1294             stack.pop();
1295           }
1296         } while(stack.size() > 0);
1297         continue;
1298       }
1299       CallProjections projs;
1300       call->extract_projections(&projs, false, false);
1301 
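           // The barrier is pinned at a Java call. Clone it (and any nodes that
           // must sit between the call and its catch projections) so the
           // fall-through path keeps the original and the exception path gets the
           // clone, rewiring uses and phis on either side of the call.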
1302       Node* lrb_clone = lrb->clone();
1303       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1304       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1305 
1306       stack.push(lrb, 0);
1307       clones.push(lrb_clone);
1308 
1309       do {
1310         assert(stack.size() == clones.size(), "");
1311         Node* n = stack.node();
1312 #ifdef ASSERT
1313         if (n->is_Load()) {
1314           Node* mem = n->in(MemNode::Memory);
1315           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1316             Node* u = mem->fast_out(j);
1317             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1318           }
1319         }
1320 #endif
1321         uint idx = stack.index();
1322         Node* n_clone = clones.at(clones.size()-1);
1323         if (idx < n->outcnt()) {
1324           Node* u = n->raw_out(idx);
1325           Node* c = phase->ctrl_or_self(u);
1326           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1327             stack.set_index(idx+1);
1328             assert(!u->is_CFG(), "");
1329             stack.push(u, 0);
1330             Node* u_clone = u->clone();
1331             int nb = u_clone->replace_edge(n, n_clone);
1332             assert(nb > 0, "should have replaced some uses");
1333             phase->register_new_node(u_clone, projs.catchall_catchproj);
1334             clones.push(u_clone);
1335             phase->set_ctrl(u, projs.fallthrough_catchproj);
1336           } else {
1337             bool replaced = false;
1338             if (u->is_Phi()) {
1339               for (uint k = 1; k < u->req(); k++) {
1340                 if (u->in(k) == n) {
1341                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1342                     phase->igvn().replace_input_of(u, k, n_clone);
1343                     replaced = true;
1344                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1345                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1346                     replaced = true;
1347                   }
1348                 }
1349               }
1350             } else {
1351               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1352                 phase->igvn().rehash_node_delayed(u);
1353                 int nb = u->replace_edge(n, n_clone);
1354                 assert(nb > 0, "should have replaced some uses");
1355                 replaced = true;
1356               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1357                 phase->igvn().rehash_node_delayed(u);
1358                 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1359                 assert(nb > 0, "should have replaced some uses");
1360                 replaced = true;
1361               }
1362             }
1363             if (!replaced) {
1364               stack.set_index(idx+1);
1365             }
1366           }
1367         } else {
1368           stack.pop();
1369           clones.pop();
1370         }
1371       } while (stack.size() > 0);
1372       assert(stack.size() == 0 && clones.size() == 0, "");
1373     }
1374   }
1375 
1376   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1377     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1378     if (lrb->is_redundant()) {
1379       continue;
1380     }
1381     Node* ctrl = phase->get_ctrl(lrb);
1382     IdealLoopTree* loop = phase->get_loop(ctrl);
1383     if (loop->_head->is_OuterStripMinedLoop()) {
1384       // Expanding a barrier here will break loop strip mining
1385       // verification. Transform the loop so the loop nest doesn't
1386       // appear as strip mined.
1387       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1388       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1389     }
1390   }
1391 
1392   // Expand load-reference-barriers
1393   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1394   Unique_Node_List uses_to_ignore;
1395   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1396     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1397     if (lrb->is_redundant()) {
1398       phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1399       continue;
1400     }
1401     uint last = phase->C->unique();
1402     Node* ctrl = phase->get_ctrl(lrb);
1403     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1404 
1406     Node* orig_ctrl = ctrl;
1407 
1408     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1409     Node* init_raw_mem = raw_mem;
1410     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1411 
1412     IdealLoopTree *loop = phase->get_loop(ctrl);
1413     CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
1414     Node* unc_ctrl = NULL;
1415     if (unc != NULL) {
1416       if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
1417         unc = NULL;
1418       } else {
1419         unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
1420       }
1421     }
1422 
1423     Node* uncasted_val = val;
1424     if (unc != NULL) {
1425       uncasted_val = val->in(1);
1426     }
1427 
1428     Node* heap_stable_ctrl = NULL;
1429     Node* null_ctrl = NULL;
1430 
1431     assert(val->bottom_type()->make_oopptr(), "need oop");
1432     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1433 
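         // Expand the barrier into a diamond: a heap-stable fast path, an optional
         // null path, a not-in-cset fast path and an evacuation slow path that
         // calls the runtime stub. A region merges control, and phis merge the
         // (possibly forwarded) value and the raw memory state.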
1434     enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT };
1435     Node* region = new RegionNode(PATH_LIMIT);
1436     Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
1437     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1438 
1439     // Stable path.
1440     test_heap_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::HAS_FORWARDED);
1441     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1442 
1443     // Heap stable case
1444     region->init_req(_heap_stable, heap_stable_ctrl);
1445     val_phi->init_req(_heap_stable, uncasted_val);
1446     raw_mem_phi->init_req(_heap_stable, raw_mem);
1447 
1448     Node* reg2_ctrl = NULL;
1449     // Null case
1450     test_null(ctrl, val, null_ctrl, phase);
1451     if (null_ctrl != NULL) {
1452       reg2_ctrl = null_ctrl->in(0);
1453       region->init_req(_null_path, null_ctrl);
1454       val_phi->init_req(_null_path, uncasted_val);
1455       raw_mem_phi->init_req(_null_path, raw_mem);
1456     } else {
1457       region->del_req(_null_path);
1458       val_phi->del_req(_null_path);
1459       raw_mem_phi->del_req(_null_path);
1460     }
1461 
1462     // Test for in-cset.
1463     // Wire the not-in-cset path to the _not_cset slot of the region and phis.
1464     Node* not_cset_ctrl = NULL;
1465     in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
1466     if (not_cset_ctrl != NULL) {
1467       if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
1468       region->init_req(_not_cset, not_cset_ctrl);
1469       val_phi->init_req(_not_cset, uncasted_val);
1470       raw_mem_phi->init_req(_not_cset, raw_mem);
1471     }
1472 
1473     // Resolve the object when the original value is in the collection set.
1474     // Make the unconditional resolve for the forwarding pointer.
1475     Node* new_val = uncasted_val;
1476     if (unc_ctrl != NULL) {
1477       // Clone the null check in this branch to allow implicit null check
1478       new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
1479       fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
1480 
1481       IfNode* iff = unc_ctrl->in(0)->as_If();
1482       phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
1483     }
1484 
1485     // Call the load-reference-barrier stub and wire up that path through the _evac_path slot
1486     Node* result_mem = NULL;
1487 
1488     Node* fwd = new_val;
1489     Node* addr;
1490     if (ShenandoahSelfFixing) {
1491       VectorSet visited(Thread::current()->resource_area());
1492       addr = get_load_addr(phase, visited, lrb);
1493     } else {
1494       addr = phase->igvn().zerocon(T_OBJECT);
1495     }
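         // If the load address is an AddP, rebuild it on a control-dependent
         // CheckCastPP of its base so the address computation is pinned at this
         // control and can be passed to the slow-path stub for self-fixing.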
1496     if (addr->Opcode() == Op_AddP) {
1497       Node* orig_base = addr->in(AddPNode::Base);
1498       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), true);
1499       phase->register_new_node(base, ctrl);
1500       if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1501         // Field access
1502         addr = addr->clone();
1503         addr->set_req(AddPNode::Base, base);
1504         addr->set_req(AddPNode::Address, base);
1505         phase->register_new_node(addr, ctrl);
1506       } else {
1507         Node* addr2 = addr->in(AddPNode::Address);
1508         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1509               addr2->in(AddPNode::Base) == orig_base) {
1510           addr2 = addr2->clone();
1511           addr2->set_req(AddPNode::Base, base);
1512           addr2->set_req(AddPNode::Address, base);
1513           phase->register_new_node(addr2, ctrl);
1514           addr = addr->clone();
1515           addr->set_req(AddPNode::Base, base);
1516           addr->set_req(AddPNode::Address, addr2);
1517           phase->register_new_node(addr, ctrl);
1518         }
1519       }
1520     }
1521     call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, lrb->is_native(), phase);
1522     region->init_req(_evac_path, ctrl);
1523     val_phi->init_req(_evac_path, fwd);
1524     raw_mem_phi->init_req(_evac_path, result_mem);
1525 
1526     phase->register_control(region, loop, heap_stable_iff);
1527     Node* out_val = val_phi;
1528     phase->register_new_node(val_phi, region);
1529     phase->register_new_node(raw_mem_phi, region);
1530 
1531     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1532 
1533     ctrl = orig_ctrl;
1534 
1535     if (unc != NULL) {
1536       for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1537         Node* u = val->fast_out(i);
1538         Node* c = phase->ctrl_or_self(u);
1539         if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
1540           phase->igvn().rehash_node_delayed(u);
1541           int nb = u->replace_edge(val, out_val);
1542           --i; imax -= nb;
1543         }
1544       }
1545       if (val->outcnt() == 0) {
1546         phase->igvn()._worklist.push(val);
1547       }
1548     }
1549     phase->igvn().replace_node(lrb, out_val);
1550 
1551     follow_barrier_uses(out_val, ctrl, uses, phase);
1552 
1553     for(uint next = 0; next < uses.size(); next++ ) {
1554       Node *n = uses.at(next);
1555       assert(phase->get_ctrl(n) == ctrl, "bad control");
1556       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1557       phase->set_ctrl(n, region);
1558       follow_barrier_uses(n, ctrl, uses, phase);
1559     }
1560 
1561     // The slow path call produces memory: hook the raw memory phi
1562     // from the expanded load reference barrier into the rest of the graph,
1563     // which may require adding memory phis at every post-dominated
1564     // region and at enclosing loop heads. Use the memory state
1565     // collected in memory_nodes to fix the memory graph, and update
1566     // that memory state as we go.
1567     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1568   }
1569   // Done expanding load-reference-barriers.
1570   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1571 
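       // Expand enqueue (SATB pre-write) barriers: when marking is active and
       // pre_val is not null, append pre_val to the thread-local SATB queue,
       // falling back to a runtime call when the queue buffer is full.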
1572   for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
1573     Node* barrier = state->enqueue_barrier(i);
1574     Node* pre_val = barrier->in(1);
1575 
1576     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1577       ShouldNotReachHere();
1578       continue;
1579     }
1580 
1581     Node* ctrl = phase->get_ctrl(barrier);
1582 
1583     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1584       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1585       ctrl = ctrl->in(0)->in(0);
1586       phase->set_ctrl(barrier, ctrl);
1587     } else if (ctrl->is_CallRuntime()) {
1588       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1589       ctrl = ctrl->in(0);
1590       phase->set_ctrl(barrier, ctrl);
1591     }
1592 
1593     Node* init_ctrl = ctrl;
1594     IdealLoopTree* loop = phase->get_loop(ctrl);
1595     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1596     Node* init_raw_mem = raw_mem;
1597     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1598     Node* heap_stable_ctrl = NULL;
1599     Node* null_ctrl = NULL;
1600     uint last = phase->C->unique();
1601 
1602     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1603     Node* region = new RegionNode(PATH_LIMIT);
1604     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1605 
1606     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1607     Node* region2 = new RegionNode(PATH_LIMIT2);
1608     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1609 
1610     // Stable path.
1611     test_heap_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
1612     region->init_req(_heap_stable, heap_stable_ctrl);
1613     phi->init_req(_heap_stable, raw_mem);
1614 
1615     // Null path
1616     Node* reg2_ctrl = NULL;
1617     test_null(ctrl, pre_val, null_ctrl, phase);
1618     if (null_ctrl != NULL) {
1619       reg2_ctrl = null_ctrl->in(0);
1620       region2->init_req(_null_path, null_ctrl);
1621       phi2->init_req(_null_path, raw_mem);
1622     } else {
1623       region2->del_req(_null_path);
1624       phi2->del_req(_null_path);
1625     }
1626 
1627     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1628     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1629     Node* thread = new ThreadLocalNode();
1630     phase->register_new_node(thread, ctrl);
1631     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1632     phase->register_new_node(buffer_adr, ctrl);
1633     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1634     phase->register_new_node(index_adr, ctrl);
1635 
1636     BasicType index_bt = TypeX_X->basic_type();
1637     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
1638     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1639     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1640     phase->register_new_node(index, ctrl);
1641     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1642     phase->register_new_node(index_cmp, ctrl);
1643     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1644     phase->register_new_node(index_test, ctrl);
1645     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1646     if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1647     phase->register_control(queue_full_iff, loop, ctrl);
1648     Node* not_full = new IfTrueNode(queue_full_iff);
1649     phase->register_control(not_full, loop, queue_full_iff);
1650     Node* full = new IfFalseNode(queue_full_iff);
1651     phase->register_control(full, loop, queue_full_iff);
1652 
1653     ctrl = not_full;
1654 
1655     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1656     phase->register_new_node(next_index, ctrl);
1657 
1658     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1659     phase->register_new_node(buffer, ctrl);
1660     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1661     phase->register_new_node(log_addr, ctrl);
1662     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1663     phase->register_new_node(log_store, ctrl);
1664     // update the index
1665     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1666     phase->register_new_node(index_update, ctrl);
1667 
1668     // Fast-path case
1669     region2->init_req(_fast_path, ctrl);
1670     phi2->init_req(_fast_path, index_update);
1671 
1672     ctrl = full;
1673 
1674     Node* base = find_bottom_mem(ctrl, phase);
1675 
1676     MergeMemNode* mm = MergeMemNode::make(base);
1677     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1678     phase->register_new_node(mm, ctrl);
1679 
1680     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1681     call->init_req(TypeFunc::Control, ctrl);
1682     call->init_req(TypeFunc::I_O, phase->C->top());
1683     call->init_req(TypeFunc::Memory, mm);
1684     call->init_req(TypeFunc::FramePtr, phase->C->top());
1685     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1686     call->init_req(TypeFunc::Parms, pre_val);
1687     call->init_req(TypeFunc::Parms+1, thread);
1688     phase->register_control(call, loop, ctrl);
1689 
1690     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1691     phase->register_control(ctrl_proj, loop, call);
1692     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1693     phase->register_new_node(mem_proj, call);
1694 
1695     // Slow-path case
1696     region2->init_req(_slow_path, ctrl_proj);
1697     phi2->init_req(_slow_path, mem_proj);
1698 
1699     phase->register_control(region2, loop, reg2_ctrl);
1700     phase->register_new_node(phi2, region2);
1701 
1702     region->init_req(_heap_unstable, region2);
1703     phi->init_req(_heap_unstable, phi2);
1704 
1705     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1706     phase->register_new_node(phi, region);
1707 
1708     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1709     for(uint next = 0; next < uses.size(); next++ ) {
1710       Node *n = uses.at(next);
1711       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1712       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1713       phase->set_ctrl(n, region);
1714       follow_barrier_uses(n, init_ctrl, uses, phase);
1715     }
1716     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1717 
1718     phase->igvn().replace_node(barrier, pre_val);
1719   }
1720   assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1721 
1722 }
1723 
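     // Walk back from the barrier's value input to the load it originates from
     // and return that load's address. Returns NULL for nodes already visited on
     // this path, and a zero constant when no single address can be determined
     // (the stub then skips self-fixing for this barrier).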
1724 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1725   if (visited.test_set(in->_idx)) {
1726     return NULL;
1727   }
1728   switch (in->Opcode()) {
1729     case Op_Proj:
1730       return get_load_addr(phase, visited, in->in(0));
1731     case Op_CastPP:
1732     case Op_CheckCastPP:
1733     case Op_DecodeN:
1734     case Op_EncodeP:
1735       return get_load_addr(phase, visited, in->in(1));
1736     case Op_LoadN:
1737     case Op_LoadP:
1738       return in->in(MemNode::Address);
1739     case Op_CompareAndExchangeN:
1740     case Op_CompareAndExchangeP:
1741     case Op_GetAndSetN:
1742     case Op_GetAndSetP:
1743     case Op_ShenandoahCompareAndExchangeP:
1744     case Op_ShenandoahCompareAndExchangeN:
1745       // These instructions would just have stored a different value
1746       // into the field; there is no point attempting to fix it here.
1747       return phase->igvn().zerocon(T_OBJECT);
1748     case Op_CMoveP:
1749     case Op_CMoveN: {
1750       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1751       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1752       // Handle unambiguous cases: single address reported on both branches.
1753       if (t != NULL && f == NULL) return t;
1754       if (t == NULL && f != NULL) return f;
1755       if (t != NULL && t == f)    return t;
1756       // Ambiguity.
1757       return phase->igvn().zerocon(T_OBJECT);
1758     }
1759     case Op_Phi: {
1760       Node* addr = NULL;
1761       for (uint i = 1; i < in->req(); i++) {
1762         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1763         if (addr == NULL) {
1764           addr = addr1;
1765         }
1766         if (addr != addr1) {
1767           return phase->igvn().zerocon(T_OBJECT);
1768         }
1769       }
1770       return addr;
1771     }
1772     case Op_ShenandoahLoadReferenceBarrier:
1773       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1774     case Op_ShenandoahEnqueueBarrier:
1775       return get_load_addr(phase, visited, in->in(1));
1776     case Op_CallDynamicJava:
1777     case Op_CallLeaf:
1778     case Op_CallStaticJava:
1779     case Op_ConN:
1780     case Op_ConP:
1781     case Op_Parm:
1782     case Op_CreateEx:
1783       return phase->igvn().zerocon(T_OBJECT);
1784     default:
1785 #ifdef ASSERT
1786       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1787 #endif
1788       return phase->igvn().zerocon(T_OBJECT);
1789   }
1790 
1791 }
1792 
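     // Clone the gc-state load and its test chain (AndI, CmpI, Bool) above the
     // loop entry so the heap-stable test becomes loop invariant, which makes
     // the enclosing loop a candidate for unswitching.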
1793 void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1794   IdealLoopTree *loop = phase->get_loop(iff);
1795   Node* loop_head = loop->_head;
1796   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1797 
1798   Node* bol = iff->in(1);
1799   Node* cmp = bol->in(1);
1800   Node* andi = cmp->in(1);
1801   Node* load = andi->in(1);
1802 
1803   assert(is_gc_state_load(load), "broken");
1804   if (!phase->is_dominator(load->in(0), entry_c)) {
1805     Node* mem_ctrl = NULL;
1806     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1807     load = load->clone();
1808     load->set_req(MemNode::Memory, mem);
1809     load->set_req(0, entry_c);
1810     phase->register_new_node(load, entry_c);
1811     andi = andi->clone();
1812     andi->set_req(1, load);
1813     phase->register_new_node(andi, entry_c);
1814     cmp = cmp->clone();
1815     cmp->set_req(1, andi);
1816     phase->register_new_node(cmp, entry_c);
1817     bol = bol->clone();
1818     bol->set_req(1, cmp);
1819     phase->register_new_node(bol, entry_c);
1820 
1821     Node* old_bol = iff->in(1);
1822     phase->igvn().replace_input_of(iff, 1, bol);
1823   }
1824 }
1825 
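     // Returns true if n is a heap-stable test whose region is controlled by a
     // dominating, identical heap-stable test, i.e. every path into the region
     // comes from one of the dominating if's projections.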
1826 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1827   if (!n->is_If() || n->is_CountedLoopEnd()) {
1828     return false;
1829   }
1830   Node* region = n->in(0);
1831 
1832   if (!region->is_Region()) {
1833     return false;
1834   }
1835   Node* dom = phase->idom(region);
1836   if (!dom->is_If()) {
1837     return false;
1838   }
1839 
1840   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1841     return false;
1842   }
1843 
1844   IfNode* dom_if = dom->as_If();
1845   Node* proj_true = dom_if->proj_out(1);
1846   Node* proj_false = dom_if->proj_out(0);
1847 
1848   for (uint i = 1; i < region->req(); i++) {
1849     if (phase->is_dominator(proj_true, region->in(i))) {
1850       continue;
1851     }
1852     if (phase->is_dominator(proj_false, region->in(i))) {
1853       continue;
1854     }
1855     return false;
1856   }
1857 
1858   return true;
1859 }
1860 
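     // Replace the condition of a heap-stable test that is back-to-back with a
     // dominating identical test by a phi of constants keyed on which of the
     // dominating if's projections reaches each region input, then split the if
     // so the redundant test goes away.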
1861 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1862   assert(is_heap_stable_test(n), "no other tests");
1863   if (identical_backtoback_ifs(n, phase)) {
1864     Node* n_ctrl = n->in(0);
1865     if (phase->can_split_if(n_ctrl)) {
1866       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1867       if (is_heap_stable_test(n)) {
1868         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1869         assert(is_gc_state_load(gc_state_load), "broken");
1870         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1871         assert(is_gc_state_load(dom_gc_state_load), "broken");
1872         if (gc_state_load != dom_gc_state_load) {
1873           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1874         }
1875       }
1876       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1877       Node* proj_true = dom_if->proj_out(1);
1878       Node* proj_false = dom_if->proj_out(0);
1879       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1880       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1881 
1882       for (uint i = 1; i < n_ctrl->req(); i++) {
1883         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1884           bolphi->init_req(i, con_true);
1885         } else {
1886           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1887           bolphi->init_req(i, con_false);
1888         }
1889       }
1890       phase->register_new_node(bolphi, n_ctrl);
1891       phase->igvn().replace_input_of(n, 1, bolphi);
1892       phase->do_split_if(n);
1893     }
1894   }
1895 }
1896 
1897 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1898   // Find first invariant test that doesn't exit the loop
1899   LoopNode *head = loop->_head->as_Loop();
1900   IfNode* unswitch_iff = NULL;
1901   Node* n = head->in(LoopNode::LoopBackControl);
1902   int loop_has_sfpts = -1;
1903   while (n != head) {
1904     Node* n_dom = phase->idom(n);
1905     if (n->is_Region()) {
1906       if (n_dom->is_If()) {
1907         IfNode* iff = n_dom->as_If();
1908         if (iff->in(1)->is_Bool()) {
1909           BoolNode* bol = iff->in(1)->as_Bool();
1910           if (bol->in(1)->is_Cmp()) {
1911             // If the condition is loop invariant and not a loop exit,
1912             // we have found a reason to unswitch.
1913             if (is_heap_stable_test(iff) &&
1914                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1915               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1916               if (loop_has_sfpts == -1) {
1917                 for(uint i = 0; i < loop->_body.size(); i++) {
1918                   Node *m = loop->_body[i];
1919                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1920                     loop_has_sfpts = 1;
1921                     break;
1922                   }
1923                 }
1924                 if (loop_has_sfpts == -1) {
1925                   loop_has_sfpts = 0;
1926                 }
1927               }
1928               if (!loop_has_sfpts) {
1929                 unswitch_iff = iff;
1930               }
1931             }
1932           }
1933         }
1934       }
1935     }
1936     n = n_dom;
1937   }
1938   return unswitch_iff;
1939 }
1940 
1941 
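     // Post-expansion cleanup: collect all heap-stable tests, merge back-to-back
     // copies, and unswitch innermost loops on a loop-invariant heap-stable test
     // when the loop has no (non-leaf-call) safepoint and the node budget allows it.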
1942 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1943   Node_List heap_stable_tests;
1944   stack.push(phase->C->start(), 0);
1945   do {
1946     Node* n = stack.node();
1947     uint i = stack.index();
1948 
1949     if (i < n->outcnt()) {
1950       Node* u = n->raw_out(i);
1951       stack.set_index(i+1);
1952       if (!visited.test_set(u->_idx)) {
1953         stack.push(u, 0);
1954       }
1955     } else {
1956       stack.pop();
1957       if (n->is_If() && is_heap_stable_test(n)) {
1958         heap_stable_tests.push(n);
1959       }
1960     }
1961   } while (stack.size() > 0);
1962 
1963   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1964     Node* n = heap_stable_tests.at(i);
1965     assert(is_heap_stable_test(n), "only evacuation test");
1966     merge_back_to_back_tests(n, phase);
1967   }
1968 
1969   if (!phase->C->major_progress()) {
1970     VectorSet seen(Thread::current()->resource_area());
1971     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1972       Node* n = heap_stable_tests.at(i);
1973       IdealLoopTree* loop = phase->get_loop(n);
1974       if (loop != phase->ltree_root() &&
1975           loop->_child == NULL &&
1976           !loop->_irreducible) {
1977         Node* head = loop->_head;
1978         if (head->is_Loop() &&
1979             (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1980             !seen.test_set(head->_idx)) {
1981           IfNode* iff = find_unswitching_candidate(loop, phase);
1982           if (iff != NULL) {
1983             Node* bol = iff->in(1);
1984             if (head->as_Loop()->is_strip_mined()) {
1985               head->as_Loop()->verify_strip_mined(0);
1986             }
1987             move_heap_stable_test_out_of_loop(iff, phase);
1988 
1989             AutoNodeBudget node_budget(phase);
1990 
1991             if (loop->policy_unswitching(phase)) {
1992               if (head->as_Loop()->is_strip_mined()) {
1993                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
1994                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
1995               }
1996               phase->do_unswitching(loop, old_new);
1997             } else {
1998               // Not proceeding with unswitching. Move the load back into
1999               // the loop.
2000               phase->igvn().replace_input_of(iff, 1, bol);
2001             }
2002           }
2003         }
2004       }
2005     }
2006   }
2007 }
2008 
2009 #ifdef ASSERT
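     // For each expanded LRB stub call, check that the raw memory it produces is
     // merged back correctly: any region reachable from the call that also has
     // inputs from outside the call's control region must carry a raw memory phi
     // whose inputs mirror which control paths come from the call.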
2010 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
2011   const bool trace = false;
2012   ResourceMark rm;
2013   Unique_Node_List nodes;
2014   Unique_Node_List controls;
2015   Unique_Node_List memories;
2016 
2017   nodes.push(root);
2018   for (uint next = 0; next < nodes.size(); next++) {
2019     Node *n  = nodes.at(next);
2020     if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
2021       controls.push(n);
2022       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
2023       for (uint next2 = 0; next2 < controls.size(); next2++) {
2024         Node *m = controls.at(next2);
2025         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2026           Node* u = m->fast_out(i);
2027           if (u->is_CFG() && !u->is_Root() &&
2028               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
2029               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
2030             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
2031             controls.push(u);
2032           }
2033         }
2034       }
2035       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
2036       for (uint next2 = 0; next2 < memories.size(); next2++) {
2037         Node *m = memories.at(next2);
2038         assert(m->bottom_type() == Type::MEMORY, "");
2039         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2040           Node* u = m->fast_out(i);
2041           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
2042             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2043             memories.push(u);
2044           } else if (u->is_LoadStore()) {
2045             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
2046             memories.push(u->find_out_with(Op_SCMemProj));
2047           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
2048             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2049             memories.push(u);
2050           } else if (u->is_Phi()) {
2051             assert(u->bottom_type() == Type::MEMORY, "");
2052             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
2053               assert(controls.member(u->in(0)), "");
2054               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2055               memories.push(u);
2056             }
2057           } else if (u->is_SafePoint() || u->is_MemBar()) {
2058             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2059               Node* uu = u->fast_out(j);
2060               if (uu->bottom_type() == Type::MEMORY) {
2061                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
2062                 memories.push(uu);
2063               }
2064             }
2065           }
2066         }
2067       }
2068       for (uint next2 = 0; next2 < controls.size(); next2++) {
2069         Node *m = controls.at(next2);
2070         if (m->is_Region()) {
2071           bool all_in = true;
2072           for (uint i = 1; i < m->req(); i++) {
2073             if (!controls.member(m->in(i))) {
2074               all_in = false;
2075               break;
2076             }
2077           }
2078           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
2079           bool found_phi = false;
2080           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
2081             Node* u = m->fast_out(j);
2082             if (u->is_Phi() && memories.member(u)) {
2083               found_phi = true;
2084               for (uint i = 1; i < u->req() && found_phi; i++) {
2085                 Node* k = u->in(i);
2086                 if (memories.member(k) != controls.member(m->in(i))) {
2087                   found_phi = false;
2088                 }
2089               }
2090             }
2091           }
2092           assert(found_phi || all_in, "");
2093         }
2094       }
2095       controls.clear();
2096       memories.clear();
2097     }
2098     for( uint i = 0; i < n->len(); ++i ) {
2099       Node *m = n->in(i);
2100       if (m != NULL) {
2101         nodes.push(m);
2102       }
2103     }
2104   }
2105 }
2106 #endif
2107 
2108 ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
2109   ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
2110 }
2111 
2112 const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
2113   if (in(1) == NULL || in(1)->is_top()) {
2114     return Type::TOP;
2115   }
2116   const Type* t = in(1)->bottom_type();
2117   if (t == TypePtr::NULL_PTR) {
2118     return t;
2119   }
2120   return t->is_oopptr();
2121 }
2122 
2123 const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
2124   if (in(1) == NULL) {
2125     return Type::TOP;
2126   }
2127   const Type* t = phase->type(in(1));
2128   if (t == Type::TOP) {
2129     return Type::TOP;
2130   }
2131   if (t == TypePtr::NULL_PTR) {
2132     return t;
2133   }
2134   return t->is_oopptr();
2135 }
2136 
2137 int ShenandoahEnqueueBarrierNode::needed(Node* n) {
2138   if (n == NULL ||
2139       n->is_Allocate() ||
2140       n->Opcode() == Op_ShenandoahEnqueueBarrier ||
2141       n->bottom_type() == TypePtr::NULL_PTR ||
2142       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2143     return NotNeeded;
2144   }
2145   if (n->is_Phi() ||
2146       n->is_CMove()) {
2147     return MaybeNeeded;
2148   }
2149   return Needed;
2150 }
2151 
2152 Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
2153   for (;;) {
2154     if (n == NULL) {
2155       return n;
2156     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2157       return n;
2158     } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2159       return n;
2160     } else if (n->is_ConstraintCast() ||
2161                n->Opcode() == Op_DecodeN ||
2162                n->Opcode() == Op_EncodeP) {
2163       n = n->in(1);
2164     } else if (n->is_Proj()) {
2165       n = n->in(0);
2166     } else {
2167       return n;
2168     }
2169   }
2170   ShouldNotReachHere();
2171   return NULL;
2172 }
2173 
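     // An enqueue barrier is redundant (identity of its input) when the input can
     // only be null, a constant oop, a freshly allocated object, or a value that
     // already went through an enqueue barrier; phis and cmoves are walked to
     // check all of their inputs.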
2174 Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
2175   PhaseIterGVN* igvn = phase->is_IterGVN();
2176 
2177   Node* n = next(in(1));
2178 
2179   int cont = needed(n);
2180 
2181   if (cont == NotNeeded) {
2182     return in(1);
2183   } else if (cont == MaybeNeeded) {
2184     if (igvn == NULL) {
2185       phase->record_for_igvn(this);
2186       return this;
2187     } else {
2188       ResourceMark rm;
2189       Unique_Node_List wq;
2190       uint wq_i = 0;
2191 
2192       for (;;) {
2193         if (n->is_Phi()) {
2194           for (uint i = 1; i < n->req(); i++) {
2195             Node* m = n->in(i);
2196             if (m != NULL) {
2197               wq.push(m);
2198             }
2199           }
2200         } else {
2201           assert(n->is_CMove(), "nothing else here");
2202           Node* m = n->in(CMoveNode::IfFalse);
2203           wq.push(m);
2204           m = n->in(CMoveNode::IfTrue);
2205           wq.push(m);
2206         }
2207         Node* orig_n = NULL;
2208         do {
2209           if (wq_i >= wq.size()) {
2210             return in(1);
2211           }
2212           n = wq.at(wq_i);
2213           wq_i++;
2214           orig_n = n;
2215           n = next(n);
2216           cont = needed(n);
2217           if (cont == Needed) {
2218             return this;
2219           }
2220         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2221       }
2222     }
2223   }
2224 
2225   return this;
2226 }
2227 
2228 #ifdef ASSERT
2229 static bool has_never_branch(Node* root) {
2230   for (uint i = 1; i < root->req(); i++) {
2231     Node* in = root->in(i);
2232     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2233       return true;
2234     }
2235   }
2236   return false;
2237 }
2238 #endif
2239 
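     // Build a map from CFG node to the raw memory state live at that node:
     // first record the memory node produced at each control, then propagate
     // states in reverse post order, creating or reusing memory phis at regions
     // until a fixed point is reached.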
2240 void MemoryGraphFixer::collect_memory_nodes() {
2241   Node_Stack stack(0);
2242   VectorSet visited(Thread::current()->resource_area());
2243   Node_List regions;
2244 
2245   // Walk the raw memory graph and create a mapping from CFG node to
2246   // memory node. Exclude phis for now.
2247   stack.push(_phase->C->root(), 1);
2248   do {
2249     Node* n = stack.node();
2250     int opc = n->Opcode();
2251     uint i = stack.index();
2252     if (i < n->req()) {
2253       Node* mem = NULL;
2254       if (opc == Op_Root) {
2255         Node* in = n->in(i);
2256         int in_opc = in->Opcode();
2257         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2258           mem = in->in(TypeFunc::Memory);
2259         } else if (in_opc == Op_Halt) {
2260           if (in->in(0)->is_Region()) {
2261             Node* r = in->in(0);
2262             for (uint j = 1; j < r->req(); j++) {
2263               assert(r->in(j)->Opcode() != Op_NeverBranch, "");
2264             }
2265           } else {
2266             Node* proj = in->in(0);
2267             assert(proj->is_Proj(), "");
2268             Node* in = proj->in(0);
2269             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2270             if (in->is_CallStaticJava()) {
2271               mem = in->in(TypeFunc::Memory);
2272             } else if (in->Opcode() == Op_Catch) {
2273               Node* call = in->in(0)->in(0);
2274               assert(call->is_Call(), "");
2275               mem = call->in(TypeFunc::Memory);
2276             } else if (in->Opcode() == Op_NeverBranch) {
2277               Node* head = in->in(0);
2278               assert(head->is_Region() && head->req() == 3, "unexpected infinite loop graph shape");
2279               assert(_phase->is_dominator(head, head->in(1)) || _phase->is_dominator(head, head->in(2)), "no back branch?");
2280               Node* tail = _phase->is_dominator(head, head->in(1)) ? head->in(1) : head->in(2);
2281               Node* c = tail;
2282               while (c != head) {
2283                 if (c->is_SafePoint() && !c->is_CallLeaf()) {
2284                   mem = c->in(TypeFunc::Memory);
2285                 }
2286                 c = _phase->idom(c);
2287               }
2288               assert(mem != NULL, "should have found safepoint");
2289 
2290               Node* phi_mem = NULL;
2291               for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
2292                 Node* u = head->fast_out(j);
2293                 if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
2294                   if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2295                     assert(phi_mem == NULL || phi_mem->adr_type() == TypePtr::BOTTOM, "");
2296                     phi_mem = u;
2297                   } else if (u->adr_type() == TypePtr::BOTTOM) {
2298                     assert(phi_mem == NULL || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
2299                     if (phi_mem == NULL) {
2300                       phi_mem = u;
2301                     }
2302                   }
2303                 }
2304               }
2305               if (phi_mem != NULL) {
2306                 mem = phi_mem;
2307               }
2308             }
2309           }
2310         } else {
2311 #ifdef ASSERT
2312           n->dump();
2313           in->dump();
2314 #endif
2315           ShouldNotReachHere();
2316         }
2317       } else {
2318         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2319         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2320         mem = n->in(i);
2321       }
2322       i++;
2323       stack.set_index(i);
2324       if (mem == NULL) {
2325         continue;
2326       }
2327       for (;;) {
2328         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2329           break;
2330         }
2331         if (mem->is_Phi()) {
2332           stack.push(mem, 2);
2333           mem = mem->in(1);
2334         } else if (mem->is_Proj()) {
2335           stack.push(mem, mem->req());
2336           mem = mem->in(0);
2337         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2338           mem = mem->in(TypeFunc::Memory);
2339         } else if (mem->is_MergeMem()) {
2340           MergeMemNode* mm = mem->as_MergeMem();
2341           mem = mm->memory_at(_alias);
2342         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2343           assert(_alias == Compile::AliasIdxRaw, "");
2344           stack.push(mem, mem->req());
2345           mem = mem->in(MemNode::Memory);
2346         } else {
2347 #ifdef ASSERT
2348           mem->dump();
2349 #endif
2350           ShouldNotReachHere();
2351         }
2352       }
2353     } else {
2354       if (n->is_Phi()) {
2355         // Nothing
2356       } else if (!n->is_Root()) {
2357         Node* c = get_ctrl(n);
2358         _memory_nodes.map(c->_idx, n);
2359       }
2360       stack.pop();
2361     }
2362   } while(stack.is_nonempty());
2363 
2364   // Iterate over CFG nodes in rpo and propagate memory state to
2365   // compute memory state at regions, creating new phis if needed.
2366   Node_List rpo_list;
2367   visited.clear();
2368   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2369   Node* root = rpo_list.pop();
2370   assert(root == _phase->C->root(), "");
2371 
2372   const bool trace = false;
2373 #ifdef ASSERT
2374   if (trace) {
2375     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2376       Node* c = rpo_list.at(i);
2377       if (_memory_nodes[c->_idx] != NULL) {
2378         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2379       }
2380     }
2381   }
2382 #endif
2383   uint last = _phase->C->unique();
2384 
2385 #ifdef ASSERT
2386   uint8_t max_depth = 0;
2387   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2388     IdealLoopTree* lpt = iter.current();
2389     max_depth = MAX2(max_depth, lpt->_nest);
2390   }
2391 #endif
2392 
2393   bool progress = true;
2394   int iteration = 0;
2395   Node_List dead_phis;
2396   while (progress) {
2397     progress = false;
2398     iteration++;
2399     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2400     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2401     IdealLoopTree* last_updated_ilt = NULL;
2402     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2403       Node* c = rpo_list.at(i);
2404 
2405       Node* prev_mem = _memory_nodes[c->_idx];
2406       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2407         Node* prev_region = regions[c->_idx];
2408         Node* unique = NULL;
2409         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2410           Node* m = _memory_nodes[c->in(j)->_idx];
2411           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2412           if (m != NULL) {
2413             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2414               assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), "");
2415               // continue
2416             } else if (unique == NULL) {
2417               unique = m;
2418             } else if (m == unique) {
2419               // continue
2420             } else {
2421               unique = NodeSentinel;
2422             }
2423           }
2424         }
2425         assert(unique != NULL, "empty phi???");
2426         if (unique != NodeSentinel) {
2427           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2428             dead_phis.push(prev_region);
2429           }
2430           regions.map(c->_idx, unique);
2431         } else {
2432           Node* phi = NULL;
2433           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2434             phi = prev_region;
2435             for (uint k = 1; k < c->req(); k++) {
2436               Node* m = _memory_nodes[c->in(k)->_idx];
2437               assert(m != NULL, "expect memory state");
2438               phi->set_req(k, m);
2439             }
2440           } else {
2441             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2442               Node* u = c->fast_out(j);
2443               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2444                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2445                 phi = u;
2446                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2447                   Node* m = _memory_nodes[c->in(k)->_idx];
2448                   assert(m != NULL, "expect memory state");
2449                   if (u->in(k) != m) {
2450                     phi = NULL;
2451                   }
2452                 }
2453               }
2454             }
2455             if (phi == NULL) {
2456               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2457               for (uint k = 1; k < c->req(); k++) {
2458                 Node* m = _memory_nodes[c->in(k)->_idx];
2459                 assert(m != NULL, "expect memory state");
2460                 phi->init_req(k, m);
2461               }
2462             }
2463           }
2464           assert(phi != NULL, "");
2465           regions.map(c->_idx, phi);
2466         }
2467         Node* current_region = regions[c->_idx];
2468         if (current_region != prev_region) {
2469           progress = true;
2470           if (prev_region == prev_mem) {
2471             _memory_nodes.map(c->_idx, current_region);
2472           }
2473         }
2474       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2475         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2476         assert(m != NULL, "expect memory state");
2477         if (m != prev_mem) {
2478           _memory_nodes.map(c->_idx, m);
2479           progress = true;
2480         }
2481       }
2482 #ifdef ASSERT
2483       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2484 #endif
2485     }
2486   }
2487 
2488   // Replace existing phi with computed memory state for that region
2489   // if different (could be a new phi or a dominating memory node if
2490   // that phi was found to be useless).
2491   while (dead_phis.size() > 0) {
2492     Node* n = dead_phis.pop();
2493     n->replace_by(_phase->C->top());
2494     n->destruct();
2495   }
2496   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2497     Node* c = rpo_list.at(i);
2498     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2499       Node* n = regions[c->_idx];
2500       if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2501         _phase->register_new_node(n, c);
2502       }
2503     }
2504   }
2505   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2506     Node* c = rpo_list.at(i);
2507     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2508       Node* n = regions[c->_idx];
2509       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2510         Node* u = c->fast_out(i);
2511         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2512             u != n) {
2513           if (u->adr_type() == TypePtr::BOTTOM) {
2514             fix_memory_uses(u, n, n, c);
2515           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2516             _phase->lazy_replace(u, n);
2517             --i; --imax;
2518           }
2519         }
2520       }
2521     }
2522   }
2523 }
2524 
2525 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2526   Node* c = _phase->get_ctrl(n);
2527   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2528     assert(c == n->in(0), "");
2529     CallNode* call = c->as_Call();
2530     CallProjections projs;
2531     call->extract_projections(&projs, true, false);
2532     if (projs.catchall_memproj != NULL) {
2533       if (projs.fallthrough_memproj == n) {
2534         c = projs.fallthrough_catchproj;
2535       } else {
2536         assert(projs.catchall_memproj == n, "");
2537         c = projs.catchall_catchproj;
2538       }
2539     }
2540   }
2541   return c;
2542 }
2543 
2544 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2545   if (_phase->has_ctrl(n))
2546     return get_ctrl(n);
2547   else {
2548     assert (n->is_CFG(), "must be a CFG node");
2549     return n;
2550   }
2551 }
2552 
2553 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2554   return m != NULL && get_ctrl(m) == c;
2555 }
2556 
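     // Find the raw memory state live at ctrl for use by node n, walking up the
     // dominator tree until a memory node valid at that control is found.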
2557 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2558   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
2559   Node* mem = _memory_nodes[ctrl->_idx];
2560   Node* c = ctrl;
2561   while (!mem_is_valid(mem, c) &&
2562          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2563     c = _phase->idom(c);
2564     mem = _memory_nodes[c->_idx];
2565   }
2566   if (n != NULL && mem_is_valid(mem, c)) {
2567     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2568       mem = next_mem(mem, _alias);
2569     }
2570     if (mem->is_MergeMem()) {
2571       mem = mem->as_MergeMem()->memory_at(_alias);
2572     }
2573     if (!mem_is_valid(mem, c)) {
2574       do {
2575         c = _phase->idom(c);
2576         mem = _memory_nodes[c->_idx];
2577       } while (!mem_is_valid(mem, c) &&
2578                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2579     }
2580   }
2581   assert(mem->bottom_type() == Type::MEMORY, "");
2582   return mem;
2583 }
2584 
2585 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2586   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2587     Node* use = region->fast_out(i);
2588     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2589         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2590       return true;
2591     }
2592   }
2593   return false;
2594 }
2595 
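     // Splice the memory state new_mem, produced at new_ctrl, into the raw memory
     // graph: either rewire the local chain of raw stores between mem_for_ctrl
     // and mem, or walk the CFG below new_ctrl and create or patch raw memory
     // phis so every downstream user observes the new state.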
2596 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2597   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2598   const bool trace = false;
2599   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2600   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2601   GrowableArray<Node*> phis;
2602   if (mem_for_ctrl != mem) {
2603     Node* old = mem_for_ctrl;
2604     Node* prev = NULL;
2605     while (old != mem) {
2606       prev = old;
2607       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2608         assert(_alias == Compile::AliasIdxRaw, "");
2609         old = old->in(MemNode::Memory);
2610       } else if (old->Opcode() == Op_SCMemProj) {
2611         assert(_alias == Compile::AliasIdxRaw, "");
2612         old = old->in(0);
2613       } else {
2614         ShouldNotReachHere();
2615       }
2616     }
2617     assert(prev != NULL, "");
2618     if (new_ctrl != ctrl) {
2619       _memory_nodes.map(ctrl->_idx, mem);
2620       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2621     }
2622     uint input = (uint)MemNode::Memory;
2623     _phase->igvn().replace_input_of(prev, input, new_mem);
2624   } else {
2625     uses.clear();
2626     _memory_nodes.map(new_ctrl->_idx, new_mem);
2627     uses.push(new_ctrl);
2628     for (uint next = 0; next < uses.size(); next++) {
2629       Node* n = uses.at(next);
2630       assert(n->is_CFG(), "");
2631       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2632       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2633         Node* u = n->fast_out(i);
2634         if (!u->is_Root() && u->is_CFG() && u != n) {
2635           Node* m = _memory_nodes[u->_idx];
2636           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2637               !has_mem_phi(u) &&
2638               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2639             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2640             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2641 
2642             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2643               bool push = true;
2644               bool create_phi = true;
2645               if (_phase->is_dominator(new_ctrl, u)) {
2646                 create_phi = false;
2647               }
2648               if (create_phi) {
2649                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2650                 _phase->register_new_node(phi, u);
2651                 phis.push(phi);
2652                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2653                 if (!mem_is_valid(m, u)) {
2654                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2655                   _memory_nodes.map(u->_idx, phi);
2656                 } else {
2657                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2658                   for (;;) {
2659                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2660                     Node* next = NULL;
2661                     if (m->is_Proj()) {
2662                       next = m->in(0);
2663                     } else {
2664                       assert(m->is_Mem() || m->is_LoadStore(), "");
2665                       assert(_alias == Compile::AliasIdxRaw, "");
2666                       next = m->in(MemNode::Memory);
2667                     }
2668                     if (_phase->get_ctrl(next) != u) {
2669                       break;
2670                     }
2671                     if (next->is_MergeMem()) {
2672                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2673                       break;
2674                     }
2675                     if (next->is_Phi()) {
2676                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2677                       break;
2678                     }
2679                     m = next;
2680                   }
2681 
2682                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2683                   assert(m->is_Mem() || m->is_LoadStore(), "");
2684                   uint input = (uint)MemNode::Memory;
2685                   _phase->igvn().replace_input_of(m, input, phi);
2686                   push = false;
2687                 }
2688               } else {
2689                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2690               }
2691               if (push) {
2692                 uses.push(u);
2693               }
2694             }
2695           } else if (!mem_is_valid(m, u) &&
2696                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2697             uses.push(u);
2698           }
2699         }
2700       }
2701     }
2702     for (int i = 0; i < phis.length(); i++) {
2703       Node* n = phis.at(i);
2704       Node* r = n->in(0);
2705       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2706       for (uint j = 1; j < n->req(); j++) {
2707         Node* m = find_mem(r->in(j), NULL);
2708         _phase->igvn().replace_input_of(n, j, m);
2709         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2710       }
2711     }
2712   }
2713   uint last = _phase->C->unique();
2714   MergeMemNode* mm = NULL;
2715   int alias = _alias;
2716   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2717   // Process loads first so no anti-dependency is missed: if the memory
2718   // edge of a store is updated before a dependent load is processed, the
2719   // anti-dependency between them can be lost.
2720   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2721     Node* u = mem->out(i);
2722     if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2723       Node* m = find_mem(_phase->get_ctrl(u), u);
2724       if (m != mem) {
2725         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2726         _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2727         --i;
2728       }
2729     }
2730   }
2731   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2732     Node* u = mem->out(i);
2733     if (u->_idx < last) {
2734       if (u->is_Mem()) {
2735         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2736           Node* m = find_mem(_phase->get_ctrl(u), u);
2737           if (m != mem) {
2738             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2739             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2740             --i;
2741           }
2742         }
2743       } else if (u->is_MergeMem()) {
2744         MergeMemNode* u_mm = u->as_MergeMem();
2745         if (u_mm->memory_at(alias) == mem) {
2746           MergeMemNode* newmm = NULL;
2747           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2748             Node* uu = u->fast_out(j);
2749             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2750             if (uu->is_Phi()) {
2751               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2752               Node* region = uu->in(0);
2753               int nb = 0;
2754               for (uint k = 1; k < uu->req(); k++) {
2755                 if (uu->in(k) == u) {
2756                   Node* m = find_mem(region->in(k), NULL);
2757                   if (m != mem) {
2758                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2759                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2760                     if (newmm != u) {
2761                       _phase->igvn().replace_input_of(uu, k, newmm);
2762                       nb++;
2763                       --jmax;
2764                     }
2765                   }
2766                 }
2767               }
2768               if (nb > 0) {
2769                 --j;
2770               }
2771             } else {
2772               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2773               if (m != mem) {
2774                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2775                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2776                 if (newmm != u) {
2777                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2778                   --j; --jmax;
2779                 }
2780               }
2781             }
2782           }
2783         }
2784       } else if (u->is_Phi()) {
2785         assert(u->bottom_type() == Type::MEMORY, "what else?");
2786         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2787           Node* region = u->in(0);
2788           bool replaced = false;
2789           for (uint j = 1; j < u->req(); j++) {
2790             if (u->in(j) == mem) {
2791               Node* m = find_mem(region->in(j), NULL);
2792               Node* nnew = m;
2793               if (m != mem) {
2794                 if (u->adr_type() == TypePtr::BOTTOM) {
2795                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2796                   nnew = mm;
2797                 }
2798                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2799                 _phase->igvn().replace_input_of(u, j, nnew);
2800                 replaced = true;
2801               }
2802             }
2803           }
2804           if (replaced) {
2805             --i;
2806           }
2807         }
2808       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2809                  u->adr_type() == NULL) {
2810         assert(u->adr_type() != NULL ||
2811                u->Opcode() == Op_Rethrow ||
2812                u->Opcode() == Op_Return ||
2813                u->Opcode() == Op_SafePoint ||
2814                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2815                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2816                u->Opcode() == Op_CallLeaf, "");
2817         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2818         if (m != mem) {
2819           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2820           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2821           --i;
2822         }
2823       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2824         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2825         if (m != mem) {
2826           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2827           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2828           --i;
2829         }
2830       } else if (u->adr_type() != TypePtr::BOTTOM &&
2831                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2832         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2833         assert(m != mem, "");
2834         // u is on the wrong slice...
2835         assert(u->is_ClearArray(), "");
2836         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2837         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2838         --i;
2839       }
2840     }
2841   }
2842 #ifdef ASSERT
2843   assert(new_mem->outcnt() > 0, "");
2844   for (int i = 0; i < phis.length(); i++) {
2845     Node* n = phis.at(i);
2846     assert(n->outcnt() > 0, "new phi must have uses now");
2847   }
2848 #endif
2849 }
2850 
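     // Build a new MergeMem over mem whose slice for _alias is rep_proj, registered at rep_ctrl.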
2851 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2852   MergeMemNode* mm = MergeMemNode::make(mem);
2853   mm->set_memory_at(_alias, rep_proj);
2854   _phase->register_new_node(mm, rep_ctrl);
2855   return mm;
2856 }
2857 
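     // Produce a MergeMem equivalent to u except that its _alias slice is rep_proj,
     // placed at the deeper of u's control and rep_ctrl. When u has a single use it
     // is updated in place; otherwise a fresh MergeMem is built.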
2858 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2859   MergeMemNode* newmm = NULL;
2860   MergeMemNode* u_mm = u->as_MergeMem();
2861   Node* c = _phase->get_ctrl(u);
2862   if (_phase->is_dominator(c, rep_ctrl)) {
2863     c = rep_ctrl;
2864   } else {
2865     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2866   }
2867   if (u->outcnt() == 1) {
2868     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2869       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2870       --i;
2871     } else {
2872       _phase->igvn().rehash_node_delayed(u);
2873       u_mm->set_memory_at(_alias, rep_proj);
2874     }
2875     newmm = u_mm;
2876     _phase->set_ctrl_and_loop(u, c);
2877   } else {
2878     // We can't simply clone u and then change one of its inputs: that
2879     // would add and then remove an edge, which confuses the
2880     // DUIterator.
2881     newmm = MergeMemNode::make(u_mm->base_memory());
2882     for (uint j = 0; j < u->req(); j++) {
2883       if (j < newmm->req()) {
2884         if (j == (uint)_alias) {
2885           newmm->set_req(j, rep_proj);
2886         } else if (newmm->in(j) != u->in(j)) {
2887           newmm->set_req(j, u->in(j));
2888         }
2889       } else if (j == (uint)_alias) {
2890         newmm->add_req(rep_proj);
2891       } else {
2892         newmm->add_req(u->in(j));
2893       }
2894     }
2895     if ((uint)_alias >= u->req()) {
2896       newmm->set_memory_at(_alias, rep_proj);
2897     }
2898     _phase->register_new_node(newmm, c);
2899   }
2900   return newmm;
2901 }
2902 
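     // A memory Phi needs fixing if it is on the alias slice being fixed, or if it
     // is a wide (bottom) memory Phi that stands in for that slice because its
     // region has no dedicated Phi for the alias.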
2903 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2904   if (phi->adr_type() == TypePtr::BOTTOM) {
2905     Node* region = phi->in(0);
2906     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2907       Node* uu = region->fast_out(j);
2908       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2909         return false;
2910       }
2911     }
2912     return true;
2913   }
2914   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2915 }
2916 
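     // Redirect uses of the memory state mem that are dominated by rep_ctrl (other
     // than the replacement itself) to rep_proj, wrapping rep_proj in a MergeMem
     // for users that consume a wide memory state.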
2917 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2918   uint last = _phase->C->unique();
2919   MergeMemNode* mm = NULL;
2920   assert(mem->bottom_type() == Type::MEMORY, "");
2921   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2922     Node* u = mem->out(i);
2923     if (u != replacement && u->_idx < last) {
2924       if (u->is_MergeMem()) {
2925         MergeMemNode* u_mm = u->as_MergeMem();
2926         if (u_mm->memory_at(_alias) == mem) {
2927           MergeMemNode* newmm = NULL;
2928           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2929             Node* uu = u->fast_out(j);
2930             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2931             if (uu->is_Phi()) {
2932               if (should_process_phi(uu)) {
2933                 Node* region = uu->in(0);
2934                 int nb = 0;
2935                 for (uint k = 1; k < uu->req(); k++) {
2936                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2937                     if (newmm == NULL) {
2938                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2939                     }
2940                     if (newmm != u) {
2941                       _phase->igvn().replace_input_of(uu, k, newmm);
2942                       nb++;
2943                       --jmax;
2944                     }
2945                   }
2946                 }
2947                 if (nb > 0) {
2948                   --j;
2949                 }
2950               }
2951             } else {
2952               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2953                 if (newmm == NULL) {
2954                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2955                 }
2956                 if (newmm != u) {
2957                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2958                   --j; --jmax;
2959                 }
2960               }
2961             }
2962           }
2963         }
2964       } else if (u->is_Phi()) {
2965         assert(u->bottom_type() == Type::MEMORY, "what else?");
2966         Node* region = u->in(0);
2967         if (should_process_phi(u)) {
2968           bool replaced = false;
2969           for (uint j = 1; j < u->req(); j++) {
2970             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2971               Node* nnew = rep_proj;
2972               if (u->adr_type() == TypePtr::BOTTOM) {
2973                 if (mm == NULL) {
2974                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2975                 }
2976                 nnew = mm;
2977               }
2978               _phase->igvn().replace_input_of(u, j, nnew);
2979               replaced = true;
2980             }
2981           }
2982           if (replaced) {
2983             --i;
2984           }
2985 
2986         }
2987       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2988                  u->adr_type() == NULL) {
2989         assert(u->adr_type() != NULL ||
2990                u->Opcode() == Op_Rethrow ||
2991                u->Opcode() == Op_Return ||
2992                u->Opcode() == Op_SafePoint ||
2993                u->Opcode() == Op_StoreIConditional ||
2994                u->Opcode() == Op_StoreLConditional ||
2995                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2996                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2997                u->Opcode() == Op_CallLeaf, "%s", u->Name());
2998         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2999           if (mm == NULL) {
3000             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
3001           }
3002           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
3003           --i;
3004         }
3005       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
3006         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3007           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
3008           --i;
3009         }
3010       }
3011     }
3012   }
3013 }
3014 
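     // A load reference barrier takes (control, value) as inputs; each new node is
     // recorded with the barrier set C2 state so it can be expanded later.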
3015 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, bool native)
3016 : Node(ctrl, obj), _native(native) {
3017   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
3018 }
3019 
3020 bool ShenandoahLoadReferenceBarrierNode::is_native() const {
3021   return _native;
3022 }
3023 
3024 uint ShenandoahLoadReferenceBarrierNode::size_of() const {
3025   return sizeof(*this);
3026 }
3027 
3028 uint ShenandoahLoadReferenceBarrierNode::hash() const {
3029   return Node::hash() + (_native ? 1 : 0);
3030 }
3031 
3032 bool ShenandoahLoadReferenceBarrierNode::cmp(const Node& n) const {
3033   return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
3034          _native == ((const ShenandoahLoadReferenceBarrierNode&)n)._native;
3035 }
3036 
3037 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
3038   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
3039     return Type::TOP;
3040   }
3041   const Type* t = in(ValueIn)->bottom_type();
3042   if (t == TypePtr::NULL_PTR) {
3043     return t;
3044   }
3045   return t->is_oopptr();
3046 }
3047 
3048 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
3049   // If the input is TOP ==> the result is TOP
3050   const Type* t2 = phase->type(in(ValueIn));
3051   if (t2 == Type::TOP) return Type::TOP;
3052 
3053   if (t2 == TypePtr::NULL_PTR) {
3054     return t2;
3055   }
3056 
3057   const Type* type = t2->is_oopptr();
3058   return type;
3059 }
3060 
3061 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
3062   Node* value = in(ValueIn);
3063   if (!needs_barrier(phase, value)) {
3064     return value;
3065   }
3066   return this;
3067 }
3068 
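     // Conservative test of whether the input value may need a load reference
     // barrier: allocations, calls, constants, nulls, incoming parameters and
     // already-barriered values do not; loads, atomic operation results and
     // address computations do; Phis, casts and CMoves are decided by their
     // inputs. Unknown shapes default to needing the barrier.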
3069 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
3070   Unique_Node_List visited;
3071   return needs_barrier_impl(phase, n, visited);
3072 }
3073 
3074 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
3075   if (n == NULL) return false;
3076   if (visited.member(n)) {
3077     return false; // Been there.
3078   }
3079   visited.push(n);
3080 
3081   if (n->is_Allocate()) {
3082     // tty->print_cr("optimize barrier on alloc");
3083     return false;
3084   }
3085   if (n->is_Call()) {
3086     // tty->print_cr("optimize barrier on call");
3087     return false;
3088   }
3089 
3090   const Type* type = phase->type(n);
3091   if (type == Type::TOP) {
3092     return false;
3093   }
3094   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
3095     // tty->print_cr("optimize barrier on null");
3096     return false;
3097   }
3098   if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
3099     // tty->print_cr("optimize barrier on constant");
3100     return false;
3101   }
3102 
3103   switch (n->Opcode()) {
3104     case Op_AddP:
3105       return true; // TODO: Can refine?
3106     case Op_LoadP:
3107     case Op_ShenandoahCompareAndExchangeN:
3108     case Op_ShenandoahCompareAndExchangeP:
3109     case Op_CompareAndExchangeN:
3110     case Op_CompareAndExchangeP:
3111     case Op_GetAndSetN:
3112     case Op_GetAndSetP:
3113       return true;
3114     case Op_Phi: {
3115       for (uint i = 1; i < n->req(); i++) {
3116         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
3117       }
3118       return false;
3119     }
3120     case Op_CheckCastPP:
3121     case Op_CastPP:
3122       return needs_barrier_impl(phase, n->in(1), visited);
3123     case Op_Proj:
3124       return needs_barrier_impl(phase, n->in(0), visited);
3125     case Op_ShenandoahLoadReferenceBarrier:
3126       // tty->print_cr("optimize barrier on barrier");
3127       return false;
3128     case Op_Parm:
3129       // tty->print_cr("optimize barrier on input arg");
3130       return false;
3131     case Op_DecodeN:
3132     case Op_EncodeP:
3133       return needs_barrier_impl(phase, n->in(1), visited);
3134     case Op_LoadN:
3135       return true;
3136     case Op_CMoveN:
3137     case Op_CMoveP:
3138       return needs_barrier_impl(phase, n->in(2), visited) ||
3139              needs_barrier_impl(phase, n->in(3), visited);
3140     case Op_ShenandoahEnqueueBarrier:
3141       return needs_barrier_impl(phase, n->in(1), visited);
3142     case Op_CreateEx:
3143       return false;
3144     default:
3145       break;
3146   }
3147 #ifdef ASSERT
3148   tty->print("need barrier on?: ");
3149   tty->print_cr("ins:");
3150   n->dump(2);
3151   tty->print_cr("outs:");
3152   n->dump(-2);
3153   ShouldNotReachHere();
3154 #endif
3155   return true;
3156 }
3157 
3158 bool ShenandoahLoadReferenceBarrierNode::is_redundant() {
3159   Unique_Node_List visited;
3160   Node_Stack stack(0);
3161   stack.push(this, 0);
3162 
3163   // Check if the barrier is actually useful: walk over the transitive uses looking
3164   // for ones that require it (e.g. memory accesses). Stop as soon as a required use
3165   // is found. If we run out of nodes first, declare the barrier redundant.
3166   while (stack.size() > 0) {
3167     Node* n = stack.node();
3168     if (visited.member(n)) {
3169       stack.pop();
3170       continue;
3171     }
3172     visited.push(n);
3173     bool visit_users = false;
3174     switch (n->Opcode()) {
3175       case Op_CallStaticJava:
3176       case Op_CallDynamicJava:
3177       case Op_CallLeaf:
3178       case Op_CallLeafNoFP:
3179       case Op_CompareAndSwapL:
3180       case Op_CompareAndSwapI:
3181       case Op_CompareAndSwapB:
3182       case Op_CompareAndSwapS:
3183       case Op_CompareAndSwapN:
3184       case Op_CompareAndSwapP:
3185       case Op_CompareAndExchangeL:
3186       case Op_CompareAndExchangeI:
3187       case Op_CompareAndExchangeB:
3188       case Op_CompareAndExchangeS:
3189       case Op_CompareAndExchangeN:
3190       case Op_CompareAndExchangeP:
3191       case Op_WeakCompareAndSwapL:
3192       case Op_WeakCompareAndSwapI:
3193       case Op_WeakCompareAndSwapB:
3194       case Op_WeakCompareAndSwapS:
3195       case Op_WeakCompareAndSwapN:
3196       case Op_WeakCompareAndSwapP:
3197       case Op_ShenandoahCompareAndSwapN:
3198       case Op_ShenandoahCompareAndSwapP:
3199       case Op_ShenandoahWeakCompareAndSwapN:
3200       case Op_ShenandoahWeakCompareAndSwapP:
3201       case Op_ShenandoahCompareAndExchangeN:
3202       case Op_ShenandoahCompareAndExchangeP:
3203       case Op_GetAndSetL:
3204       case Op_GetAndSetI:
3205       case Op_GetAndSetB:
3206       case Op_GetAndSetS:
3207       case Op_GetAndSetP:
3208       case Op_GetAndSetN:
3209       case Op_GetAndAddL:
3210       case Op_GetAndAddI:
3211       case Op_GetAndAddB:
3212       case Op_GetAndAddS:
3213       case Op_ShenandoahEnqueueBarrier:
3214       case Op_FastLock:
3215       case Op_FastUnlock:
3216       case Op_Rethrow:
3217       case Op_Return:
3218       case Op_StoreB:
3219       case Op_StoreC:
3220       case Op_StoreD:
3221       case Op_StoreF:
3222       case Op_StoreL:
3223       case Op_StoreLConditional:
3224       case Op_StoreI:
3225       case Op_StoreIConditional:
3226       case Op_StoreN:
3227       case Op_StoreP:
3228       case Op_StoreVector:
3229       case Op_StrInflatedCopy:
3230       case Op_StrCompressedCopy:
3231       case Op_EncodeP:
3232       case Op_CastP2X:
3233       case Op_SafePoint:
3234       case Op_EncodeISOArray:
3235       case Op_AryEq:
3236       case Op_StrEquals:
3237       case Op_StrComp:
3238       case Op_StrIndexOf:
3239       case Op_StrIndexOfChar:
3240       case Op_HasNegatives:
3241         // Known to require barriers
3242         return false;
3243       case Op_CmpP: {
3244         if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) ||
3245             n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
3246           // One of the sides is known null, no need for barrier.
3247         } else {
3248           return false;
3249         }
3250         break;
3251       }
3252       case Op_LoadB:
3253       case Op_LoadUB:
3254       case Op_LoadUS:
3255       case Op_LoadD:
3256       case Op_LoadF:
3257       case Op_LoadL:
3258       case Op_LoadI:
3259       case Op_LoadS:
3260       case Op_LoadN:
3261       case Op_LoadP:
3262       case Op_LoadVector: {
3263         const TypePtr* adr_type = n->adr_type();
3264         int alias_idx = Compile::current()->get_alias_index(adr_type);
3265         Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
3266         ciField* field = alias_type->field();
3267         bool is_static = field != NULL && field->is_static();
3268         bool is_final = field != NULL && field->is_final();
3269 
3270         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
3271           // Loading the constant does not require barriers: it should be handled
3272           // as part of GC roots already.
3273         } else {
3274           return false;
3275         }
3276         break;
3277       }
3278       case Op_Conv2B:
3279       case Op_LoadRange:
3280       case Op_LoadKlass:
3281       case Op_LoadNKlass:
3282         // Do not require barriers
3283         break;
3284       case Op_AddP:
3285       case Op_CheckCastPP:
3286       case Op_CastPP:
3287       case Op_CMoveP:
3288       case Op_Phi:
3289       case Op_ShenandoahLoadReferenceBarrier:
3290         // Whether these need the barrier depends on their users
3291         visit_users = true;
3292         break;
3293       default: {
3294 #ifdef ASSERT
3295         fatal("Unknown node in is_redundant: %s", NodeClassNames[n->Opcode()]);
3296 #else
3297         // Default to keeping excess barriers rather than missing required ones.
3298         return false;
3299 #endif
3300       }
3301     }
3302 
3303     stack.pop();
3304     if (visit_users) {
3305       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3306         Node* user = n->fast_out(i);
3307         if (user != NULL) {
3308           stack.push(user, 0);
3309         }
3310       }
3311     }
3312   }
3313 
3314   // No use that requires the barrier was found.
3315   return true;
3316 }
3317 
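     // Recognize a value proven non-null by a null check whose failing path is an
     // uncommon trap: a CastPP pinned on the taken branch of a 'value != NULL'
     // test. Return that uncommon trap call, or NULL if the pattern does not match.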
3318 CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
3319   Node* val = in(ValueIn);
3320 
3321   const Type* val_t = igvn.type(val);
3322 
3323   if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
3324       val->Opcode() == Op_CastPP &&
3325       val->in(0) != NULL &&
3326       val->in(0)->Opcode() == Op_IfTrue &&
3327       val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
3328       val->in(0)->in(0)->is_If() &&
3329       val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
3330       val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
3331       val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
3332       val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
3333       val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
3334     assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
3335     CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
3336     return unc;
3337   }
3338   return NULL;
3339 }