1 /*
   2  * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  28 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  30 #include "gc/shenandoah/shenandoahForwarding.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  33 #include "gc/shenandoah/shenandoahRuntime.hpp"
  34 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/block.hpp"
  37 #include "opto/callnode.hpp"
  38 #include "opto/castnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/phaseX.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/runtime.hpp"
  43 #include "opto/subnode.hpp"
  44 
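     // Expands all enqueue and load reference barriers recorded by the barrier set
     // and, if requested, runs one more round of loop optimizations on the result.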
  45 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  46   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  47   if ((state->enqueue_barriers_count() +
  48        state->load_reference_barriers_count()) > 0) {
  49     bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
  50     C->clear_major_progress();
  51     PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
  52     if (C->failing()) return false;
  53     PhaseIdealLoop::verify(igvn);
  54     DEBUG_ONLY(verify_raw_mem(C->root());)
  55     if (attempt_more_loopopts) {
  56       C->set_major_progress();
  57       if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
  58         return false;
  59       }
  60       C->clear_major_progress();
  61       if (C->range_check_cast_count() > 0) {
  62         // No more loop optimizations. Remove all range check dependent CastIINodes.
  63         C->remove_range_check_casts(igvn);
  64         igvn.optimize();
  65       }
  66     }
  67   }
  68   return true;
  69 }
  70 
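     // Recognizes the gc-state test shape emitted by test_heap_stable():
     //   if ((LoadB(thread-local gc_state) & mask) != 0)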
  71 bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) {
  72   if (!UseShenandoahGC) {
  73     return false;
  74   }
  75   assert(iff->is_If(), "bad input");
  76   if (iff->Opcode() != Op_If) {
  77     return false;
  78   }
  79   Node* bol = iff->in(1);
  80   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  81     return false;
  82   }
  83   Node* cmp = bol->in(1);
  84   if (cmp->Opcode() != Op_CmpI) {
  85     return false;
  86   }
  87   Node* in1 = cmp->in(1);
  88   Node* in2 = cmp->in(2);
  89   if (in2->find_int_con(-1) != 0) {
  90     return false;
  91   }
  92   if (in1->Opcode() != Op_AndI) {
  93     return false;
  94   }
  95   in2 = in1->in(2);
  96   if (in2->find_int_con(-1) != mask) {
  97     return false;
  98   }
  99   in1 = in1->in(1);
 100 
 101   return is_gc_state_load(in1);
 102 }
 103 
 104 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
 105   return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
 106 }
 107 
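     // Recognizes a LoadB/LoadUB of the thread-local gc-state byte, i.e. a load from
     // ThreadLocal + ShenandoahThreadLocalData::gc_state_offset().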
 108 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
 109   if (!UseShenandoahGC) {
 110     return false;
 111   }
 112   if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
 113     return false;
 114   }
 115   Node* addp = n->in(MemNode::Address);
 116   if (!addp->is_AddP()) {
 117     return false;
 118   }
 119   Node* base = addp->in(AddPNode::Address);
 120   Node* off = addp->in(AddPNode::Offset);
 121   if (base->Opcode() != Op_ThreadLocal) {
 122     return false;
 123   }
 124   if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
 125     return false;
 126   }
 127   return true;
 128 }
 129 
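     // Walks the control inputs from start up to the dominating stop node and reports
     // whether a safepoint (excluding leaf calls) is found on the way.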
 130 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
 131   assert(phase->is_dominator(stop, start), "bad inputs");
 132   ResourceMark rm;
 133   Unique_Node_List wq;
 134   wq.push(start);
 135   for (uint next = 0; next < wq.size(); next++) {
 136     Node *m = wq.at(next);
 137     if (m == stop) {
 138       continue;
 139     }
 140     if (m->is_SafePoint() && !m->is_CallLeaf()) {
 141       return true;
 142     }
 143     if (m->is_Region()) {
 144       for (uint i = 1; i < m->req(); i++) {
 145         wq.push(m->in(i));
 146       }
 147     } else {
 148       wq.push(m->in(0));
 149     }
 150   }
 151   return false;
 152 }
 153 
 154 #ifdef ASSERT
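     // Follows an oop input through casts, phis, CMoves etc. and checks that it is
     // covered by the barrier kind required by verify_type t. Barriers seen along
     // the way are recorded in barriers_used.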
 155 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
 156   assert(phis.size() == 0, "");
 157 
 158   while (true) {
 159     if (in->bottom_type() == TypePtr::NULL_PTR) {
 160       if (trace) {tty->print_cr("NULL");}
 161     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
 162       if (trace) {tty->print_cr("Non oop");}
 163     } else {
 164       if (in->is_ConstraintCast()) {
 165         in = in->in(1);
 166         continue;
 167       } else if (in->is_AddP()) {
 168         assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
 169         in = in->in(AddPNode::Address);
 170         continue;
 171       } else if (in->is_Con()) {
 172         if (trace) {
 173           tty->print("Found constant");
 174           in->dump();
 175         }
 176       } else if (in->Opcode() == Op_Parm) {
 177         if (trace) {
 178           tty->print("Found argument");
 179         }
 180       } else if (in->Opcode() == Op_CreateEx) {
 181         if (trace) {
 182           tty->print("Found create-exception");
 183         }
 184       } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
 185         if (trace) {
 186           tty->print("Found raw LoadP (OSR argument?)");
 187         }
 188       } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 189         if (t == ShenandoahOopStore) {
 190           uint i = 0;
 191           for (; i < phis.size(); i++) {
 192             Node* n = phis.node_at(i);
 193             if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
 194               break;
 195             }
 196           }
 197           if (i == phis.size()) {
 198             return false;
 199           }
 200         }
 201         barriers_used.push(in);
 202         if (trace) {tty->print("Found barrier"); in->dump();}
 203       } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
 204         if (t != ShenandoahOopStore) {
 205           in = in->in(1);
 206           continue;
 207         }
 208         if (trace) {tty->print("Found enqueue barrier"); in->dump();}
 209         phis.push(in, in->req());
 210         in = in->in(1);
 211         continue;
 212       } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
 213         if (trace) {
 214           tty->print("Found alloc");
 215           in->in(0)->dump();
 216         }
 217       } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
 218         if (trace) {
 219           tty->print("Found Java call");
 220         }
 221       } else if (in->is_Phi()) {
 222         if (!visited.test_set(in->_idx)) {
 223           if (trace) {tty->print("Pushed phi:"); in->dump();}
 224           phis.push(in, 2);
 225           in = in->in(1);
 226           continue;
 227         }
 228         if (trace) {tty->print("Already seen phi:"); in->dump();}
 229       } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
 230         if (!visited.test_set(in->_idx)) {
 231           if (trace) {tty->print("Pushed cmovep:"); in->dump();}
 232           phis.push(in, CMoveNode::IfTrue);
 233           in = in->in(CMoveNode::IfFalse);
 234           continue;
 235         }
 236         if (trace) {tty->print("Already seen cmovep:"); in->dump();}
 237       } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
 238         in = in->in(1);
 239         continue;
 240       } else {
 241         return false;
 242       }
 243     }
 244     bool cont = false;
 245     while (phis.is_nonempty()) {
 246       uint idx = phis.index();
 247       Node* phi = phis.node();
 248       if (idx >= phi->req()) {
 249         if (trace) {tty->print("Popped phi:"); phi->dump();}
 250         phis.pop();
 251         continue;
 252       }
 253       if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
 254       in = phi->in(idx);
 255       phis.set_index(idx+1);
 256       cont = true;
 257       break;
 258     }
 259     if (!cont) {
 260       break;
 261     }
 262   }
 263   return true;
 264 }
 265 
 266 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
 267   if (n1 != NULL) {
 268     n1->dump(+10);
 269   }
 270   if (n2 != NULL) {
 271     n2->dump(+10);
 272   }
 273   fatal("%s", msg);
 274 }
 275 
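     // Debug-only graph verification: walks the whole graph from the root and checks
     // that oop loads, stores, compares, atomics and runtime call arguments are fed
     // by the expected Shenandoah barriers.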
 276 void ShenandoahBarrierC2Support::verify(RootNode* root) {
 277   ResourceMark rm;
 278   Unique_Node_List wq;
 279   GrowableArray<Node*> barriers;
 280   Unique_Node_List barriers_used;
 281   Node_Stack phis(0);
 282   VectorSet visited(Thread::current()->resource_area());
 283   const bool trace = false;
 284   const bool verify_no_useless_barrier = false;
 285 
 286   wq.push(root);
 287   for (uint next = 0; next < wq.size(); next++) {
 288     Node *n = wq.at(next);
 289     if (n->is_Load()) {
 290       const bool trace = false;
 291       if (trace) {tty->print("Verifying"); n->dump();}
 292       if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
 293         if (trace) {tty->print_cr("Load range/klass");}
 294       } else {
 295         const TypePtr* adr_type = n->as_Load()->adr_type();
 296 
 297         if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
 298           if (trace) {tty->print_cr("Mark load");}
 299         } else if (adr_type->isa_instptr() &&
 300                    adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
 301                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
 302           if (trace) {tty->print_cr("Reference.get()");}
 303         } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 304           report_verify_failure("Shenandoah verification: Load should have barriers", n);
 305         }
 306       }
 307     } else if (n->is_Store()) {
 308       const bool trace = false;
 309 
 310       if (trace) {tty->print("Verifying"); n->dump();}
 311       if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
 312         Node* adr = n->in(MemNode::Address);
 313         bool verify = true;
 314 
 315         if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
 316           adr = adr->in(AddPNode::Address);
 317           if (adr->is_AddP()) {
 318             assert(adr->in(AddPNode::Base)->is_top(), "");
 319             adr = adr->in(AddPNode::Address);
 320             if (adr->Opcode() == Op_LoadP &&
 321                 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
 322                 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 323                 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
 324               if (trace) {tty->print_cr("SATB prebarrier");}
 325               verify = false;
 326             }
 327           }
 328         }
 329 
 330         if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 331           report_verify_failure("Shenandoah verification: Store should have barriers", n);
 332         }
 333       }
 334       if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 335         report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
 336       }
 337     } else if (n->Opcode() == Op_CmpP) {
 338       const bool trace = false;
 339 
 340       Node* in1 = n->in(1);
 341       Node* in2 = n->in(2);
 342       if (in1->bottom_type()->isa_oopptr()) {
 343         if (trace) {tty->print("Verifying"); n->dump();}
 344 
 345         bool mark_inputs = false;
 346         if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
 347             (in1->is_Con() || in2->is_Con())) {
 348           if (trace) {tty->print_cr("Comparison against a constant");}
 349           mark_inputs = true;
 350         } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
 351                    (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
 352           if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
 353           mark_inputs = true;
 354         } else {
 355           assert(in2->bottom_type()->isa_oopptr(), "");
 356 
 357           if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
 358               !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
 359             report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
 360           }
 361         }
 362         if (verify_no_useless_barrier &&
 363             mark_inputs &&
 364             (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
 365              !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
 366           phis.clear();
 367           visited.reset();
 368         }
 369       }
 370     } else if (n->is_LoadStore()) {
 371       if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
 372           !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 373         report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
 374       }
 375 
 376       if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 377         report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
 378       }
 379     } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
 380       CallNode* call = n->as_Call();
 381 
 382       static struct {
 383         const char* name;
 384         struct {
 385           int pos;
 386           verify_type t;
 387         } args[6];
 388       } calls[] = {
 389         "aescrypt_encryptBlock",
 390         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 391           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 392         "aescrypt_decryptBlock",
 393         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 394           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 395         "multiplyToLen",
 396         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
 397           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 398         "squareToLen",
 399         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
 400           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 401         "montgomery_multiply",
 402         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 403           { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 404         "montgomery_square",
 405         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
 406           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 407         "mulAdd",
 408         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 409           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 410         "vectorizedMismatch",
 411         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 412           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 413         "updateBytesCRC32",
 414         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 415           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 416         "updateBytesAdler32",
 417         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 418           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 419         "updateBytesCRC32C",
 420         { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
 421           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 422         "counterMode_AESCrypt",
 423         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 424           { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
 425         "cipherBlockChaining_encryptAESCrypt",
 426         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 427           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 428         "cipherBlockChaining_decryptAESCrypt",
 429         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 430           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 431         "shenandoah_clone_barrier",
 432         { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 433           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 434         "ghash_processBlocks",
 435         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 436           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 437         "sha1_implCompress",
 438         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 439           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 440         "sha256_implCompress",
 441         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 442           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 443         "sha512_implCompress",
 444         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 445           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 446         "sha1_implCompressMB",
 447         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 448           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 449         "sha256_implCompressMB",
 450         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 451           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 452         "sha512_implCompressMB",
 453         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 454           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 455         "encodeBlock",
 456         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 457           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 458       };
 459 
 460       if (call->is_call_to_arraycopystub()) {
 461         Node* dest = NULL;
 462         const TypeTuple* args = n->as_Call()->_tf->domain();
 463         for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 464           if (args->field_at(i)->isa_ptr()) {
 465             j++;
 466             if (j == 2) {
 467               dest = n->in(i);
 468               break;
 469             }
 470           }
 471         }
 472         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
 473             !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
 474           report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
 475         }
 476       } else if (strlen(call->_name) > 5 &&
 477                  !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
 478         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
 479           report_verify_failure("Shenandoah verification: _fill should have barriers", n);
 480         }
 481       } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
 482         // skip
 483       } else {
 484         const int calls_len = sizeof(calls) / sizeof(calls[0]);
 485         int i = 0;
 486         for (; i < calls_len; i++) {
 487           if (!strcmp(calls[i].name, call->_name)) {
 488             break;
 489           }
 490         }
 491         if (i != calls_len) {
 492           const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
 493           for (uint j = 0; j < args_len; j++) {
 494             int pos = calls[i].args[j].pos;
 495             if (pos == -1) {
 496               break;
 497             }
 498             if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
 499               report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 500             }
 501           }
 502           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 503             if (call->in(j)->bottom_type()->make_ptr() &&
 504                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 505               uint k = 0;
 506               for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
 507               if (k == args_len) {
 508                 fatal("arg %d for call %s not covered", j, call->_name);
 509               }
 510             }
 511           }
 512         } else {
 513           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 514             if (call->in(j)->bottom_type()->make_ptr() &&
 515                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 516               fatal("%s not covered", call->_name);
 517             }
 518           }
 519         }
 520       }
 521     } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 522       // skip
 523     } else if (n->is_AddP()
 524                || n->is_Phi()
 525                || n->is_ConstraintCast()
 526                || n->Opcode() == Op_Return
 527                || n->Opcode() == Op_CMoveP
 528                || n->Opcode() == Op_CMoveN
 529                || n->Opcode() == Op_Rethrow
 530                || n->is_MemBar()
 531                || n->Opcode() == Op_Conv2B
 532                || n->Opcode() == Op_SafePoint
 533                || n->is_CallJava()
 534                || n->Opcode() == Op_Unlock
 535                || n->Opcode() == Op_EncodeP
 536                || n->Opcode() == Op_DecodeN) {
 537       // nothing to do
 538     } else {
 539       static struct {
 540         int opcode;
 541         struct {
 542           int pos;
 543           verify_type t;
 544         } inputs[2];
 545       } others[] = {
 546         Op_FastLock,
 547         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 548         Op_Lock,
 549         { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
 550         Op_ArrayCopy,
 551         { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
 552         Op_StrCompressedCopy,
 553         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 554         Op_StrInflatedCopy,
 555         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 556         Op_AryEq,
 557         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 558         Op_StrIndexOf,
 559         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 560         Op_StrComp,
 561         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 562         Op_StrEquals,
 563         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 564         Op_EncodeISOArray,
 565         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 566         Op_HasNegatives,
 567         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
 568         Op_CastP2X,
 569         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 570         Op_StrIndexOfChar,
 571         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 572       };
 573 
 574       const int others_len = sizeof(others) / sizeof(others[0]);
 575       int i = 0;
 576       for (; i < others_len; i++) {
 577         if (others[i].opcode == n->Opcode()) {
 578           break;
 579         }
 580       }
 581       uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
 582       if (i != others_len) {
 583         const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
 584         for (uint j = 0; j < inputs_len; j++) {
 585           int pos = others[i].inputs[j].pos;
 586           if (pos == -1) {
 587             break;
 588           }
 589           if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
 590             report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 591           }
 592         }
 593         for (uint j = 1; j < stop; j++) {
 594           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 595               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 596             uint k = 0;
 597             for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
 598             if (k == inputs_len) {
 599               fatal("arg %d for node %s not covered", j, n->Name());
 600             }
 601           }
 602         }
 603       } else {
 604         for (uint j = 1; j < stop; j++) {
 605           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 606               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 607             fatal("%s not covered", n->Name());
 608           }
 609         }
 610       }
 611     }
 612 
 613     if (n->is_SafePoint()) {
 614       SafePointNode* sfpt = n->as_SafePoint();
 615       if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
 616         for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
 617           if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 618             phis.clear();
 619             visited.reset();
 620           }
 621         }
 622       }
 623     }
 624   }
 625 
 626   if (verify_no_useless_barrier) {
 627     for (int i = 0; i < barriers.length(); i++) {
 628       Node* n = barriers.at(i);
 629       if (!barriers_used.member(n)) {
 630         tty->print("XXX useless barrier"); n->dump(-2);
 631         ShouldNotReachHere();
 632       }
 633     }
 634   }
 635 }
 636 #endif
 637 
 638 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
 639   // Both nodes having the same control is not sufficient to prove
 640   // domination; verify that there is no path from d to n.
 641   ResourceMark rm;
 642   Unique_Node_List wq;
 643   wq.push(d);
 644   for (uint next = 0; next < wq.size(); next++) {
 645     Node *m = wq.at(next);
 646     if (m == n) {
 647       return false;
 648     }
 649     if (m->is_Phi() && m->in(0)->is_Loop()) {
 650       assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
 651     } else {
 652       if (m->is_Store() || m->is_LoadStore()) {
 653         // Take anti-dependencies into account
 654         Node* mem = m->in(MemNode::Memory);
 655         for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
 656           Node* u = mem->fast_out(i);
 657           if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
 658               phase->ctrl_or_self(u) == c) {
 659             wq.push(u);
 660           }
 661         }
 662       }
 663       for (uint i = 0; i < m->req(); i++) {
 664         if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
 665           wq.push(m->in(i));
 666         }
 667       }
 668     }
 669   }
 670   return true;
 671 }
 672 
 673 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
 674   if (d_c != n_c) {
 675     return phase->is_dominator(d_c, n_c);
 676   }
 677   return is_dominator_same_ctrl(d_c, d, n, phase);
 678 }
 679 
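     // Steps from a memory node to the memory state it consumes for the given
     // alias index.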
 680 Node* next_mem(Node* mem, int alias) {
 681   Node* res = NULL;
 682   if (mem->is_Proj()) {
 683     res = mem->in(0);
 684   } else if (mem->is_SafePoint() || mem->is_MemBar()) {
 685     res = mem->in(TypeFunc::Memory);
 686   } else if (mem->is_Phi()) {
 687     res = mem->in(1);
 688   } else if (mem->is_MergeMem()) {
 689     res = mem->as_MergeMem()->memory_at(alias);
 690   } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
 691     assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
 692     res = mem->in(MemNode::Memory);
 693   } else {
 694 #ifdef ASSERT
 695     mem->dump();
 696 #endif
 697     ShouldNotReachHere();
 698   }
 699   return res;
 700 }
 701 
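     // Walks control from c up to dom. Returns NULL if the path is branch free
     // (uncommon traps aside), the single extra If projection when allow_one_proj
     // permits one, or NodeSentinel for unsupported control flow shapes.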
 702 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
 703   Node* iffproj = NULL;
 704   while (c != dom) {
 705     Node* next = phase->idom(c);
 706     assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
 707     if (c->is_Region()) {
 708       ResourceMark rm;
 709       Unique_Node_List wq;
 710       wq.push(c);
 711       for (uint i = 0; i < wq.size(); i++) {
 712         Node *n = wq.at(i);
 713         if (n == next) {
 714           continue;
 715         }
 716         if (n->is_Region()) {
 717           for (uint j = 1; j < n->req(); j++) {
 718             wq.push(n->in(j));
 719           }
 720         } else {
 721           wq.push(n->in(0));
 722         }
 723       }
 724       for (uint i = 0; i < wq.size(); i++) {
 725         Node *n = wq.at(i);
 726         assert(n->is_CFG(), "");
 727         if (n->is_Multi()) {
 728           for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 729             Node* u = n->fast_out(j);
 730             if (u->is_CFG()) {
 731               if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 732                 return NodeSentinel;
 733               }
 734             }
 735           }
 736         }
 737       }
 738     } else if (c->is_Proj()) {
 739       if (c->is_IfProj()) {
 740         if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
 741           // Branch to an uncommon trap is allowed; keep walking up.
 742         } else {
 743           if (!allow_one_proj) {
 744             return NodeSentinel;
 745           }
 746           if (iffproj == NULL) {
 747             iffproj = c;
 748           } else {
 749             return NodeSentinel;
 750           }
 751         }
 752       } else if (c->Opcode() == Op_JumpProj) {
 753         return NodeSentinel; // unsupported
 754       } else if (c->Opcode() == Op_CatchProj) {
 755         return NodeSentinel; // unsupported
 756       } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
 757         return NodeSentinel; // unsupported
 758       } else {
 759         assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
 760       }
 761     }
 762     c = next;
 763   }
 764   return iffproj;
 765 }
 766 
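     // Walks the memory graph up from mem until a memory state whose control
     // strictly dominates ctrl is found; returns NULL if the walk cycles.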
 767 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
 768   ResourceMark rm;
 769   VectorSet wq(Thread::current()->resource_area());
 770   wq.set(mem->_idx);
 771   mem_ctrl = phase->ctrl_or_self(mem);
 772   while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
 773     mem = next_mem(mem, alias);
 774     if (wq.test_set(mem->_idx)) {
 775       return NULL;
 776     }
 777     mem_ctrl = phase->ctrl_or_self(mem);
 778   }
 779   if (mem->is_MergeMem()) {
 780     mem = mem->as_MergeMem()->memory_at(alias);
 781     mem_ctrl = phase->ctrl_or_self(mem);
 782   }
 783   return mem;
 784 }
 785 
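     // Finds the wide (TypePtr::BOTTOM) memory state live at ctrl by walking up the
     // dominator tree until a suitable memory Phi or projection is found.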
 786 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
 787   Node* mem = NULL;
 788   Node* c = ctrl;
 789   do {
 790     if (c->is_Region()) {
 791       Node* phi_bottom = NULL;
 792       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
 793         Node* u = c->fast_out(i);
 794         if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
 795           if (u->adr_type() == TypePtr::BOTTOM) {
 796             mem = u;
 797           }
 798         }
 799       }
 800     } else {
 801       if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
 802         CallProjections projs;
 803         c->as_Call()->extract_projections(&projs, true, false);
 804         if (projs.fallthrough_memproj != NULL) {
 805           if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
 806             if (projs.catchall_memproj == NULL) {
 807               mem = projs.fallthrough_memproj;
 808             } else {
 809               if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
 810                 mem = projs.fallthrough_memproj;
 811               } else {
 812                 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
 813                 mem = projs.catchall_memproj;
 814               }
 815             }
 816           }
 817         } else {
 818           Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
 819           if (proj != NULL &&
 820               proj->adr_type() == TypePtr::BOTTOM) {
 821             mem = proj;
 822           }
 823         }
 824       } else {
 825         for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
 826           Node* u = c->fast_out(i);
 827           if (u->is_Proj() &&
 828               u->bottom_type() == Type::MEMORY &&
 829               u->adr_type() == TypePtr::BOTTOM) {
 830             assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
 831             assert(mem == NULL, "only one proj");
 832             mem = u;
 833           }
 834         }
 835         assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
 836       }
 837     }
 838     c = phase->idom(c);
 839   } while (mem == NULL);
 840   return mem;
 841 }
 842 
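     // Collects the non-CFG uses of n that have ctrl as their control, ignoring phis
     // that only see n on a loop backedge.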
 843 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
 844   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 845     Node* u = n->fast_out(i);
 846     if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
 847       uses.push(u);
 848     }
 849   }
 850 }
 851 
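     // Replaces the outer strip mined loop head and its loop end with a plain
     // LoopNode/IfNode pair so the loop nest no longer appears strip mined.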
 852 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
 853   OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
 854   Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
 855   phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
 856   Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
 857   phase->register_control(new_le, phase->get_loop(le), le->in(0));
 858   phase->lazy_replace(outer, new_outer);
 859   phase->lazy_replace(le, new_le);
 860   inner->clear_strip_mined();
 861 }
 862 
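     // Emits the gc-state test: loads the thread-local gc-state byte, ands it with
     // flags and splits ctrl into heap_stable_ctrl (flags clear) and the continuing
     // barrier path.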
 863 void ShenandoahBarrierC2Support::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
 864                                                   PhaseIdealLoop* phase, int flags) {
 865   IdealLoopTree* loop = phase->get_loop(ctrl);
 866   Node* thread = new ThreadLocalNode();
 867   phase->register_new_node(thread, ctrl);
 868   Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 869   phase->set_ctrl(offset, phase->C->root());
 870   Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
 871   phase->register_new_node(gc_state_addr, ctrl);
 872   uint gc_state_idx = Compile::AliasIdxRaw;
 873   const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
 874   debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
 875 
 876   Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
 877   phase->register_new_node(gc_state, ctrl);
 878   Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(flags));
 879   phase->register_new_node(heap_stable_and, ctrl);
 880   Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
 881   phase->register_new_node(heap_stable_cmp, ctrl);
 882   Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
 883   phase->register_new_node(heap_stable_test, ctrl);
 884   IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 885   phase->register_control(heap_stable_iff, loop, ctrl);
 886 
 887   heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
 888   phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
 889   ctrl = new IfTrueNode(heap_stable_iff);
 890   phase->register_control(ctrl, loop, heap_stable_iff);
 891 
 892   assert(is_heap_state_test(heap_stable_iff, flags), "Should match the shape");
 893 }
 894 
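     // If val can be NULL, emits an explicit NULL check and splits ctrl into the
     // non-NULL path (ctrl) and the NULL path (null_ctrl).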
 895 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
 896   const Type* val_t = phase->igvn().type(val);
 897   if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
 898     IdealLoopTree* loop = phase->get_loop(ctrl);
 899     Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
 900     phase->register_new_node(null_cmp, ctrl);
 901     Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
 902     phase->register_new_node(null_test, ctrl);
 903     IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
 904     phase->register_control(null_iff, loop, ctrl);
 905     ctrl = new IfTrueNode(null_iff);
 906     phase->register_control(ctrl, loop, null_iff);
 907     null_ctrl = new IfFalseNode(null_iff);
 908     phase->register_control(null_ctrl, loop, null_iff);
 909   }
 910 }
 911 
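     // Clones the null-check If that guards the uncommon trap at unc_ctrl onto the
     // new control c and returns a fresh non-NULL CastPP of the value pinned there.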
 912 Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
 913   IdealLoopTree *loop = phase->get_loop(c);
 914   Node* iff = unc_ctrl->in(0);
 915   assert(iff->is_If(), "broken");
 916   Node* new_iff = iff->clone();
 917   new_iff->set_req(0, c);
 918   phase->register_control(new_iff, loop, c);
 919   Node* iffalse = new IfFalseNode(new_iff->as_If());
 920   phase->register_control(iffalse, loop, new_iff);
 921   Node* iftrue = new IfTrueNode(new_iff->as_If());
 922   phase->register_control(iftrue, loop, new_iff);
 923   c = iftrue;
 924   const Type *t = phase->igvn().type(val);
 925   assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
 926   Node* uncasted_val = val->in(1);
 927   val = new CastPPNode(uncasted_val, t);
 928   val->init_req(0, c);
 929   phase->register_new_node(val, c);
 930   return val;
 931 }
 932 
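     // After the null check has been cloned, rewires the uncommon trap (or the
     // region feeding it) and everything controlled by the old projection to
     // new_unc_ctrl.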
 933 void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
 934                                                 Unique_Node_List& uses, PhaseIdealLoop* phase) {
 935   IfNode* iff = unc_ctrl->in(0)->as_If();
 936   Node* proj = iff->proj_out(0);
 937   assert(proj != unc_ctrl, "bad projection");
 938   Node* use = proj->unique_ctrl_out();
 939 
 940   assert(use == unc || use->is_Region(), "what else?");
 941 
 942   uses.clear();
 943   if (use == unc) {
 944     phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
 945     for (uint i = 1; i < unc->req(); i++) {
 946       Node* n = unc->in(i);
 947       if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
 948         uses.push(n);
 949       }
 950     }
 951   } else {
 952     assert(use->is_Region(), "what else?");
 953     uint idx = 1;
 954     for (; use->in(idx) != proj; idx++);
 955     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
 956       Node* u = use->fast_out(i);
 957       if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
 958         uses.push(u->in(idx));
 959       }
 960     }
 961   }
 962   for (uint next = 0; next < uses.size(); next++) {
 963     Node *n = uses.at(next);
 964     assert(phase->get_ctrl(n) == proj, "bad control");
 965     phase->set_ctrl_and_loop(n, new_unc_ctrl);
 966     if (n->in(0) == proj) {
 967       phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
 968     }
 969     for (uint i = 0; i < n->req(); i++) {
 970       Node* m = n->in(i);
 971       if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
 972         uses.push(m);
 973       }
 974     }
 975   }
 976 
 977   phase->igvn().rehash_node_delayed(use);
 978   int nb = use->replace_edge(proj, new_unc_ctrl);
 979   assert(nb == 1, "only use expected");
 980 }
 981 
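     // Emits the collection set membership test: indexes the in-cset fast test table
     // with the region index of val and splits ctrl into not_cset_ctrl and the
     // continuing slow path.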
 982 void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
 983   IdealLoopTree *loop = phase->get_loop(ctrl);
 984   Node* raw_rbtrue = new CastP2XNode(ctrl, val);
 985   phase->register_new_node(raw_rbtrue, ctrl);
 986   Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
 987   phase->register_new_node(cset_offset, ctrl);
 988   Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
 989   phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
 990   Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
 991   phase->register_new_node(in_cset_fast_test_adr, ctrl);
 992   uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
 993   const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
 994   debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
 995   Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
 996   phase->register_new_node(in_cset_fast_test_load, ctrl);
 997   Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
 998   phase->register_new_node(in_cset_fast_test_cmp, ctrl);
 999   Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
1000   phase->register_new_node(in_cset_fast_test_test, ctrl);
1001   IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
1002   phase->register_control(in_cset_fast_test_iff, loop, ctrl);
1003 
1004   not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
1005   phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);
1006 
1007   ctrl = new IfFalseNode(in_cset_fast_test_iff);
1008   phase->register_control(ctrl, loop, in_cset_fast_test_iff);
1009 }
1010 
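     // Emits the call to the load reference barrier runtime stub and replaces val
     // with the (checkcast'ed) oop returned by the call.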
1011 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) {
1012   IdealLoopTree* loop = phase->get_loop(ctrl);
1013   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
1014 
1015   // The slow path stub consumes and produces raw memory in addition
1016   // to the existing memory edges
1017   Node* base = find_bottom_mem(ctrl, phase);
1018   MergeMemNode* mm = MergeMemNode::make(base);
1019   mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1020   phase->register_new_node(mm, ctrl);
1021 
1022   address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
1023           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) :
1024           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier);
1025 
1026   address calladdr = is_native ? CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native)
1027                                : target;
1028   const char* name = is_native ? "load_reference_barrier_native" : "load_reference_barrier";
1029   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
1030 
1031   call->init_req(TypeFunc::Control, ctrl);
1032   call->init_req(TypeFunc::I_O, phase->C->top());
1033   call->init_req(TypeFunc::Memory, mm);
1034   call->init_req(TypeFunc::FramePtr, phase->C->top());
1035   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1036   call->init_req(TypeFunc::Parms, val);
1037   call->init_req(TypeFunc::Parms+1, load_addr);
1038   phase->register_control(call, loop, ctrl);
1039   ctrl = new ProjNode(call, TypeFunc::Control);
1040   phase->register_control(ctrl, loop, call);
1041   result_mem = new ProjNode(call, TypeFunc::Memory);
1042   phase->register_new_node(result_mem, call);
1043   val = new ProjNode(call, TypeFunc::Parms);
1044   phase->register_new_node(val, call);
1045   val = new CheckCastPPNode(ctrl, val, obj_type);
1046   phase->register_new_node(val, ctrl);
1047 }
1048 
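     // Once a barrier has been expanded, moves the nodes that were control dependent
     // on the barrier's old control to the new region, keeping the incoming raw
     // memory state above the barrier.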
1049 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
1050   Node* ctrl = phase->get_ctrl(barrier);
1051   Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
1052 
1053   // Update the control of all nodes that should be after the
1054   // barrier control flow
1055   uses.clear();
1056   // Every node that is control dependent on the barrier's input
1057   // control will be after the expanded barrier. The raw memory (if
1058   // it is control dependent on the barrier's input control)
1059   // must stay above the barrier.
1060   uses_to_ignore.clear();
1061   if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1062     uses_to_ignore.push(init_raw_mem);
1063   }
1064   for (uint next = 0; next < uses_to_ignore.size(); next++) {
1065     Node *n = uses_to_ignore.at(next);
1066     for (uint i = 0; i < n->req(); i++) {
1067       Node* in = n->in(i);
1068       if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
1069         uses_to_ignore.push(in);
1070       }
1071     }
1072   }
1073   for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
1074     Node* u = ctrl->fast_out(i);
1075     if (u->_idx < last &&
1076         u != barrier &&
1077         !uses_to_ignore.member(u) &&
1078         (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
1079         (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
1080       Node* old_c = phase->ctrl_or_self(u);
1081       Node* c = old_c;
1082       if (c != ctrl ||
1083           is_dominator_same_ctrl(old_c, barrier, u, phase) ||
1084           ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
1085         phase->igvn().rehash_node_delayed(u);
1086         int nb = u->replace_edge(ctrl, region);
1087         if (u->is_CFG()) {
1088           if (phase->idom(u) == ctrl) {
1089             phase->set_idom(u, region, phase->dom_depth(region));
1090           }
1091         } else if (phase->get_ctrl(u) == ctrl) {
1092           assert(u != init_raw_mem, "should leave input raw mem above the barrier");
1093           uses.push(u);
1094         }
1095         assert(nb == 1, "more than 1 ctrl input?");
1096         --i, imax -= nb;
1097       }
1098     }
1099   }
1100 }
1101 
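     // Builds phis that merge n (normal call return path) and n_clone (exception
     // path) at the regions between the call projections and ctrl.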
1102 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1103   Node* region = NULL;
1104   while (c != ctrl) {
1105     if (c->is_Region()) {
1106       region = c;
1107     }
1108     c = phase->idom(c);
1109   }
1110   assert(region != NULL, "");
1111   Node* phi = new PhiNode(region, n->bottom_type());
1112   for (uint j = 1; j < region->req(); j++) {
1113     Node* in = region->in(j);
1114     if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1115       phi->init_req(j, n);
1116     } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1117       phi->init_req(j, n_clone);
1118     } else {
1119       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1120     }
1121   }
1122   phase->register_new_node(phi, region);
1123   return phi;
1124 }
1125 
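     // Main expansion pass: pins each barrier at a safe control point and expands it
     // into explicit tests and runtime calls using the helpers above.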
1126 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1127   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1128 
1129   Unique_Node_List uses;
1130   for (int i = 0; i < state->enqueue_barriers_count(); i++) {
1131     Node* barrier = state->enqueue_barrier(i);
1132     Node* ctrl = phase->get_ctrl(barrier);
1133     IdealLoopTree* loop = phase->get_loop(ctrl);
1134     if (loop->_head->is_OuterStripMinedLoop()) {
1135       // Expanding a barrier here will break loop strip mining
1136       // verification. Transform the loop so the loop nest doesn't
1137       // appear as strip mined.
1138       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1139       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1140     }
1141   }
1142 
1143   Node_Stack stack(0);
1144   Node_List clones;
1145   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1146     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1147     if (lrb->is_redundant()) {
1148       continue;
1149     }
1150 
1151     Node* ctrl = phase->get_ctrl(lrb);
1152     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1153 
1154     CallStaticJavaNode* unc = NULL;
1155     Node* unc_ctrl = NULL;
1156     Node* uncasted_val = val;
1157 
1158     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1159       Node* u = lrb->fast_out(i);
1160       if (u->Opcode() == Op_CastPP &&
1161           u->in(0) != NULL &&
1162           phase->is_dominator(u->in(0), ctrl)) {
1163         const Type* u_t = phase->igvn().type(u);
1164 
1165         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1166             u->in(0)->Opcode() == Op_IfTrue &&
1167             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1168             u->in(0)->in(0)->is_If() &&
1169             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1170             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1171             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1172             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1173             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1174           IdealLoopTree* loop = phase->get_loop(ctrl);
1175           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1176 
1177           if (!unc_loop->is_member(loop)) {
1178             continue;
1179           }
1180 
1181           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1182           assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1183           if (branch == NodeSentinel) {
1184             continue;
1185           }
1186 
1187           phase->igvn().replace_input_of(u, 1, val);
1188           phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
1189           phase->set_ctrl(u, u->in(0));
1190           phase->set_ctrl(lrb, u->in(0));
1191           unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1192           unc_ctrl = u->in(0);
1193           val = u;
1194 
1195           for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
1196             Node* u = val->fast_out(j);
1197             if (u == lrb) continue;
1198             phase->igvn().rehash_node_delayed(u);
1199             int nb = u->replace_edge(val, lrb);
1200             --j; jmax -= nb;
1201           }
1202 
1203           RegionNode* r = new RegionNode(3);
1204           IfNode* iff = unc_ctrl->in(0)->as_If();
1205 
1206           Node* ctrl_use = unc_ctrl->unique_ctrl_out();
1207           Node* unc_ctrl_clone = unc_ctrl->clone();
1208           phase->register_control(unc_ctrl_clone, loop, iff);
1209           Node* c = unc_ctrl_clone;
1210           Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
1211           r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
1212 
1213           phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
1214           phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
1215           phase->lazy_replace(c, unc_ctrl);
1216           c = NULL;
1217           phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
1218           phase->set_ctrl(val, unc_ctrl_clone);
1219 
1220           IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
1221           fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
1222           Node* iff_proj = iff->proj_out(0);
1223           r->init_req(2, iff_proj);
1224           phase->register_control(r, phase->ltree_root(), iff);
1225 
1226           Node* new_bol = new_iff->in(1)->clone();
1227           Node* new_cmp = new_bol->in(1)->clone();
1228           assert(new_cmp->Opcode() == Op_CmpP, "broken");
1229           assert(new_cmp->in(1) == val->in(1), "broken");
1230           new_bol->set_req(1, new_cmp);
1231           new_cmp->set_req(1, lrb);
1232           phase->register_new_node(new_bol, new_iff->in(0));
1233           phase->register_new_node(new_cmp, new_iff->in(0));
1234           phase->igvn().replace_input_of(new_iff, 1, new_bol);
1235           phase->igvn().replace_input_of(new_cast, 1, lrb);
1236 
1237           for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1238             Node* u = lrb->fast_out(i);
1239             if (u == new_cast || u == new_cmp) {
1240               continue;
1241             }
1242             phase->igvn().rehash_node_delayed(u);
1243             int nb = u->replace_edge(lrb, new_cast);
1244             assert(nb > 0, "no update?");
1245             --i; imax -= nb;
1246           }
1247 
1248           for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1249             Node* u = val->fast_out(i);
1250             if (u == lrb) {
1251               continue;
1252             }
1253             phase->igvn().rehash_node_delayed(u);
1254             int nb = u->replace_edge(val, new_cast);
1255             assert(nb > 0, "no update?");
1256             --i; imax -= nb;
1257           }
1258 
1259           ctrl = unc_ctrl_clone;
1260           phase->set_ctrl_and_loop(lrb, ctrl);
1261           break;
1262         }
1263       }
1264     }
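         // A barrier pinned at a Java call cannot be expanded there. For the
         // rethrow stub, simply move the barrier (and any of its inputs that are
         // control dependent on the call) above the call; other calls are handled
         // by cloning the barrier across the call's projections below.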
1265     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1266       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1267       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1268         // The rethrow call may have too many projections to be
1269         // handled properly here. Since there is no reason for the
1270         // barrier to depend on the call, move the barrier above the call.
1271         stack.push(lrb, 0);
1272         do {
1273           Node* n = stack.node();
1274           uint idx = stack.index();
1275           if (idx < n->req()) {
1276             Node* in = n->in(idx);
1277             stack.set_index(idx+1);
1278             if (in != NULL) {
1279               if (phase->has_ctrl(in)) {
1280                 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1281 #ifdef ASSERT
1282                   for (uint i = 0; i < stack.size(); i++) {
1283                     assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1284                   }
1285 #endif
1286                   stack.push(in, 0);
1287                 }
1288               } else {
1289                 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1290               }
1291             }
1292           } else {
1293             phase->set_ctrl(n, call->in(0));
1294             stack.pop();
1295           }
1296         } while (stack.size() > 0);
1297         continue;
1298       }
1299       CallProjections projs;
1300       call->extract_projections(&projs, false, false);
1301 
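           // Duplicate the barrier across the call: the original is pinned on the
           // fall-through projection and a clone on the exception projection, then
           // the barrier's uses (and nodes depending on them) are walked and wired
           // to whichever copy applies, creating phis across the call return where
           // a use can see both paths (create_phis_on_call_return).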
1302       Node* lrb_clone = lrb->clone();
1303       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1304       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1305 
1306       stack.push(lrb, 0);
1307       clones.push(lrb_clone);
1308 
1309       do {
1310         assert(stack.size() == clones.size(), "");
1311         Node* n = stack.node();
1312 #ifdef ASSERT
1313         if (n->is_Load()) {
1314           Node* mem = n->in(MemNode::Memory);
1315           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1316             Node* u = mem->fast_out(j);
1317             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1318           }
1319         }
1320 #endif
1321         uint idx = stack.index();
1322         Node* n_clone = clones.at(clones.size()-1);
1323         if (idx < n->outcnt()) {
1324           Node* u = n->raw_out(idx);
1325           Node* c = phase->ctrl_or_self(u);
1326           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1327             stack.set_index(idx+1);
1328             assert(!u->is_CFG(), "");
1329             stack.push(u, 0);
1330             Node* u_clone = u->clone();
1331             int nb = u_clone->replace_edge(n, n_clone);
1332             assert(nb > 0, "should have replaced some uses");
1333             phase->register_new_node(u_clone, projs.catchall_catchproj);
1334             clones.push(u_clone);
1335             phase->set_ctrl(u, projs.fallthrough_catchproj);
1336           } else {
1337             bool replaced = false;
1338             if (u->is_Phi()) {
1339               for (uint k = 1; k < u->req(); k++) {
1340                 if (u->in(k) == n) {
1341                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1342                     phase->igvn().replace_input_of(u, k, n_clone);
1343                     replaced = true;
1344                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1345                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1346                     replaced = true;
1347                   }
1348                 }
1349               }
1350             } else {
1351               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1352                 phase->igvn().rehash_node_delayed(u);
1353                 int nb = u->replace_edge(n, n_clone);
1354                 assert(nb > 0, "should have replaced some uses");
1355                 replaced = true;
1356               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1357                 phase->igvn().rehash_node_delayed(u);
1358                 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1359                 assert(nb > 0, "should have replaced some uses");
1360                 replaced = true;
1361               }
1362             }
1363             if (!replaced) {
1364               stack.set_index(idx+1);
1365             }
1366           }
1367         } else {
1368           stack.pop();
1369           clones.pop();
1370         }
1371       } while (stack.size() > 0);
1372       assert(stack.size() == 0 && clones.size() == 0, "");
1373     }
1374   }
1375 
1376   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1377     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1378     if (lrb->is_redundant()) {
1379       continue;
1380     }
1381     Node* ctrl = phase->get_ctrl(lrb);
1382     IdealLoopTree* loop = phase->get_loop(ctrl);
1383     if (loop->_head->is_OuterStripMinedLoop()) {
1384       // Expanding a barrier here will break loop strip mining
1385       // verification. Transform the loop so the loop nest doesn't
1386       // appear as strip mined.
1387       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1388       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1389     }
1390   }
1391 
1392   // Expand load-reference-barriers
1393   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1394   Unique_Node_List uses_to_ignore;
1395   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1396     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1397     if (lrb->is_redundant()) {
1398       phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1399       continue;
1400     }
1401     uint last = phase->C->unique();
1402     Node* ctrl = phase->get_ctrl(lrb);
1403     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1404 
1405 
1406     Node* orig_ctrl = ctrl;
1407 
1408     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1409     Node* init_raw_mem = raw_mem;
1410     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1411 
1412     IdealLoopTree *loop = phase->get_loop(ctrl);
1413     CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
1414     Node* unc_ctrl = NULL;
1415     if (unc != NULL) {
1416       if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
1417         unc = NULL;
1418       } else {
1419         unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
1420       }
1421     }
1422 
1423     Node* uncasted_val = val;
1424     if (unc != NULL) {
1425       uncasted_val = val->in(1);
1426     }
1427 
1428     Node* heap_stable_ctrl = NULL;
1429     Node* null_ctrl = NULL;
1430 
1431     assert(val->bottom_type()->make_oopptr(), "need oop");
1432     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1433 
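         // Expand the barrier into explicit control flow: the value passes through
         // unchanged when the heap has no forwarded objects, when it is null, or
         // when it is not in the collection set; otherwise the evacuation slow path
         // calls the LRB runtime stub. region, val_phi and raw_mem_phi merge
         // control, the result value and the raw memory state of these paths.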
1434     enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT };
1435     Node* region = new RegionNode(PATH_LIMIT);
1436     Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
1437     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1438 
1439     // Stable path.
1440     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::HAS_FORWARDED);
1441     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1442 
1443     // Heap stable case
1444     region->init_req(_heap_stable, heap_stable_ctrl);
1445     val_phi->init_req(_heap_stable, uncasted_val);
1446     raw_mem_phi->init_req(_heap_stable, raw_mem);
1447 
1448     Node* reg2_ctrl = NULL;
1449     // Null case
1450     test_null(ctrl, val, null_ctrl, phase);
1451     if (null_ctrl != NULL) {
1452       reg2_ctrl = null_ctrl->in(0);
1453       region->init_req(_null_path, null_ctrl);
1454       val_phi->init_req(_null_path, uncasted_val);
1455       raw_mem_phi->init_req(_null_path, raw_mem);
1456     } else {
1457       region->del_req(_null_path);
1458       val_phi->del_req(_null_path);
1459       raw_mem_phi->del_req(_null_path);
1460     }
1461 
1462     // Test for in-cset.
1463     // Wires !in_cset(obj) to slot 2 of region and phis
1464     Node* not_cset_ctrl = NULL;
1465     in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
1466     if (not_cset_ctrl != NULL) {
1467       if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
1468       region->init_req(_not_cset, not_cset_ctrl);
1469       val_phi->init_req(_not_cset, uncasted_val);
1470       raw_mem_phi->init_req(_not_cset, raw_mem);
1471     }
1472 
1473     // Resolve the object when the original value is in the collection set:
1474     // this path resolves unconditionally through the forwarding pointer.
1475     Node* new_val = uncasted_val;
1476     if (unc_ctrl != NULL) {
1477       // Clone the null check in this branch to allow implicit null check
1478       new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
1479       fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
1480 
1481       IfNode* iff = unc_ctrl->in(0)->as_If();
1482       phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
1483     }
1484 
1485     // Call the LRB stub and wire up that path into the _evac_path slot.
1486     Node* result_mem = NULL;
1487 
1488     Node* fwd = new_val;
1489     Node* addr;
1490     if (ShenandoahSelfFixing) {
1491       VectorSet visited(Thread::current()->resource_area());
1492       addr = get_load_addr(phase, visited, lrb);
1493     } else {
1494       addr = phase->igvn().zerocon(T_OBJECT);
1495     }
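         // If the load address is an AddP, rebuild it (and, for the two-level
         // array-access shape, the inner AddP too) on a CheckCastPP of its base
         // that is pinned at this path's control, so the address handed to the
         // stub is computed on the slow path itself.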
1496     if (addr->Opcode() == Op_AddP) {
1497       Node* orig_base = addr->in(AddPNode::Base);
1498       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), true);
1499       phase->register_new_node(base, ctrl);
1500       if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1501         // Field access
1502         addr = addr->clone();
1503         addr->set_req(AddPNode::Base, base);
1504         addr->set_req(AddPNode::Address, base);
1505         phase->register_new_node(addr, ctrl);
1506       } else {
1507         Node* addr2 = addr->in(AddPNode::Address);
1508         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1509               addr2->in(AddPNode::Base) == orig_base) {
1510           addr2 = addr2->clone();
1511           addr2->set_req(AddPNode::Base, base);
1512           addr2->set_req(AddPNode::Address, base);
1513           phase->register_new_node(addr2, ctrl);
1514           addr = addr->clone();
1515           addr->set_req(AddPNode::Base, base);
1516           addr->set_req(AddPNode::Address, addr2);
1517           phase->register_new_node(addr, ctrl);
1518         }
1519       }
1520     }
1521     call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, lrb->is_native(), phase);
1522     region->init_req(_evac_path, ctrl);
1523     val_phi->init_req(_evac_path, fwd);
1524     raw_mem_phi->init_req(_evac_path, result_mem);
1525 
1526     phase->register_control(region, loop, heap_stable_iff);
1527     Node* out_val = val_phi;
1528     phase->register_new_node(val_phi, region);
1529     phase->register_new_node(raw_mem_phi, region);
1530 
1531     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1532 
1533     ctrl = orig_ctrl;
1534 
1535     if (unc != NULL) {
1536       for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1537         Node* u = val->fast_out(i);
1538         Node* c = phase->ctrl_or_self(u);
1539         if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
1540           phase->igvn().rehash_node_delayed(u);
1541           int nb = u->replace_edge(val, out_val);
1542           --i; imax -= nb;
1543         }
1544       }
1545       if (val->outcnt() == 0) {
1546         phase->igvn()._worklist.push(val);
1547       }
1548     }
1549     phase->igvn().replace_node(lrb, out_val);
1550 
1551     follow_barrier_uses(out_val, ctrl, uses, phase);
1552 
1553     for (uint next = 0; next < uses.size(); next++) {
1554       Node *n = uses.at(next);
1555       assert(phase->get_ctrl(n) == ctrl, "bad control");
1556       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1557       phase->set_ctrl(n, region);
1558       follow_barrier_uses(n, ctrl, uses, phase);
1559     }
1560 
1561     // The slow path call produces memory: hook the raw memory phi
1562     // from the expanded load reference barrier into the rest of the graph,
1563     // which may require adding memory phis at every post-dominated
1564     // region and at enclosing loop heads. Use the memory state
1565     // collected in memory_nodes to fix the memory graph, and update
1566     // that memory state as we go.
1567     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1568   }
1569   // Done expanding load-reference-barriers.
1570   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1571 
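       // Expand enqueue (SATB pre-write) barriers: the recorded value is only
       // enqueued while marking or traversal is active and when it is non-null.
       // The fast path stores it into the thread-local SATB queue buffer; the
       // slow path calls the write_ref_field_pre runtime entry when the queue
       // is full. The barrier node itself is then replaced by its input.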
1572   for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
1573     Node* barrier = state->enqueue_barrier(i);
1574     Node* pre_val = barrier->in(1);
1575 
1576     assert(!phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR), "no known-NULLs here");
1577 
1578     if (((ShenandoahEnqueueBarrierNode*)barrier)->can_eliminate(phase)) {
1579       phase->igvn().replace_node(barrier, pre_val);
1580       continue;
1581     }
1582 
1583     Node* ctrl = phase->get_ctrl(barrier);
1584 
1585     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1586       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1587       ctrl = ctrl->in(0)->in(0);
1588       phase->set_ctrl(barrier, ctrl);
1589     } else if (ctrl->is_CallRuntime()) {
1590       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1591       ctrl = ctrl->in(0);
1592       phase->set_ctrl(barrier, ctrl);
1593     }
1594 
1595     Node* init_ctrl = ctrl;
1596     IdealLoopTree* loop = phase->get_loop(ctrl);
1597     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1598     Node* init_raw_mem = raw_mem;
1599     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1600     Node* heap_stable_ctrl = NULL;
1601     Node* null_ctrl = NULL;
1602     uint last = phase->C->unique();
1603 
1604     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1605     Node* region = new RegionNode(PATH_LIMIT);
1606     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1607 
1608     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1609     Node* region2 = new RegionNode(PATH_LIMIT2);
1610     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1611 
1612     // Stable path.
1613     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::TRAVERSAL | ShenandoahHeap::MARKING);
1614     region->init_req(_heap_stable, heap_stable_ctrl);
1615     phi->init_req(_heap_stable, raw_mem);
1616 
1617     // Null path
1618     Node* reg2_ctrl = NULL;
1619     test_null(ctrl, pre_val, null_ctrl, phase);
1620     if (null_ctrl != NULL) {
1621       reg2_ctrl = null_ctrl->in(0);
1622       region2->init_req(_null_path, null_ctrl);
1623       phi2->init_req(_null_path, raw_mem);
1624     } else {
1625       region2->del_req(_null_path);
1626       phi2->del_req(_null_path);
1627     }
1628 
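         // SATB queue fast path: load the queue index and buffer pointer from
         // thread-local data. A non-zero index means there is room, so the
         // pre-value is stored at buffer[index - wordSize] and the decremented
         // index is written back.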
1629     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1630     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1631     Node* thread = new ThreadLocalNode();
1632     phase->register_new_node(thread, ctrl);
1633     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1634     phase->register_new_node(buffer_adr, ctrl);
1635     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1636     phase->register_new_node(index_adr, ctrl);
1637 
1638     BasicType index_bt = TypeX_X->basic_type();
1639     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading SATBMarkQueue::_index with wrong size.");
1640     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1641     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1642     phase->register_new_node(index, ctrl);
1643     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1644     phase->register_new_node(index_cmp, ctrl);
1645     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1646     phase->register_new_node(index_test, ctrl);
1647     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1648     if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1649     phase->register_control(queue_full_iff, loop, ctrl);
1650     Node* not_full = new IfTrueNode(queue_full_iff);
1651     phase->register_control(not_full, loop, queue_full_iff);
1652     Node* full = new IfFalseNode(queue_full_iff);
1653     phase->register_control(full, loop, queue_full_iff);
1654 
1655     ctrl = not_full;
1656 
1657     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1658     phase->register_new_node(next_index, ctrl);
1659 
1660     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1661     phase->register_new_node(buffer, ctrl);
1662     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1663     phase->register_new_node(log_addr, ctrl);
1664     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1665     phase->register_new_node(log_store, ctrl);
1666     // update the index
1667     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1668     phase->register_new_node(index_update, ctrl);
1669 
1670     // Fast-path case
1671     region2->init_req(_fast_path, ctrl);
1672     phi2->init_req(_fast_path, index_update);
1673 
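         // SATB queue slow path: the queue is full, so pass pre_val and the
         // current thread to the runtime and merge the call's control and memory
         // back through region2/phi2.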
1674     ctrl = full;
1675 
1676     Node* base = find_bottom_mem(ctrl, phase);
1677 
1678     MergeMemNode* mm = MergeMemNode::make(base);
1679     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1680     phase->register_new_node(mm, ctrl);
1681 
1682     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1683     call->init_req(TypeFunc::Control, ctrl);
1684     call->init_req(TypeFunc::I_O, phase->C->top());
1685     call->init_req(TypeFunc::Memory, mm);
1686     call->init_req(TypeFunc::FramePtr, phase->C->top());
1687     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1688     call->init_req(TypeFunc::Parms, pre_val);
1689     call->init_req(TypeFunc::Parms+1, thread);
1690     phase->register_control(call, loop, ctrl);
1691 
1692     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1693     phase->register_control(ctrl_proj, loop, call);
1694     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1695     phase->register_new_node(mem_proj, call);
1696 
1697     // Slow-path case
1698     region2->init_req(_slow_path, ctrl_proj);
1699     phi2->init_req(_slow_path, mem_proj);
1700 
1701     phase->register_control(region2, loop, reg2_ctrl);
1702     phase->register_new_node(phi2, region2);
1703 
1704     region->init_req(_heap_unstable, region2);
1705     phi->init_req(_heap_unstable, phi2);
1706 
1707     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1708     phase->register_new_node(phi, region);
1709 
1710     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1711     for (uint next = 0; next < uses.size(); next++) {
1712       Node *n = uses.at(next);
1713       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1714       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1715       phase->set_ctrl(n, region);
1716       follow_barrier_uses(n, init_ctrl, uses, phase);
1717     }
1718     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1719 
1720     phase->igvn().replace_node(barrier, pre_val);
1721   }
1722   assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1723 
1724 }
1725 
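     // Walk back from a barrier's value to the address it was loaded from, for the
     // self-fixing slow path. NULL is returned for nodes already visited on this
     // walk; a zero constant stands for "no single load address can be determined".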
1726 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1727   if (visited.test_set(in->_idx)) {
1728     return NULL;
1729   }
1730   switch (in->Opcode()) {
1731     case Op_Proj:
1732       return get_load_addr(phase, visited, in->in(0));
1733     case Op_CastPP:
1734     case Op_CheckCastPP:
1735     case Op_DecodeN:
1736     case Op_EncodeP:
1737       return get_load_addr(phase, visited, in->in(1));
1738     case Op_LoadN:
1739     case Op_LoadP:
1740       return in->in(MemNode::Address);
1741     case Op_CompareAndExchangeN:
1742     case Op_CompareAndExchangeP:
1743     case Op_GetAndSetN:
1744     case Op_GetAndSetP:
1745     case Op_ShenandoahCompareAndExchangeP:
1746     case Op_ShenandoahCompareAndExchangeN:
1747       // These instructions would just have stored a different
1748       // value into the field; there is no point attempting to fix it here.
1749       return phase->igvn().zerocon(T_OBJECT);
1750     case Op_CMoveP:
1751     case Op_CMoveN: {
1752       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1753       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1754       // Handle unambiguous cases: single address reported on both branches.
1755       if (t != NULL && f == NULL) return t;
1756       if (t == NULL && f != NULL) return f;
1757       if (t != NULL && t == f)    return t;
1758       // Ambiguity.
1759       return phase->igvn().zerocon(T_OBJECT);
1760     }
1761     case Op_Phi: {
1762       Node* addr = NULL;
1763       for (uint i = 1; i < in->req(); i++) {
1764         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1765         if (addr == NULL) {
1766           addr = addr1;
1767         }
1768         if (addr != addr1) {
1769           return phase->igvn().zerocon(T_OBJECT);
1770         }
1771       }
1772       return addr;
1773     }
1774     case Op_ShenandoahLoadReferenceBarrier:
1775       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1776     case Op_ShenandoahEnqueueBarrier:
1777       return get_load_addr(phase, visited, in->in(1));
1778     case Op_CallDynamicJava:
1779     case Op_CallLeaf:
1780     case Op_CallStaticJava:
1781     case Op_ConN:
1782     case Op_ConP:
1783     case Op_Parm:
1784     case Op_CreateEx:
1785       return phase->igvn().zerocon(T_OBJECT);
1786     default:
1787 #ifdef ASSERT
1788       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1789 #endif
1790       return phase->igvn().zerocon(T_OBJECT);
1791   }
1792 
1793 }
1794 
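     // Hoist the gc-state load feeding a heap-stable test, together with its
     // AndI/CmpI/Bool chain, to the loop entry when it is not already dominated by
     // the entry control, making the test loop invariant so the loop can be
     // unswitched on it.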
1795 void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1796   IdealLoopTree *loop = phase->get_loop(iff);
1797   Node* loop_head = loop->_head;
1798   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1799 
1800   Node* bol = iff->in(1);
1801   Node* cmp = bol->in(1);
1802   Node* andi = cmp->in(1);
1803   Node* load = andi->in(1);
1804 
1805   assert(is_gc_state_load(load), "broken");
1806   if (!phase->is_dominator(load->in(0), entry_c)) {
1807     Node* mem_ctrl = NULL;
1808     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1809     load = load->clone();
1810     load->set_req(MemNode::Memory, mem);
1811     load->set_req(0, entry_c);
1812     phase->register_new_node(load, entry_c);
1813     andi = andi->clone();
1814     andi->set_req(1, load);
1815     phase->register_new_node(andi, entry_c);
1816     cmp = cmp->clone();
1817     cmp->set_req(1, andi);
1818     phase->register_new_node(cmp, entry_c);
1819     bol = bol->clone();
1820     bol->set_req(1, cmp);
1821     phase->register_new_node(bol, entry_c);
1822 
1824     phase->igvn().replace_input_of(iff, 1, bol);
1825   }
1826 }
1827 
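     // Recognize back-to-back heap-stable tests: n and the If that immediately
     // dominates its region must both be heap-stable tests, and every path into
     // the region must come from one of the dominating If's projections.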
1828 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1829   if (!n->is_If() || n->is_CountedLoopEnd()) {
1830     return false;
1831   }
1832   Node* region = n->in(0);
1833 
1834   if (!region->is_Region()) {
1835     return false;
1836   }
1837   Node* dom = phase->idom(region);
1838   if (!dom->is_If()) {
1839     return false;
1840   }
1841 
1842   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1843     return false;
1844   }
1845 
1846   IfNode* dom_if = dom->as_If();
1847   Node* proj_true = dom_if->proj_out(1);
1848   Node* proj_false = dom_if->proj_out(0);
1849 
1850   for (uint i = 1; i < region->req(); i++) {
1851     if (phase->is_dominator(proj_true, region->in(i))) {
1852       continue;
1853     }
1854     if (phase->is_dominator(proj_false, region->in(i))) {
1855       continue;
1856     }
1857     return false;
1858   }
1859 
1860   return true;
1861 }
1862 
1863 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1864   assert(is_heap_stable_test(n), "no other tests");
1865   if (identical_backtoback_ifs(n, phase)) {
1866     Node* n_ctrl = n->in(0);
1867     if (phase->can_split_if(n_ctrl)) {
1868       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1869       if (is_heap_stable_test(n)) {
1870         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1871         assert(is_gc_state_load(gc_state_load), "broken");
1872         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1873         assert(is_gc_state_load(dom_gc_state_load), "broken");
1874         if (gc_state_load != dom_gc_state_load) {
1875           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1876         }
1877       }
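           // Feed the second test a phi of constants, chosen by which projection of
           // the dominating test each region input is reached from, so split-if can
           // fold the redundant test away.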
1878       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1879       Node* proj_true = dom_if->proj_out(1);
1880       Node* proj_false = dom_if->proj_out(0);
1881       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1882       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1883 
1884       for (uint i = 1; i < n_ctrl->req(); i++) {
1885         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1886           bolphi->init_req(i, con_true);
1887         } else {
1888           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1889           bolphi->init_req(i, con_false);
1890         }
1891       }
1892       phase->register_new_node(bolphi, n_ctrl);
1893       phase->igvn().replace_input_of(n, 1, bolphi);
1894       phase->do_split_if(n);
1895     }
1896   }
1897 }
1898 
1899 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1900   // Find first invariant test that doesn't exit the loop
1901   LoopNode *head = loop->_head->as_Loop();
1902   IfNode* unswitch_iff = NULL;
1903   Node* n = head->in(LoopNode::LoopBackControl);
1904   int loop_has_sfpts = -1;
1905   while (n != head) {
1906     Node* n_dom = phase->idom(n);
1907     if (n->is_Region()) {
1908       if (n_dom->is_If()) {
1909         IfNode* iff = n_dom->as_If();
1910         if (iff->in(1)->is_Bool()) {
1911           BoolNode* bol = iff->in(1)->as_Bool();
1912           if (bol->in(1)->is_Cmp()) {
1913             // If the condition is invariant and not a loop exit,
1914             // then we have found a candidate test to unswitch on.
1915             if (is_heap_stable_test(iff) &&
1916                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1917               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1918               if (loop_has_sfpts == -1) {
1919                 for (uint i = 0; i < loop->_body.size(); i++) {
1920                   Node *m = loop->_body[i];
1921                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1922                     loop_has_sfpts = 1;
1923                     break;
1924                   }
1925                 }
1926                 if (loop_has_sfpts == -1) {
1927                   loop_has_sfpts = 0;
1928                 }
1929               }
1930               if (!loop_has_sfpts) {
1931                 unswitch_iff = iff;
1932               }
1933             }
1934           }
1935         }
1936       }
1937     }
1938     n = n_dom;
1939   }
1940   return unswitch_iff;
1941 }
1942 
1943 
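     // Post-expansion cleanup: collect all heap-stable tests reachable from Start,
     // merge back-to-back copies, and, if no major progress was made, try to
     // unswitch innermost loops on a heap-stable test hoisted out of the loop.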
1944 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1945   Node_List heap_stable_tests;
1946   stack.push(phase->C->start(), 0);
1947   do {
1948     Node* n = stack.node();
1949     uint i = stack.index();
1950 
1951     if (i < n->outcnt()) {
1952       Node* u = n->raw_out(i);
1953       stack.set_index(i+1);
1954       if (!visited.test_set(u->_idx)) {
1955         stack.push(u, 0);
1956       }
1957     } else {
1958       stack.pop();
1959       if (n->is_If() && is_heap_stable_test(n)) {
1960         heap_stable_tests.push(n);
1961       }
1962     }
1963   } while (stack.size() > 0);
1964 
1965   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1966     Node* n = heap_stable_tests.at(i);
1967     assert(is_heap_stable_test(n), "only evacuation test");
1968     merge_back_to_back_tests(n, phase);
1969   }
1970 
1971   if (!phase->C->major_progress()) {
1972     VectorSet seen(Thread::current()->resource_area());
1973     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1974       Node* n = heap_stable_tests.at(i);
1975       IdealLoopTree* loop = phase->get_loop(n);
1976       if (loop != phase->ltree_root() &&
1977           loop->_child == NULL &&
1978           !loop->_irreducible) {
1979         Node* head = loop->_head;
1980         if (head->is_Loop() &&
1981             (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1982             !seen.test_set(head->_idx)) {
1983           IfNode* iff = find_unswitching_candidate(loop, phase);
1984           if (iff != NULL) {
1985             Node* bol = iff->in(1);
1986             if (head->as_Loop()->is_strip_mined()) {
1987               head->as_Loop()->verify_strip_mined(0);
1988             }
1989             move_heap_stable_test_out_of_loop(iff, phase);
1990 
1991             AutoNodeBudget node_budget(phase);
1992 
1993             if (loop->policy_unswitching(phase)) {
1994               if (head->as_Loop()->is_strip_mined()) {
1995                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
1996                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
1997               }
1998               phase->do_unswitching(loop, old_new);
1999             } else {
2000               // Not proceeding with unswitching. Move the load back
2001               // into the loop.
2002               phase->igvn().replace_input_of(iff, 1, bol);
2003             }
2004           }
2005         }
2006       }
2007     }
2008   }
2009 }
2010 
2011 #ifdef ASSERT
2012 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
2013   const bool trace = false;
2014   ResourceMark rm;
2015   Unique_Node_List nodes;
2016   Unique_Node_List controls;
2017   Unique_Node_List memories;
2018 
2019   nodes.push(root);
2020   for (uint next = 0; next < nodes.size(); next++) {
2021     Node *n  = nodes.at(next);
2022     if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
2023       controls.push(n);
2024       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
2025       for (uint next2 = 0; next2 < controls.size(); next2++) {
2026         Node *m = controls.at(next2);
2027         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2028           Node* u = m->fast_out(i);
2029           if (u->is_CFG() && !u->is_Root() &&
2030               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
2031               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
2032             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
2033             controls.push(u);
2034           }
2035         }
2036       }
2037       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
2038       for (uint next2 = 0; next2 < memories.size(); next2++) {
2039         Node *m = memories.at(next2);
2040         assert(m->bottom_type() == Type::MEMORY, "");
2041         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2042           Node* u = m->fast_out(i);
2043           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
2044             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2045             memories.push(u);
2046           } else if (u->is_LoadStore()) {
2047             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
2048             memories.push(u->find_out_with(Op_SCMemProj));
2049           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
2050             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2051             memories.push(u);
2052           } else if (u->is_Phi()) {
2053             assert(u->bottom_type() == Type::MEMORY, "");
2054             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
2055               assert(controls.member(u->in(0)), "");
2056               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2057               memories.push(u);
2058             }
2059           } else if (u->is_SafePoint() || u->is_MemBar()) {
2060             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2061               Node* uu = u->fast_out(j);
2062               if (uu->bottom_type() == Type::MEMORY) {
2063                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
2064                 memories.push(uu);
2065               }
2066             }
2067           }
2068         }
2069       }
2070       for (uint next2 = 0; next2 < controls.size(); next2++) {
2071         Node *m = controls.at(next2);
2072         if (m->is_Region()) {
2073           bool all_in = true;
2074           for (uint i = 1; i < m->req(); i++) {
2075             if (!controls.member(m->in(i))) {
2076               all_in = false;
2077               break;
2078             }
2079           }
2080           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
2081           bool found_phi = false;
2082           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
2083             Node* u = m->fast_out(j);
2084             if (u->is_Phi() && memories.member(u)) {
2085               found_phi = true;
2086               for (uint i = 1; i < u->req() && found_phi; i++) {
2087                 Node* k = u->in(i);
2088                 if (memories.member(k) != controls.member(m->in(i))) {
2089                   found_phi = false;
2090                 }
2091               }
2092             }
2093           }
2094           assert(found_phi || all_in, "");
2095         }
2096       }
2097       controls.clear();
2098       memories.clear();
2099     }
2100     for (uint i = 0; i < n->len(); ++i) {
2101       Node *m = n->in(i);
2102       if (m != NULL) {
2103         nodes.push(m);
2104       }
2105     }
2106   }
2107 }
2108 #endif
2109 
2110 ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
2111   ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
2112 }
2113 
2114 const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
2115   if (in(1) == NULL || in(1)->is_top()) {
2116     return Type::TOP;
2117   }
2118   const Type* t = in(1)->bottom_type();
2119   if (t == TypePtr::NULL_PTR) {
2120     return t;
2121   }
2122   return t->is_oopptr();
2123 }
2124 
2125 const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
2126   if (in(1) == NULL) {
2127     return Type::TOP;
2128   }
2129   const Type* t = phase->type(in(1));
2130   if (t == Type::TOP) {
2131     return Type::TOP;
2132   }
2133   if (t == TypePtr::NULL_PTR) {
2134     return t;
2135   }
2136   return t->is_oopptr();
2137 }
2138 
2139 int ShenandoahEnqueueBarrierNode::needed(Node* n) {
2140   if (n == NULL ||
2141       n->is_Allocate() ||
2142       n->Opcode() == Op_ShenandoahEnqueueBarrier ||
2143       n->bottom_type() == TypePtr::NULL_PTR ||
2144       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2145     return NotNeeded;
2146   }
2147   if (n->is_Phi() ||
2148       n->is_CMove()) {
2149     return MaybeNeeded;
2150   }
2151   return Needed;
2152 }
2153 
2154 Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
2155   for (;;) {
2156     if (n == NULL) {
2157       return n;
2158     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2159       return n;
2160     } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2161       return n;
2162     } else if (n->is_ConstraintCast() ||
2163                n->Opcode() == Op_DecodeN ||
2164                n->Opcode() == Op_EncodeP) {
2165       n = n->in(1);
2166     } else if (n->is_Proj()) {
2167       n = n->in(0);
2168     } else {
2169       return n;
2170     }
2171   }
2172   ShouldNotReachHere();
2173   return NULL;
2174 }
2175 
2176 bool ShenandoahEnqueueBarrierNode::can_eliminate(PhaseIdealLoop* phase) {
2177   return ShenandoahHeap::heap()->traversal_gc() == NULL &&
2178          is_redundant() && ShenandoahAggressiveReferenceDiscovery;
2179 }
2180 
2181 bool ShenandoahEnqueueBarrierNode::is_redundant() {
2182   Unique_Node_List visited;
2183   Node_Stack stack(0);
2184   stack.push(this, 0);
2185 
2186   while (stack.size() > 0) {
2187     Node* n = stack.node();
2188     if (visited.member(n)) {
2189       stack.pop();
2190       continue;
2191     }
2192     visited.push(n);
2193     bool visit_users = false;
2194     switch (n->Opcode()) {
2195       case Op_CallStaticJava:
2196         if (n->as_CallStaticJava()->uncommon_trap_request() == 0) {
2197           return false;
2198         }
2199         break;
2200       case Op_CallDynamicJava:
2201       case Op_CompareAndExchangeN:
2202       case Op_CompareAndExchangeP:
2203       case Op_CompareAndSwapN:
2204       case Op_CompareAndSwapP:
2205       case Op_ShenandoahCompareAndSwapN:
2206       case Op_ShenandoahCompareAndSwapP:
2207       case Op_GetAndSetN:
2208       case Op_GetAndSetP:
2209       case Op_Return:
2210       case Op_StoreN:
2211       case Op_StoreP:
2212         return false;
2214       case Op_AddP:
2215       case Op_Allocate:
2216       case Op_AllocateArray:
2217       case Op_ArrayCopy:
2218       case Op_CmpP:
2219       case Op_LoadL:
2220       case Op_SafePoint:
2221       case Op_SubTypeCheck:
2222       case Op_StoreLConditional:
2223       case Op_StoreIConditional:
2224       case Op_FastUnlock:
2225         break;
2226       case Op_CastPP:
2227       case Op_CheckCastPP:
2228       case Op_CMoveN:
2229       case Op_CMoveP:
2230       case Op_EncodeP:
2231       case Op_Phi:
2232       case Op_ShenandoahEnqueueBarrier:
2233         visit_users = true;
2234         break;
2235       default: {
2236 #ifdef ASSERT
2237         fatal("Unknown node in is_redundant: %s", NodeClassNames[n->Opcode()]);
2238 #endif
2239         // Default to useful: better to have excess barriers, rather than miss some.
2240         return false;
2241       }
2242     }
2243 
2244     stack.pop();
2245     if (visit_users) {
2246       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2247         Node* user = n->fast_out(i);
2248         if (user != NULL) {
2249           stack.push(user, 0);
2250         }
2251       }
2252     }
2253   }
2254   return true;
2255 }
2256 
2257 Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
2258   PhaseIterGVN* igvn = phase->is_IterGVN();
2259 
2260   Node* n = next(in(1));
2261 
2262   int cont = needed(n);
2263 
2264   if (cont == NotNeeded) {
2265     return in(1);
2266   } else if (cont == MaybeNeeded) {
2267     if (igvn == NULL) {
2268       phase->record_for_igvn(this);
2269       return this;
2270     } else {
2271       ResourceMark rm;
2272       Unique_Node_List wq;
2273       uint wq_i = 0;
2274 
2275       for (;;) {
2276         if (n->is_Phi()) {
2277           for (uint i = 1; i < n->req(); i++) {
2278             Node* m = n->in(i);
2279             if (m != NULL) {
2280               wq.push(m);
2281             }
2282           }
2283         } else {
2284           assert(n->is_CMove(), "nothing else here");
2285           Node* m = n->in(CMoveNode::IfFalse);
2286           wq.push(m);
2287           m = n->in(CMoveNode::IfTrue);
2288           wq.push(m);
2289         }
2290         Node* orig_n = NULL;
2291         do {
2292           if (wq_i >= wq.size()) {
2293             return in(1);
2294           }
2295           n = wq.at(wq_i);
2296           wq_i++;
2297           orig_n = n;
2298           n = next(n);
2299           cont = needed(n);
2300           if (cont == Needed) {
2301             return this;
2302           }
2303         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2304       }
2305     }
2306   }
2307 
2308   return this;
2309 }
2310 
2311 #ifdef ASSERT
2312 static bool has_never_branch(Node* root) {
2313   for (uint i = 1; i < root->req(); i++) {
2314     Node* in = root->in(i);
2315     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2316       return true;
2317     }
2318   }
2319   return false;
2320 }
2321 #endif
2322 
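     // Build a map from CFG node to the memory state (for _alias) live at that
     // node: first record the memory definitions found by walking the raw memory
     // graph, then iterate over the CFG in reverse post order, propagating states
     // through dominators and creating or reusing memory phis at regions until a
     // fixed point is reached.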
2323 void MemoryGraphFixer::collect_memory_nodes() {
2324   Node_Stack stack(0);
2325   VectorSet visited(Thread::current()->resource_area());
2326   Node_List regions;
2327 
2328   // Walk the raw memory graph and create a mapping from CFG node to
2329   // memory node. Exclude phis for now.
2330   stack.push(_phase->C->root(), 1);
2331   do {
2332     Node* n = stack.node();
2333     int opc = n->Opcode();
2334     uint i = stack.index();
2335     if (i < n->req()) {
2336       Node* mem = NULL;
2337       if (opc == Op_Root) {
2338         Node* in = n->in(i);
2339         int in_opc = in->Opcode();
2340         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2341           mem = in->in(TypeFunc::Memory);
2342         } else if (in_opc == Op_Halt) {
2343           if (in->in(0)->is_Region()) {
2344             Node* r = in->in(0);
2345             for (uint j = 1; j < r->req(); j++) {
2346               assert(r->in(j)->Opcode() != Op_NeverBranch, "");
2347             }
2348           } else {
2349             Node* proj = in->in(0);
2350             assert(proj->is_Proj(), "");
2351             Node* in = proj->in(0);
2352             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2353             if (in->is_CallStaticJava()) {
2354               mem = in->in(TypeFunc::Memory);
2355             } else if (in->Opcode() == Op_Catch) {
2356               Node* call = in->in(0)->in(0);
2357               assert(call->is_Call(), "");
2358               mem = call->in(TypeFunc::Memory);
2359             } else if (in->Opcode() == Op_NeverBranch) {
2360               Node* head = in->in(0);
2361               assert(head->is_Region() && head->req() == 3, "unexpected infinite loop graph shape");
2362               assert(_phase->is_dominator(head, head->in(1)) || _phase->is_dominator(head, head->in(2)), "no back branch?");
2363               Node* tail = _phase->is_dominator(head, head->in(1)) ? head->in(1) : head->in(2);
2364               Node* c = tail;
2365               while (c != head) {
2366                 if (c->is_SafePoint() && !c->is_CallLeaf()) {
2367                   mem = c->in(TypeFunc::Memory);
2368                 }
2369                 c = _phase->idom(c);
2370               }
2371               assert(mem != NULL, "should have found safepoint");
2372 
2373               Node* phi_mem = NULL;
2374               for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
2375                 Node* u = head->fast_out(j);
2376                 if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
2377                   if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2378                     assert(phi_mem == NULL || phi_mem->adr_type() == TypePtr::BOTTOM, "");
2379                     phi_mem = u;
2380                   } else if (u->adr_type() == TypePtr::BOTTOM) {
2381                     assert(phi_mem == NULL || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
2382                     if (phi_mem == NULL) {
2383                       phi_mem = u;
2384                     }
2385                   }
2386                 }
2387               }
2388               if (phi_mem != NULL) {
2389                 mem = phi_mem;
2390               }
2391             }
2392           }
2393         } else {
2394 #ifdef ASSERT
2395           n->dump();
2396           in->dump();
2397 #endif
2398           ShouldNotReachHere();
2399         }
2400       } else {
2401         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2402         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2403         mem = n->in(i);
2404       }
2405       i++;
2406       stack.set_index(i);
2407       if (mem == NULL) {
2408         continue;
2409       }
2410       for (;;) {
2411         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2412           break;
2413         }
2414         if (mem->is_Phi()) {
2415           stack.push(mem, 2);
2416           mem = mem->in(1);
2417         } else if (mem->is_Proj()) {
2418           stack.push(mem, mem->req());
2419           mem = mem->in(0);
2420         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2421           mem = mem->in(TypeFunc::Memory);
2422         } else if (mem->is_MergeMem()) {
2423           MergeMemNode* mm = mem->as_MergeMem();
2424           mem = mm->memory_at(_alias);
2425         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2426           assert(_alias == Compile::AliasIdxRaw, "");
2427           stack.push(mem, mem->req());
2428           mem = mem->in(MemNode::Memory);
2429         } else {
2430 #ifdef ASSERT
2431           mem->dump();
2432 #endif
2433           ShouldNotReachHere();
2434         }
2435       }
2436     } else {
2437       if (n->is_Phi()) {
2438         // Nothing
2439       } else if (!n->is_Root()) {
2440         Node* c = get_ctrl(n);
2441         _memory_nodes.map(c->_idx, n);
2442       }
2443       stack.pop();
2444     }
2445   } while (stack.is_nonempty());
2446 
2447   // Iterate over CFG nodes in rpo and propagate memory state to
2448   // compute memory state at regions, creating new phis if needed.
2449   Node_List rpo_list;
2450   visited.clear();
2451   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2452   Node* root = rpo_list.pop();
2453   assert(root == _phase->C->root(), "");
2454 
2455   const bool trace = false;
2456 #ifdef ASSERT
2457   if (trace) {
2458     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2459       Node* c = rpo_list.at(i);
2460       if (_memory_nodes[c->_idx] != NULL) {
2461         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2462       }
2463     }
2464   }
2465 #endif
2466   uint last = _phase->C->unique();
2467 
2468 #ifdef ASSERT
2469   uint8_t max_depth = 0;
2470   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2471     IdealLoopTree* lpt = iter.current();
2472     max_depth = MAX2(max_depth, lpt->_nest);
2473   }
2474 #endif
2475 
2476   bool progress = true;
2477   int iteration = 0;
2478   Node_List dead_phis;
2479   while (progress) {
2480     progress = false;
2481     iteration++;
2482     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2483     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2484     IdealLoopTree* last_updated_ilt = NULL;
2485     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2486       Node* c = rpo_list.at(i);
2487 
2488       Node* prev_mem = _memory_nodes[c->_idx];
2489       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2490         Node* prev_region = regions[c->_idx];
2491         Node* unique = NULL;
2492         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2493           Node* m = _memory_nodes[c->in(j)->_idx];
2494           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2495           if (m != NULL) {
2496             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2497               assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), "");
2498               // continue
2499             } else if (unique == NULL) {
2500               unique = m;
2501             } else if (m == unique) {
2502               // continue
2503             } else {
2504               unique = NodeSentinel;
2505             }
2506           }
2507         }
2508         assert(unique != NULL, "empty phi???");
2509         if (unique != NodeSentinel) {
2510           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2511             dead_phis.push(prev_region);
2512           }
2513           regions.map(c->_idx, unique);
2514         } else {
2515           Node* phi = NULL;
2516           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2517             phi = prev_region;
2518             for (uint k = 1; k < c->req(); k++) {
2519               Node* m = _memory_nodes[c->in(k)->_idx];
2520               assert(m != NULL, "expect memory state");
2521               phi->set_req(k, m);
2522             }
2523           } else {
2524             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2525               Node* u = c->fast_out(j);
2526               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2527                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2528                 phi = u;
2529                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2530                   Node* m = _memory_nodes[c->in(k)->_idx];
2531                   assert(m != NULL, "expect memory state");
2532                   if (u->in(k) != m) {
2533                     phi = NULL;
2534                   }
2535                 }
2536               }
2537             }
2538             if (phi == NULL) {
2539               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2540               for (uint k = 1; k < c->req(); k++) {
2541                 Node* m = _memory_nodes[c->in(k)->_idx];
2542                 assert(m != NULL, "expect memory state");
2543                 phi->init_req(k, m);
2544               }
2545             }
2546           }
2547           assert(phi != NULL, "");
2548           regions.map(c->_idx, phi);
2549         }
2550         Node* current_region = regions[c->_idx];
2551         if (current_region != prev_region) {
2552           progress = true;
2553           if (prev_region == prev_mem) {
2554             _memory_nodes.map(c->_idx, current_region);
2555           }
2556         }
2557       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2558         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2559         assert(m != NULL, "expect memory state");
2560         if (m != prev_mem) {
2561           _memory_nodes.map(c->_idx, m);
2562           progress = true;
2563         }
2564       }
2565 #ifdef ASSERT
2566       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2567 #endif
2568     }
2569   }
2570 
2571   // Replace an existing phi with the computed memory state for that region
2572   // if they differ (the computed state may be a new phi, or a dominating
2573   // memory node if the old phi was found to be useless).
2574   while (dead_phis.size() > 0) {
2575     Node* n = dead_phis.pop();
2576     n->replace_by(_phase->C->top());
2577     n->destruct();
2578   }
2579   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2580     Node* c = rpo_list.at(i);
2581     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2582       Node* n = regions[c->_idx];
2583       if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2584         _phase->register_new_node(n, c);
2585       }
2586     }
2587   }
2588   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2589     Node* c = rpo_list.at(i);
2590     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2591       Node* n = regions[c->_idx];
2592       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2593         Node* u = c->fast_out(i);
2594         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2595             u != n) {
2596           if (u->adr_type() == TypePtr::BOTTOM) {
2597             fix_memory_uses(u, n, n, c);
2598           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2599             _phase->lazy_replace(u, n);
2600             --i; --imax;
2601           }
2602         }
2603       }
2604     }
2605   }
2606 }
2607 
2608 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2609   Node* c = _phase->get_ctrl(n);
2610   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2611     assert(c == n->in(0), "");
2612     CallNode* call = c->as_Call();
2613     CallProjections projs;
2614     call->extract_projections(&projs, true, false);
2615     if (projs.catchall_memproj != NULL) {
2616       if (projs.fallthrough_memproj == n) {
2617         c = projs.fallthrough_catchproj;
2618       } else {
2619         assert(projs.catchall_memproj == n, "");
2620         c = projs.catchall_catchproj;
2621       }
2622     }
2623   }
2624   return c;
2625 }
2626 
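// The control n is pinned to if it has one, otherwise n itself (which
// must then be a CFG node).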
2627 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2628   if (_phase->has_ctrl(n)) {
2629     return get_ctrl(n);
2630   } else {
2631     assert(n->is_CFG(), "must be a CFG node");
2632     return n;
2633   }
2634 }
2635 
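// A memory state m is usable at control c only if it exists and is itself
// located at c.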
2636 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2637   return m != NULL && get_ctrl(m) == c;
2638 }
2639 
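// Find the memory state of the tracked alias that is live at ctrl: walk up
// the dominator tree until a state located at the current control is
// found, skipping states that do not dominate the use n.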
2640 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2641   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
2642   Node* mem = _memory_nodes[ctrl->_idx];
2643   Node* c = ctrl;
2644   while (!mem_is_valid(mem, c) &&
2645          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2646     c = _phase->idom(c);
2647     mem = _memory_nodes[c->_idx];
2648   }
2649   if (n != NULL && mem_is_valid(mem, c)) {
2650     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2651       mem = next_mem(mem, _alias);
2652     }
2653     if (mem->is_MergeMem()) {
2654       mem = mem->as_MergeMem()->memory_at(_alias);
2655     }
2656     if (!mem_is_valid(mem, c)) {
2657       do {
2658         c = _phase->idom(c);
2659         mem = _memory_nodes[c->_idx];
2660       } while (!mem_is_valid(mem, c) &&
2661                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2662     }
2663   }
2664   assert(mem->bottom_type() == Type::MEMORY, "");
2665   return mem;
2666 }
2667 
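// Does the region already carry a memory Phi for the tracked alias?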
2668 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2669   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2670     Node* use = region->fast_out(i);
2671     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2672         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2673       return true;
2674     }
2675   }
2676   return false;
2677 }
2678 
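// Rewire the memory graph of the tracked alias after a new memory state
// (new_mem, at new_ctrl) has been inserted: memory users of the old state
// that the new state reaches are updated to use it, and memory Phis are
// created at regions where the old and new states merge.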
2679 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2680   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2681   const bool trace = false;
2682   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2683   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2684   GrowableArray<Node*> phis;
2685   if (mem_for_ctrl != mem) {
2686     Node* old = mem_for_ctrl;
2687     Node* prev = NULL;
2688     while (old != mem) {
2689       prev = old;
2690       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2691         assert(_alias == Compile::AliasIdxRaw, "");
2692         old = old->in(MemNode::Memory);
2693       } else if (old->Opcode() == Op_SCMemProj) {
2694         assert(_alias == Compile::AliasIdxRaw, "");
2695         old = old->in(0);
2696       } else {
2697         ShouldNotReachHere();
2698       }
2699     }
2700     assert(prev != NULL, "");
2701     if (new_ctrl != ctrl) {
2702       _memory_nodes.map(ctrl->_idx, mem);
2703       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2704     }
2705     uint input = (uint)MemNode::Memory;
2706     _phase->igvn().replace_input_of(prev, input, new_mem);
2707   } else {
2708     uses.clear();
2709     _memory_nodes.map(new_ctrl->_idx, new_mem);
2710     uses.push(new_ctrl);
2711     for (uint next = 0; next < uses.size(); next++) {
2712       Node* n = uses.at(next);
2713       assert(n->is_CFG(), "");
2714       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2715       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2716         Node* u = n->fast_out(i);
2717         if (!u->is_Root() && u->is_CFG() && u != n) {
2718           Node* m = _memory_nodes[u->_idx];
2719           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2720               !has_mem_phi(u) &&
2721               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2722             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2723             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2724 
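            // The region has no valid memory Phi for this alias yet:
            // decide whether to create one here or to simply keep
            // propagating the incoming state.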
2725             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2726               bool push = true;
2727               bool create_phi = true;
2728               if (_phase->is_dominator(new_ctrl, u)) {
2729                 create_phi = false;
2730               } else if (!_phase->C->has_irreducible_loop()) {
2731                 IdealLoopTree* loop = _phase->get_loop(ctrl);
2732                 bool do_check = true;
2733                 IdealLoopTree* l = loop;
2734                 create_phi = false;
2735                 while (l != _phase->ltree_root()) {
2736                   Node* head = l->_head;
2737                   if (head->in(0) == NULL) {
2738                     head = _phase->get_ctrl(head);
2739                   }
2740                   if (_phase->is_dominator(head, u) && _phase->is_dominator(_phase->idom(u), head)) {
2741                     create_phi = true;
2742                     do_check = false;
2743                     break;
2744                   }
2745                   l = l->_parent;
2746                 }
2747 
2748                 if (do_check) {
2749                   assert(!create_phi, "");
2750                   IdealLoopTree* u_loop = _phase->get_loop(u);
2751                   if (u_loop != _phase->ltree_root() && u_loop->is_member(loop)) {
2752                     Node* c = ctrl;
2753                     while (!_phase->is_dominator(c, u_loop->tail())) {
2754                       c = _phase->idom(c);
2755                     }
2756                     if (!_phase->is_dominator(c, u)) {
2757                       do_check = false;
2758                     }
2759                   }
2760                 }
2761 
2762                 if (do_check && _phase->is_dominator(_phase->idom(u), new_ctrl)) {
2763                   create_phi = true;
2764                 }
2765               }
2766               if (create_phi) {
2767                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2768                 _phase->register_new_node(phi, u);
2769                 phis.push(phi);
2770                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2771                 if (!mem_is_valid(m, u)) {
2772                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2773                   _memory_nodes.map(u->_idx, phi);
2774                 } else {
2775                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2776                   for (;;) {
2777                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2778                     Node* next = NULL;
2779                     if (m->is_Proj()) {
2780                       next = m->in(0);
2781                     } else {
2782                       assert(m->is_Mem() || m->is_LoadStore(), "");
2783                       assert(_alias == Compile::AliasIdxRaw, "");
2784                       next = m->in(MemNode::Memory);
2785                     }
2786                     if (_phase->get_ctrl(next) != u) {
2787                       break;
2788                     }
2789                     if (next->is_MergeMem()) {
2790                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2791                       break;
2792                     }
2793                     if (next->is_Phi()) {
2794                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2795                       break;
2796                     }
2797                     m = next;
2798                   }
2799 
2800                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2801                   assert(m->is_Mem() || m->is_LoadStore(), "");
2802                   uint input = (uint)MemNode::Memory;
2803                   _phase->igvn().replace_input_of(m, input, phi);
2804                   push = false;
2805                 }
2806               } else {
2807                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2808               }
2809               if (push) {
2810                 uses.push(u);
2811               }
2812             }
2813           } else if (!mem_is_valid(m, u) &&
2814                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2815             uses.push(u);
2816           }
2817         }
2818       }
2819     }
2820     for (int i = 0; i < phis.length(); i++) {
2821       Node* n = phis.at(i);
2822       Node* r = n->in(0);
2823       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2824       for (uint j = 1; j < n->req(); j++) {
2825         Node* m = find_mem(r->in(j), NULL);
2826         _phase->igvn().replace_input_of(n, j, m);
2827         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2828       }
2829     }
2830   }
2831   uint last = _phase->C->unique();
2832   MergeMemNode* mm = NULL;
2833   int alias = _alias;
2834   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2835   // Process loads first to not miss an anti-dependency: if the memory
2836   // edge of a store is updated before a load is processed then an
2837   // anti-dependency may be missed.
2838   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2839     Node* u = mem->out(i);
2840     if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2841       Node* m = find_mem(_phase->get_ctrl(u), u);
2842       if (m != mem) {
2843         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2844         _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2845         --i;
2846       }
2847     }
2848   }
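  // Fix the remaining uses of the old memory state: stores and other
  // memory nodes, MergeMems, memory Phis, and wide-memory users such as
  // calls and safepoints.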
2849   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2850     Node* u = mem->out(i);
2851     if (u->_idx < last) {
2852       if (u->is_Mem()) {
2853         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2854           Node* m = find_mem(_phase->get_ctrl(u), u);
2855           if (m != mem) {
2856             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2857             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2858             --i;
2859           }
2860         }
2861       } else if (u->is_MergeMem()) {
2862         MergeMemNode* u_mm = u->as_MergeMem();
2863         if (u_mm->memory_at(alias) == mem) {
2864           MergeMemNode* newmm = NULL;
2865           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2866             Node* uu = u->fast_out(j);
2867             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2868             if (uu->is_Phi()) {
2869               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2870               Node* region = uu->in(0);
2871               int nb = 0;
2872               for (uint k = 1; k < uu->req(); k++) {
2873                 if (uu->in(k) == u) {
2874                   Node* m = find_mem(region->in(k), NULL);
2875                   if (m != mem) {
2876                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2877                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2878                     if (newmm != u) {
2879                       _phase->igvn().replace_input_of(uu, k, newmm);
2880                       nb++;
2881                       --jmax;
2882                     }
2883                   }
2884                 }
2885               }
2886               if (nb > 0) {
2887                 --j;
2888               }
2889             } else {
2890               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2891               if (m != mem) {
2892                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2893                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2894                 if (newmm != u) {
2895                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2896                   --j, --jmax;
2897                 }
2898               }
2899             }
2900           }
2901         }
2902       } else if (u->is_Phi()) {
2903         assert(u->bottom_type() == Type::MEMORY, "what else?");
2904         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2905           Node* region = u->in(0);
2906           bool replaced = false;
2907           for (uint j = 1; j < u->req(); j++) {
2908             if (u->in(j) == mem) {
2909               Node* m = find_mem(region->in(j), NULL);
2910               Node* nnew = m;
2911               if (m != mem) {
2912                 if (u->adr_type() == TypePtr::BOTTOM) {
2913                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2914                   nnew = mm;
2915                 }
2916                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2917                 _phase->igvn().replace_input_of(u, j, nnew);
2918                 replaced = true;
2919               }
2920             }
2921           }
2922           if (replaced) {
2923             --i;
2924           }
2925         }
2926       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2927                  u->adr_type() == NULL) {
2928         assert(u->adr_type() != NULL ||
2929                u->Opcode() == Op_Rethrow ||
2930                u->Opcode() == Op_Return ||
2931                u->Opcode() == Op_SafePoint ||
2932                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2933                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2934                u->Opcode() == Op_CallLeaf, "");
2935         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2936         if (m != mem) {
2937           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2938           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2939           --i;
2940         }
2941       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2942         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2943         if (m != mem) {
2944           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2945           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2946           --i;
2947         }
2948       } else if (u->adr_type() != TypePtr::BOTTOM &&
2949                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2950         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2951         assert(m != mem, "");
2952         // u is on the wrong slice...
2953         assert(u->is_ClearArray(), "");
2954         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2955         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2956         --i;
2957       }
2958     }
2959   }
2960 #ifdef ASSERT
2961   assert(new_mem->outcnt() > 0, "");
2962   for (int i = 0; i < phis.length(); i++) {
2963     Node* n = phis.at(i);
2964     assert(n->outcnt() > 0, "new phi must have uses now");
2965   }
2966 #endif
2967 }
2968 
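// Build a MergeMem on top of mem with the tracked alias slice replaced by
// rep_proj, registered at rep_ctrl.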
2969 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2970   MergeMemNode* mm = MergeMemNode::make(mem);
2971   mm->set_memory_at(_alias, rep_proj);
2972   _phase->register_new_node(mm, rep_ctrl);
2973   return mm;
2974 }
2975 
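// Produce a MergeMem identical to u except that the tracked alias slice is
// rep_proj: u is updated in place when it has a single use, otherwise a
// copy is built (see the comment below about the DUIterator).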
2976 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2977   MergeMemNode* newmm = NULL;
2978   MergeMemNode* u_mm = u->as_MergeMem();
2979   Node* c = _phase->get_ctrl(u);
2980   if (_phase->is_dominator(c, rep_ctrl)) {
2981     c = rep_ctrl;
2982   } else {
2983     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2984   }
2985   if (u->outcnt() == 1) {
2986     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2987       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2988       --i;
2989     } else {
2990       _phase->igvn().rehash_node_delayed(u);
2991       u_mm->set_memory_at(_alias, rep_proj);
2992     }
2993     newmm = u_mm;
2994     _phase->set_ctrl_and_loop(u, c);
2995   } else {
2996     // Can't simply clone u and then change one of its inputs because
2997     // that adds and then removes an edge, which messes with the
2998     // DUIterator.
2999     newmm = MergeMemNode::make(u_mm->base_memory());
3000     for (uint j = 0; j < u->req(); j++) {
3001       if (j < newmm->req()) {
3002         if (j == (uint)_alias) {
3003           newmm->set_req(j, rep_proj);
3004         } else if (newmm->in(j) != u->in(j)) {
3005           newmm->set_req(j, u->in(j));
3006         }
3007       } else if (j == (uint)_alias) {
3008         newmm->add_req(rep_proj);
3009       } else {
3010         newmm->add_req(u->in(j));
3011       }
3012     }
3013     if ((uint)_alias >= u->req()) {
3014       newmm->set_memory_at(_alias, rep_proj);
3015     }
3016     _phase->register_new_node(newmm, c);
3017   }
3018   return newmm;
3019 }
3020 
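// A memory Phi is relevant here if it is on the tracked alias slice, or if
// it is a bottom-memory Phi whose region has no per-alias memory Phi
// shadowing that slice.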
3021 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
3022   if (phi->adr_type() == TypePtr::BOTTOM) {
3023     Node* region = phi->in(0);
3024     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
3025       Node* uu = region->fast_out(j);
3026       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
3027         return false;
3028       }
3029     }
3030     return true;
3031   }
3032   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
3033 }
3034 
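// Redirect uses of mem that are dominated by rep_ctrl to rep_proj, going
// through a MergeMem for wide-memory users; replacement itself is left
// untouched.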
3035 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
3036   uint last = _phase->C->unique();
3037   MergeMemNode* mm = NULL;
3038   assert(mem->bottom_type() == Type::MEMORY, "");
3039   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
3040     Node* u = mem->out(i);
3041     if (u != replacement && u->_idx < last) {
3042       if (u->is_MergeMem()) {
3043         MergeMemNode* u_mm = u->as_MergeMem();
3044         if (u_mm->memory_at(_alias) == mem) {
3045           MergeMemNode* newmm = NULL;
3046           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
3047             Node* uu = u->fast_out(j);
3048             assert(!uu->is_MergeMem(), "chain of MergeMems?");
3049             if (uu->is_Phi()) {
3050               if (should_process_phi(uu)) {
3051                 Node* region = uu->in(0);
3052                 int nb = 0;
3053                 for (uint k = 1; k < uu->req(); k++) {
3054                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
3055                     if (newmm == NULL) {
3056                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
3057                     }
3058                     if (newmm != u) {
3059                       _phase->igvn().replace_input_of(uu, k, newmm);
3060                       nb++;
3061                       --jmax;
3062                     }
3063                   }
3064                 }
3065                 if (nb > 0) {
3066                   --j;
3067                 }
3068               }
3069             } else {
3070               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
3071                 if (newmm == NULL) {
3072                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
3073                 }
3074                 if (newmm != u) {
3075                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
3076                   --j, --jmax;
3077                 }
3078               }
3079             }
3080           }
3081         }
3082       } else if (u->is_Phi()) {
3083         assert(u->bottom_type() == Type::MEMORY, "what else?");
3084         Node* region = u->in(0);
3085         if (should_process_phi(u)) {
3086           bool replaced = false;
3087           for (uint j = 1; j < u->req(); j++) {
3088             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
3089               Node* nnew = rep_proj;
3090               if (u->adr_type() == TypePtr::BOTTOM) {
3091                 if (mm == NULL) {
3092                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
3093                 }
3094                 nnew = mm;
3095               }
3096               _phase->igvn().replace_input_of(u, j, nnew);
3097               replaced = true;
3098             }
3099           }
3100           if (replaced) {
3101             --i;
3102           }
3103 
3104         }
3105       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
3106                  u->adr_type() == NULL) {
3107         assert(u->adr_type() != NULL ||
3108                u->Opcode() == Op_Rethrow ||
3109                u->Opcode() == Op_Return ||
3110                u->Opcode() == Op_SafePoint ||
3111                u->Opcode() == Op_StoreIConditional ||
3112                u->Opcode() == Op_StoreLConditional ||
3113                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
3114                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
3115                u->Opcode() == Op_CallLeaf, "%s", u->Name());
3116         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3117           if (mm == NULL) {
3118             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
3119           }
3120           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
3121           --i;
3122         }
3123       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
3124         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3125           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
3126           --i;
3127         }
3128       }
3129     }
3130   }
3131 }
3132 
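// Each load reference barrier registers itself with the barrier set's C2
// state so it can be expanded later.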
3133 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, bool native)
3134   : Node(ctrl, obj), _native(native) {
3135   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
3136 }
3137 
3138 bool ShenandoahLoadReferenceBarrierNode::is_native() const {
3139   return _native;
3140 }
3141 
3142 uint ShenandoahLoadReferenceBarrierNode::size_of() const {
3143   return sizeof(*this);
3144 }
3145 
3146 uint ShenandoahLoadReferenceBarrierNode::hash() const {
3147   return Node::hash() + (_native ? 1 : 0);
3148 }
3149 
3150 bool ShenandoahLoadReferenceBarrierNode::cmp(const Node& n) const {
3151   return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
3152          _native == ((const ShenandoahLoadReferenceBarrierNode&)n)._native;
3153 }
3154 
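// The barrier returns the reference it is applied to, so its type follows
// the value input: TOP and NULL pass through, everything else is an oop.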
3155 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
3156   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
3157     return Type::TOP;
3158   }
3159   const Type* t = in(ValueIn)->bottom_type();
3160   if (t == TypePtr::NULL_PTR) {
3161     return t;
3162   }
3163   return t->is_oopptr();
3164 }
3165 
3166 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
3167   // If the input is TOP, the result is TOP.
3168   const Type* t2 = phase->type(in(ValueIn));
3169   if (t2 == Type::TOP) return Type::TOP;
3170 
3171   if (t2 == TypePtr::NULL_PTR) {
3172     return t2;
3173   }
3174 
3175   const Type* type = t2->is_oopptr();
3176   return type;
3177 }
3178 
3179 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
3180   Node* value = in(ValueIn);
3181   if (!needs_barrier(phase, value)) {
3182     return value;
3183   }
3184   return this;
3185 }
3186 
3187 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
3188   Unique_Node_List visited;
3189   return needs_barrier_impl(phase, n, visited);
3190 }
3191 
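// Conservatively walk the definition of n to decide whether a barrier is
// needed: allocations, call results, constants, nulls and values that
// already went through a barrier do not need another one; loads and
// atomic results do.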
3192 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
3193   if (n == NULL) return false;
3194   if (visited.member(n)) {
3195     return false; // Been there.
3196   }
3197   visited.push(n);
3198 
3199   if (n->is_Allocate()) {
3200     // tty->print_cr("optimize barrier on alloc");
3201     return false;
3202   }
3203   if (n->is_Call()) {
3204     // tty->print_cr("optimize barrier on call");
3205     return false;
3206   }
3207 
3208   const Type* type = phase->type(n);
3209   if (type == Type::TOP) {
3210     return false;
3211   }
3212   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
3213     // tty->print_cr("optimize barrier on null");
3214     return false;
3215   }
3216   if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
3217     // tty->print_cr("optimize barrier on constant");
3218     return false;
3219   }
3220 
3221   switch (n->Opcode()) {
3222     case Op_AddP:
3223       return true; // TODO: Can refine?
3224     case Op_LoadP:
3225     case Op_ShenandoahCompareAndExchangeN:
3226     case Op_ShenandoahCompareAndExchangeP:
3227     case Op_CompareAndExchangeN:
3228     case Op_CompareAndExchangeP:
3229     case Op_GetAndSetN:
3230     case Op_GetAndSetP:
3231       return true;
3232     case Op_Phi: {
3233       for (uint i = 1; i < n->req(); i++) {
3234         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
3235       }
3236       return false;
3237     }
3238     case Op_CheckCastPP:
3239     case Op_CastPP:
3240       return needs_barrier_impl(phase, n->in(1), visited);
3241     case Op_Proj:
3242       return needs_barrier_impl(phase, n->in(0), visited);
3243     case Op_ShenandoahLoadReferenceBarrier:
3244       // tty->print_cr("optimize barrier on barrier");
3245       return false;
3246     case Op_Parm:
3247       // tty->print_cr("optimize barrier on input arg");
3248       return false;
3249     case Op_DecodeN:
3250     case Op_EncodeP:
3251       return needs_barrier_impl(phase, n->in(1), visited);
3252     case Op_LoadN:
3253       return true;
3254     case Op_CMoveN:
3255     case Op_CMoveP:
3256       return needs_barrier_impl(phase, n->in(2), visited) ||
3257              needs_barrier_impl(phase, n->in(3), visited);
3258     case Op_ShenandoahEnqueueBarrier:
3259       return needs_barrier_impl(phase, n->in(1), visited);
3260     case Op_CreateEx:
3261       return false;
3262     default:
3263       break;
3264   }
3265 #ifdef ASSERT
3266   tty->print("need barrier on?: ");
3267   tty->print_cr("ins:");
3268   n->dump(2);
3269   tty->print_cr("outs:");
3270   n->dump(-2);
3271   ShouldNotReachHere();
3272 #endif
3273   return true;
3274 }
3275 
3276 bool ShenandoahLoadReferenceBarrierNode::is_redundant() {
3277   Unique_Node_List visited;
3278   Node_Stack stack(0);
3279   stack.push(this, 0);
3280 
3281   // Check if the barrier is actually useful: go over nodes looking for useful uses
3282   // (e.g. memory accesses). Stop once we detect a required use. Otherwise, walk
3283   // until we run out of nodes, and then declare the barrier redundant.
3284   while (stack.size() > 0) {
3285     Node* n = stack.node();
3286     if (visited.member(n)) {
3287       stack.pop();
3288       continue;
3289     }
3290     visited.push(n);
3291     bool visit_users = false;
3292     switch (n->Opcode()) {
3293       case Op_CallStaticJava:
3294       case Op_CallDynamicJava:
3295       case Op_CallLeaf:
3296       case Op_CallLeafNoFP:
3297       case Op_CompareAndSwapL:
3298       case Op_CompareAndSwapI:
3299       case Op_CompareAndSwapB:
3300       case Op_CompareAndSwapS:
3301       case Op_CompareAndSwapN:
3302       case Op_CompareAndSwapP:
3303       case Op_CompareAndExchangeL:
3304       case Op_CompareAndExchangeI:
3305       case Op_CompareAndExchangeB:
3306       case Op_CompareAndExchangeS:
3307       case Op_CompareAndExchangeN:
3308       case Op_CompareAndExchangeP:
3309       case Op_WeakCompareAndSwapL:
3310       case Op_WeakCompareAndSwapI:
3311       case Op_WeakCompareAndSwapB:
3312       case Op_WeakCompareAndSwapS:
3313       case Op_WeakCompareAndSwapN:
3314       case Op_WeakCompareAndSwapP:
3315       case Op_ShenandoahCompareAndSwapN:
3316       case Op_ShenandoahCompareAndSwapP:
3317       case Op_ShenandoahWeakCompareAndSwapN:
3318       case Op_ShenandoahWeakCompareAndSwapP:
3319       case Op_ShenandoahCompareAndExchangeN:
3320       case Op_ShenandoahCompareAndExchangeP:
3321       case Op_GetAndSetL:
3322       case Op_GetAndSetI:
3323       case Op_GetAndSetB:
3324       case Op_GetAndSetS:
3325       case Op_GetAndSetP:
3326       case Op_GetAndSetN:
3327       case Op_GetAndAddL:
3328       case Op_GetAndAddI:
3329       case Op_GetAndAddB:
3330       case Op_GetAndAddS:
3331       case Op_FastLock:
3332       case Op_FastUnlock:
3333       case Op_Rethrow:
3334       case Op_Return:
3335       case Op_StoreB:
3336       case Op_StoreC:
3337       case Op_StoreD:
3338       case Op_StoreF:
3339       case Op_StoreL:
3340       case Op_StoreLConditional:
3341       case Op_StoreI:
3342       case Op_StoreIConditional:
3343       case Op_StoreN:
3344       case Op_StoreP:
3345       case Op_StoreVector:
3346       case Op_StrInflatedCopy:
3347       case Op_StrCompressedCopy:
3348       case Op_EncodeP:
3349       case Op_CastP2X:
3350       case Op_SafePoint:
3351       case Op_EncodeISOArray:
3352       case Op_AryEq:
3353       case Op_StrEquals:
3354       case Op_StrComp:
3355       case Op_StrIndexOf:
3356       case Op_StrIndexOfChar:
3357       case Op_HasNegatives:
3358         // Known to require barriers
3359         return false;
3360       case Op_CmpP: {
3361         if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) ||
3362             n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
3363           // One of the sides is known null, no need for barrier.
3364         } else {
3365           return false;
3366         }
3367         break;
3368       }
3369       case Op_LoadB:
3370       case Op_LoadUB:
3371       case Op_LoadUS:
3372       case Op_LoadD:
3373       case Op_LoadF:
3374       case Op_LoadL:
3375       case Op_LoadI:
3376       case Op_LoadS:
3377       case Op_LoadN:
3378       case Op_LoadP:
3379       case Op_LoadVector: {
3380         const TypePtr* adr_type = n->adr_type();
3381         int alias_idx = Compile::current()->get_alias_index(adr_type);
3382         Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
3383         ciField* field = alias_type->field();
3384         bool is_static = field != NULL && field->is_static();
3385         bool is_final = field != NULL && field->is_final();
3386 
3387         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
3388           // Loading the constant does not require barriers: it should be handled
3389           // as part of GC roots already.
3390         } else {
3391           return false;
3392         }
3393         break;
3394       }
3395       case Op_Conv2B:
3396       case Op_LoadRange:
3397       case Op_LoadKlass:
3398       case Op_LoadNKlass:
3399         // Do not require barriers
3400         break;
3401       case Op_AddP:
3402       case Op_CheckCastPP:
3403       case Op_CastPP:
3404       case Op_CMoveP:
3405       case Op_Phi:
3406       case Op_ShenandoahLoadReferenceBarrier:
3407       case Op_ShenandoahEnqueueBarrier:
3408         // Whether these need the barrier depends on their users
3409         visit_users = true;
3410         break;
3411       default: {
3412 #ifdef ASSERT
3413         fatal("Unknown node in is_redundant: %s", NodeClassNames[n->Opcode()]);
3414 #else
3415         // Default to having excess barriers rather than missing some.
3416         return false;
3417 #endif
3418       }
3419     }
3420 
3421     stack.pop();
3422     if (visit_users) {
3423       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3424         Node* user = n->fast_out(i);
3425         if (user != NULL) {
3426           stack.push(user, 0);
3427         }
3428       }
3429     }
3430   }
3431 
3432   // No need for barrier found.
3433   return true;
3434 }
3435 
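// If the barrier's value is the non-null result (CastPP) of an explicit
// null check that traps on null, return the uncommon trap call of that
// check; otherwise return NULL.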
3436 CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
3437   Node* val = in(ValueIn);
3438 
3439   const Type* val_t = igvn.type(val);
3440 
3441   if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
3442       val->Opcode() == Op_CastPP &&
3443       val->in(0) != NULL &&
3444       val->in(0)->Opcode() == Op_IfTrue &&
3445       val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
3446       val->in(0)->in(0)->is_If() &&
3447       val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
3448       val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
3449       val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
3450       val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
3451       val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
3452     assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
3453     CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
3454     return unc;
3455   }
3456   return NULL;
3457 }