1 /*
   2  * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  28 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  30 #include "gc/shenandoah/shenandoahForwarding.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  33 #include "gc/shenandoah/shenandoahRuntime.hpp"
  34 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/block.hpp"
  37 #include "opto/callnode.hpp"
  38 #include "opto/castnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/phaseX.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/runtime.hpp"
  43 #include "opto/subnode.hpp"
  44 
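// Late expansion of Shenandoah barriers: if any enqueue or load-reference barriers were
// recorded, run a dedicated PhaseIdealLoop pass (LoopOptsShenandoahExpand) to expand them,
// verify the result, and optionally attempt one more round of loop optimizations over the
// expanded graph (controlled by ShenandoahLoopOptsAfterExpansion).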
  45 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  46   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  47   if ((state->enqueue_barriers_count() +
  48        state->load_reference_barriers_count()) > 0) {
  49     bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
  50     C->clear_major_progress();
  51     PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
  52     if (C->failing()) return false;
  53     PhaseIdealLoop::verify(igvn);
  54     DEBUG_ONLY(verify_raw_mem(C->root());)
  55     if (attempt_more_loopopts) {
  56       C->set_major_progress();
  57       if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
  58         return false;
  59       }
  60       C->clear_major_progress();
  61       if (C->range_check_cast_count() > 0) {
  62         // No more loop optimizations. Remove all range check dependent CastIINodes.
  63         C->remove_range_check_casts(igvn);
  64         igvn.optimize();
  65       }
  66     }
  67   }
  68   return true;
  69 }
  70 
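// Recognizes the control-flow shape emitted by test_gc_state() below: an If on
// ((gc_state & mask) != 0), where gc_state is the thread-local gc-state byte load
// matched by is_gc_state_load().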
  71 bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
  72   if (!UseShenandoahGC) {
  73     return false;
  74   }
  75   assert(iff->is_If(), "bad input");
  76   if (iff->Opcode() != Op_If) {
  77     return false;
  78   }
  79   Node* bol = iff->in(1);
  80   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  81     return false;
  82   }
  83   Node* cmp = bol->in(1);
  84   if (cmp->Opcode() != Op_CmpI) {
  85     return false;
  86   }
  87   Node* in1 = cmp->in(1);
  88   Node* in2 = cmp->in(2);
  89   if (in2->find_int_con(-1) != 0) {
  90     return false;
  91   }
  92   if (in1->Opcode() != Op_AndI) {
  93     return false;
  94   }
  95   in2 = in1->in(2);
  96   if (in2->find_int_con(-1) != mask) {
  97     return false;
  98   }
  99   in1 = in1->in(1);
 100 
 101   return is_gc_state_load(in1);
 102 }
 103 
 104 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
 105   return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
 106 }
 107 
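// Matches a LoadB/LoadUB of the per-thread gc-state byte, i.e. a load from
// ThreadLocal + ShenandoahThreadLocalData::gc_state_offset().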
 108 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
 109   if (!UseShenandoahGC) {
 110     return false;
 111   }
 112   if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
 113     return false;
 114   }
 115   Node* addp = n->in(MemNode::Address);
 116   if (!addp->is_AddP()) {
 117     return false;
 118   }
 119   Node* base = addp->in(AddPNode::Address);
 120   Node* off = addp->in(AddPNode::Offset);
 121   if (base->Opcode() != Op_ThreadLocal) {
 122     return false;
 123   }
 124   if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
 125     return false;
 126   }
 127   return true;
 128 }
 129 
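// Walks the control graph backwards from 'start' up to the dominating 'stop' and
// reports whether any safepoint (other than a leaf call) can be reached on the way.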
 130 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
 131   assert(phase->is_dominator(stop, start), "bad inputs");
 132   ResourceMark rm;
 133   Unique_Node_List wq;
 134   wq.push(start);
 135   for (uint next = 0; next < wq.size(); next++) {
 136     Node *m = wq.at(next);
 137     if (m == stop) {
 138       continue;
 139     }
 140     if (m->is_SafePoint() && !m->is_CallLeaf()) {
 141       return true;
 142     }
 143     if (m->is_Region()) {
 144       for (uint i = 1; i < m->req(); i++) {
 145         wq.push(m->in(i));
 146       }
 147     } else {
 148       wq.push(m->in(0));
 149     }
 150   }
 151   return false;
 152 }
 153 
 154 #ifdef ASSERT
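// Debug-only helper for verify(): follows the value 'in' (through casts, AddPs, phis,
// CMoves, etc.) and checks that every path ends either at something that needs no barrier
// (NULL, constant, parameter, fresh allocation, ...) or at a ShenandoahLoadReferenceBarrier
// or ShenandoahEnqueueBarrier of the expected kind. Returns false if a path without the
// required barrier is found.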
 155 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
 156   assert(phis.size() == 0, "");
 157 
 158   while (true) {
 159     if (in->bottom_type() == TypePtr::NULL_PTR) {
 160       if (trace) {tty->print_cr("NULL");}
 161     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
 162       if (trace) {tty->print_cr("Non oop");}
 163     } else {
 164       if (in->is_ConstraintCast()) {
 165         in = in->in(1);
 166         continue;
 167       } else if (in->is_AddP()) {
 168         assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
 169         in = in->in(AddPNode::Address);
 170         continue;
 171       } else if (in->is_Con()) {
 172         if (trace) {
 173           tty->print("Found constant");
 174           in->dump();
 175         }
 176       } else if (in->Opcode() == Op_Parm) {
 177         if (trace) {
 178           tty->print("Found argument");
 179         }
 180       } else if (in->Opcode() == Op_CreateEx) {
 181         if (trace) {
 182           tty->print("Found create-exception");
 183         }
 184       } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
 185         if (trace) {
 186           tty->print("Found raw LoadP (OSR argument?)");
 187         }
 188       } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 189         if (t == ShenandoahOopStore) {
 190           uint i = 0;
 191           for (; i < phis.size(); i++) {
 192             Node* n = phis.node_at(i);
 193             if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
 194               break;
 195             }
 196           }
 197           if (i == phis.size()) {
 198             return false;
 199           }
 200         }
 201         barriers_used.push(in);
 202         if (trace) {tty->print("Found barrier"); in->dump();}
 203       } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
 204         if (t != ShenandoahOopStore) {
 205           in = in->in(1);
 206           continue;
 207         }
 208         if (trace) {tty->print("Found enqueue barrier"); in->dump();}
 209         phis.push(in, in->req());
 210         in = in->in(1);
 211         continue;
 212       } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
 213         if (trace) {
 214           tty->print("Found alloc");
 215           in->in(0)->dump();
 216         }
 217       } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
 218         if (trace) {
 219           tty->print("Found Java call");
 220         }
 221       } else if (in->is_Phi()) {
 222         if (!visited.test_set(in->_idx)) {
 223           if (trace) {tty->print("Pushed phi:"); in->dump();}
 224           phis.push(in, 2);
 225           in = in->in(1);
 226           continue;
 227         }
 228         if (trace) {tty->print("Already seen phi:"); in->dump();}
 229       } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
 230         if (!visited.test_set(in->_idx)) {
 231           if (trace) {tty->print("Pushed cmovep:"); in->dump();}
 232           phis.push(in, CMoveNode::IfTrue);
 233           in = in->in(CMoveNode::IfFalse);
 234           continue;
 235         }
 236         if (trace) {tty->print("Already seen cmovep:"); in->dump();}
 237       } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
 238         in = in->in(1);
 239         continue;
 240       } else {
 241         return false;
 242       }
 243     }
 244     bool cont = false;
 245     while (phis.is_nonempty()) {
 246       uint idx = phis.index();
 247       Node* phi = phis.node();
 248       if (idx >= phi->req()) {
 249         if (trace) {tty->print("Popped phi:"); phi->dump();}
 250         phis.pop();
 251         continue;
 252       }
 253       if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
 254       in = phi->in(idx);
 255       phis.set_index(idx+1);
 256       cont = true;
 257       break;
 258     }
 259     if (!cont) {
 260       break;
 261     }
 262   }
 263   return true;
 264 }
 265 
 266 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
 267   if (n1 != NULL) {
 268     n1->dump(+10);
 269   }
 270   if (n2 != NULL) {
 271     n2->dump(+10);
 272   }
 273   fatal("%s", msg);
 274 }
 275 
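// Debug-only whole-graph verification: walks the graph from the root and checks, node kind
// by node kind (loads, stores, compares, load-stores, runtime calls, known intrinsics),
// that every oop input which requires a barrier is covered by one.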
 276 void ShenandoahBarrierC2Support::verify(RootNode* root) {
 277   ResourceMark rm;
 278   Unique_Node_List wq;
 279   GrowableArray<Node*> barriers;
 280   Unique_Node_List barriers_used;
 281   Node_Stack phis(0);
 282   VectorSet visited(Thread::current()->resource_area());
 283   const bool trace = false;
 284   const bool verify_no_useless_barrier = false;
 285 
 286   wq.push(root);
 287   for (uint next = 0; next < wq.size(); next++) {
 288     Node *n = wq.at(next);
 289     if (n->is_Load()) {
 290       const bool trace = false;
 291       if (trace) {tty->print("Verifying"); n->dump();}
 292       if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
 293         if (trace) {tty->print_cr("Load range/klass");}
 294       } else {
 295         const TypePtr* adr_type = n->as_Load()->adr_type();
 296 
 297         if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
 298           if (trace) {tty->print_cr("Mark load");}
 299         } else if (adr_type->isa_instptr() &&
 300                    adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
 301                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
 302           if (trace) {tty->print_cr("Reference.get()");}
 303         } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 304           report_verify_failure("Shenandoah verification: Load should have barriers", n);
 305         }
 306       }
 307     } else if (n->is_Store()) {
 308       const bool trace = false;
 309 
 310       if (trace) {tty->print("Verifying"); n->dump();}
 311       if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
 312         Node* adr = n->in(MemNode::Address);
 313         bool verify = true;
 314 
 315         if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
 316           adr = adr->in(AddPNode::Address);
 317           if (adr->is_AddP()) {
 318             assert(adr->in(AddPNode::Base)->is_top(), "");
 319             adr = adr->in(AddPNode::Address);
 320             if (adr->Opcode() == Op_LoadP &&
 321                 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
 322                 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 323                 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
 324               if (trace) {tty->print_cr("SATB prebarrier");}
 325               verify = false;
 326             }
 327           }
 328         }
 329 
 330         if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 331           report_verify_failure("Shenandoah verification: Store should have barriers", n);
 332         }
 333       }
 334       if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 335         report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
 336       }
 337     } else if (n->Opcode() == Op_CmpP) {
 338       const bool trace = false;
 339 
 340       Node* in1 = n->in(1);
 341       Node* in2 = n->in(2);
 342       if (in1->bottom_type()->isa_oopptr()) {
 343         if (trace) {tty->print("Verifying"); n->dump();}
 344 
 345         bool mark_inputs = false;
 346         if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
 347             (in1->is_Con() || in2->is_Con())) {
 348           if (trace) {tty->print_cr("Comparison against a constant");}
 349           mark_inputs = true;
 350         } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
 351                    (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
 352           if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
 353           mark_inputs = true;
 354         } else {
 355           assert(in2->bottom_type()->isa_oopptr(), "");
 356 
 357           if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
 358               !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
 359             report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
 360           }
 361         }
 362         if (verify_no_useless_barrier &&
 363             mark_inputs &&
 364             (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
 365              !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
 366           phis.clear();
 367           visited.reset();
 368         }
 369       }
 370     } else if (n->is_LoadStore()) {
 371       if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
 372           !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 373         report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
 374       }
 375 
 376       if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 377         report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
 378       }
 379     } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
 380       CallNode* call = n->as_Call();
 381 
 382       static struct {
 383         const char* name;
 384         struct {
 385           int pos;
 386           verify_type t;
 387         } args[6];
 388       } calls[] = {
 389         "aescrypt_encryptBlock",
 390         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 391           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 392         "aescrypt_decryptBlock",
 393         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 394           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 395         "multiplyToLen",
 396         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
 397           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 398         "squareToLen",
 399         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
 400           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 401         "montgomery_multiply",
 402         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 403           { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 404         "montgomery_square",
 405         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
 406           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 407         "mulAdd",
 408         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 409           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 410         "vectorizedMismatch",
 411         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 412           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 413         "updateBytesCRC32",
 414         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 415           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 416         "updateBytesAdler32",
 417         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 418           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 419         "updateBytesCRC32C",
 420         { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
 421           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 422         "counterMode_AESCrypt",
 423         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 424           { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
 425         "cipherBlockChaining_encryptAESCrypt",
 426         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 427           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 428         "cipherBlockChaining_decryptAESCrypt",
 429         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 430           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 431         "shenandoah_clone_barrier",
 432         { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 433           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 434         "ghash_processBlocks",
 435         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 436           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 437         "sha1_implCompress",
 438         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 439           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 440         "sha256_implCompress",
 441         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 442           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 443         "sha512_implCompress",
 444         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 445           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 446         "sha1_implCompressMB",
 447         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 448           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 449         "sha256_implCompressMB",
 450         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 451           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 452         "sha512_implCompressMB",
 453         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 454           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 455         "encodeBlock",
 456         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 457           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 458       };
 459 
 460       if (call->is_call_to_arraycopystub()) {
 461         Node* dest = NULL;
 462         const TypeTuple* args = n->as_Call()->_tf->domain();
 463         for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 464           if (args->field_at(i)->isa_ptr()) {
 465             j++;
 466             if (j == 2) {
 467               dest = n->in(i);
 468               break;
 469             }
 470           }
 471         }
 472         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
 473             !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
 474           report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
 475         }
 476       } else if (strlen(call->_name) > 5 &&
 477                  !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
 478         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
 479           report_verify_failure("Shenandoah verification: _fill should have barriers", n);
 480         }
 481       } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
 482         // skip
 483       } else {
 484         const int calls_len = sizeof(calls) / sizeof(calls[0]);
 485         int i = 0;
 486         for (; i < calls_len; i++) {
 487           if (!strcmp(calls[i].name, call->_name)) {
 488             break;
 489           }
 490         }
 491         if (i != calls_len) {
 492           const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
 493           for (uint j = 0; j < args_len; j++) {
 494             int pos = calls[i].args[j].pos;
 495             if (pos == -1) {
 496               break;
 497             }
 498             if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
 499               report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 500             }
 501           }
 502           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 503             if (call->in(j)->bottom_type()->make_ptr() &&
 504                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 505               uint k = 0;
 506               for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
 507               if (k == args_len) {
 508                 fatal("arg %d for call %s not covered", j, call->_name);
 509               }
 510             }
 511           }
 512         } else {
 513           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 514             if (call->in(j)->bottom_type()->make_ptr() &&
 515                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 516               fatal("%s not covered", call->_name);
 517             }
 518           }
 519         }
 520       }
 521     } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 522       // skip
 523     } else if (n->is_AddP()
 524                || n->is_Phi()
 525                || n->is_ConstraintCast()
 526                || n->Opcode() == Op_Return
 527                || n->Opcode() == Op_CMoveP
 528                || n->Opcode() == Op_CMoveN
 529                || n->Opcode() == Op_Rethrow
 530                || n->is_MemBar()
 531                || n->Opcode() == Op_Conv2B
 532                || n->Opcode() == Op_SafePoint
 533                || n->is_CallJava()
 534                || n->Opcode() == Op_Unlock
 535                || n->Opcode() == Op_EncodeP
 536                || n->Opcode() == Op_DecodeN) {
 537       // nothing to do
 538     } else {
 539       static struct {
 540         int opcode;
 541         struct {
 542           int pos;
 543           verify_type t;
 544         } inputs[2];
 545       } others[] = {
 546         Op_FastLock,
 547         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 548         Op_Lock,
 549         { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
 550         Op_ArrayCopy,
 551         { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
 552         Op_StrCompressedCopy,
 553         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 554         Op_StrInflatedCopy,
 555         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 556         Op_AryEq,
 557         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 558         Op_StrIndexOf,
 559         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 560         Op_StrComp,
 561         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 562         Op_StrEquals,
 563         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 564         Op_EncodeISOArray,
 565         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 566         Op_HasNegatives,
 567         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
 568         Op_CastP2X,
 569         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 570         Op_StrIndexOfChar,
 571         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 572       };
 573 
 574       const int others_len = sizeof(others) / sizeof(others[0]);
 575       int i = 0;
 576       for (; i < others_len; i++) {
 577         if (others[i].opcode == n->Opcode()) {
 578           break;
 579         }
 580       }
 581       uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
 582       if (i != others_len) {
 583         const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
 584         for (uint j = 0; j < inputs_len; j++) {
 585           int pos = others[i].inputs[j].pos;
 586           if (pos == -1) {
 587             break;
 588           }
 589           if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
 590             report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 591           }
 592         }
 593         for (uint j = 1; j < stop; j++) {
 594           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 595               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 596             uint k = 0;
 597             for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
 598             if (k == inputs_len) {
 599               fatal("arg %d for node %s not covered", j, n->Name());
 600             }
 601           }
 602         }
 603       } else {
 604         for (uint j = 1; j < stop; j++) {
 605           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 606               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 607             fatal("%s not covered", n->Name());
 608           }
 609         }
 610       }
 611     }
 612 
 613     if (n->is_SafePoint()) {
 614       SafePointNode* sfpt = n->as_SafePoint();
 615       if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
 616         for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
 617           if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 618             phis.clear();
 619             visited.reset();
 620           }
 621         }
 622       }
 623     }
 624   }
 625 
 626   if (verify_no_useless_barrier) {
 627     for (int i = 0; i < barriers.length(); i++) {
 628       Node* n = barriers.at(i);
 629       if (!barriers_used.member(n)) {
 630         tty->print("XXX useless barrier"); n->dump(-2);
 631         ShouldNotReachHere();
 632       }
 633     }
 634   }
 635 }
 636 #endif
 637 
 638 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
 639   // That both nodes have the same control is not sufficient to prove
  640   // domination; verify that there is no path from d to n.
 641   ResourceMark rm;
 642   Unique_Node_List wq;
 643   wq.push(d);
 644   for (uint next = 0; next < wq.size(); next++) {
 645     Node *m = wq.at(next);
 646     if (m == n) {
 647       return false;
 648     }
 649     if (m->is_Phi() && m->in(0)->is_Loop()) {
 650       assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
 651     } else {
 652       if (m->is_Store() || m->is_LoadStore()) {
 653         // Take anti-dependencies into account
 654         Node* mem = m->in(MemNode::Memory);
 655         for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
 656           Node* u = mem->fast_out(i);
 657           if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
 658               phase->ctrl_or_self(u) == c) {
 659             wq.push(u);
 660           }
 661         }
 662       }
 663       for (uint i = 0; i < m->req(); i++) {
 664         if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
 665           wq.push(m->in(i));
 666         }
 667       }
 668     }
 669   }
 670   return true;
 671 }
 672 
 673 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
 674   if (d_c != n_c) {
 675     return phase->is_dominator(d_c, n_c);
 676   }
 677   return is_dominator_same_ctrl(d_c, d, n, phase);
 678 }
 679 
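// Steps from a memory state node to the memory state that precedes it, following the
// memory input appropriate for the given alias index.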
 680 Node* next_mem(Node* mem, int alias) {
 681   Node* res = NULL;
 682   if (mem->is_Proj()) {
 683     res = mem->in(0);
 684   } else if (mem->is_SafePoint() || mem->is_MemBar()) {
 685     res = mem->in(TypeFunc::Memory);
 686   } else if (mem->is_Phi()) {
 687     res = mem->in(1);
 688   } else if (mem->is_MergeMem()) {
 689     res = mem->as_MergeMem()->memory_at(alias);
 690   } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
  691     assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
 692     res = mem->in(MemNode::Memory);
 693   } else {
 694 #ifdef ASSERT
 695     mem->dump();
 696 #endif
 697     ShouldNotReachHere();
 698   }
 699   return res;
 700 }
 701 
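// Walks the control flow from 'c' up to the dominating 'dom' and checks that there is no
// branching on the way, except for uncommon trap branches and (if allow_one_proj is true)
// a single If projection, which is returned. Returns NodeSentinel if an unsupported
// control-flow shape is encountered, and NULL if no branch was found.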
 702 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
 703   Node* iffproj = NULL;
 704   while (c != dom) {
 705     Node* next = phase->idom(c);
 706     assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
 707     if (c->is_Region()) {
 708       ResourceMark rm;
 709       Unique_Node_List wq;
 710       wq.push(c);
 711       for (uint i = 0; i < wq.size(); i++) {
 712         Node *n = wq.at(i);
 713         if (n == next) {
 714           continue;
 715         }
 716         if (n->is_Region()) {
 717           for (uint j = 1; j < n->req(); j++) {
 718             wq.push(n->in(j));
 719           }
 720         } else {
 721           wq.push(n->in(0));
 722         }
 723       }
 724       for (uint i = 0; i < wq.size(); i++) {
 725         Node *n = wq.at(i);
 726         assert(n->is_CFG(), "");
 727         if (n->is_Multi()) {
 728           for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 729             Node* u = n->fast_out(j);
 730             if (u->is_CFG()) {
 731               if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 732                 return NodeSentinel;
 733               }
 734             }
 735           }
 736         }
 737       }
 738     } else  if (c->is_Proj()) {
 739       if (c->is_IfProj()) {
 740         if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
  741           // Uncommon trap branch: allowed, keep walking up.
 742         } else {
 743           if (!allow_one_proj) {
 744             return NodeSentinel;
 745           }
 746           if (iffproj == NULL) {
 747             iffproj = c;
 748           } else {
 749             return NodeSentinel;
 750           }
 751         }
 752       } else if (c->Opcode() == Op_JumpProj) {
 753         return NodeSentinel; // unsupported
 754       } else if (c->Opcode() == Op_CatchProj) {
 755         return NodeSentinel; // unsupported
 756       } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
 757         return NodeSentinel; // unsupported
 758       } else {
 759         assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
 760       }
 761     }
 762     c = next;
 763   }
 764   return iffproj;
 765 }
 766 
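// Starting from 'mem', walks the memory graph backwards until it reaches a memory state
// whose control strictly dominates 'ctrl', returning it along with its control in
// 'mem_ctrl'. Returns NULL if the walk revisits a node.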
 767 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
 768   ResourceMark rm;
 769   VectorSet wq(Thread::current()->resource_area());
 770   wq.set(mem->_idx);
 771   mem_ctrl = phase->ctrl_or_self(mem);
 772   while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
 773     mem = next_mem(mem, alias);
 774     if (wq.test_set(mem->_idx)) {
 775       return NULL;
 776     }
 777     mem_ctrl = phase->ctrl_or_self(mem);
 778   }
 779   if (mem->is_MergeMem()) {
 780     mem = mem->as_MergeMem()->memory_at(alias);
 781     mem_ctrl = phase->ctrl_or_self(mem);
 782   }
 783   return mem;
 784 }
 785 
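// Walks up the dominator tree from 'ctrl' and returns the closest memory state for the
// bottom (all-memory) slice: a memory Phi on a Region, the appropriate memory projection
// of a call, or the memory projection of a safepoint, membar or the Start node.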
 786 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
 787   Node* mem = NULL;
 788   Node* c = ctrl;
 789   do {
 790     if (c->is_Region()) {
 791       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
 792         Node* u = c->fast_out(i);
 793         if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
 794           if (u->adr_type() == TypePtr::BOTTOM) {
 795             mem = u;
 796           }
 797         }
 798       }
 799     } else {
 800       if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
 801         CallProjections projs;
 802         c->as_Call()->extract_projections(&projs, true, false);
 803         if (projs.fallthrough_memproj != NULL) {
 804           if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
 805             if (projs.catchall_memproj == NULL) {
 806               mem = projs.fallthrough_memproj;
 807             } else {
 808               if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
 809                 mem = projs.fallthrough_memproj;
 810               } else {
 811                 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
 812                 mem = projs.catchall_memproj;
 813               }
 814             }
 815           }
 816         } else {
 817           Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
 818           if (proj != NULL &&
 819               proj->adr_type() == TypePtr::BOTTOM) {
 820             mem = proj;
 821           }
 822         }
 823       } else {
 824         for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
 825           Node* u = c->fast_out(i);
 826           if (u->is_Proj() &&
 827               u->bottom_type() == Type::MEMORY &&
 828               u->adr_type() == TypePtr::BOTTOM) {
  829             assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
  830             assert(mem == NULL, "only one proj");
  831             mem = u;
 832           }
 833         }
 834         assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
 835       }
 836     }
 837     c = phase->idom(c);
 838   } while (mem == NULL);
 839   return mem;
 840 }
 841 
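// Collects into 'uses' the data uses of 'n' that have the same control as 'n', ignoring
// uses through a loop phi's back edge, so the caller can update their control later.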
 842 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
 843   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 844     Node* u = n->fast_out(i);
 845     if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
 846       uses.push(u);
 847     }
 848   }
 849 }
 850 
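// Replaces the OuterStripMinedLoop/OuterStripMinedLoopEnd pair with a plain LoopNode/IfNode
// and clears the inner CountedLoop's strip-mined flag, so the loop nest no longer appears
// strip-mined to later verification.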
 851 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
 852   OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
 853   Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
 854   phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
 855   Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
 856   phase->register_control(new_le, phase->get_loop(le), le->in(0));
 857   phase->lazy_replace(outer, new_outer);
 858   phase->lazy_replace(le, new_le);
 859   inner->clear_strip_mined();
 860 }
 861 
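// Emits the runtime gc-state check: loads the thread-local gc-state byte, ANDs it with
// 'flags' and branches on the result being non-zero. On return, 'ctrl' is the flag-set
// path and 'test_fail_ctrl' the flag-clear path.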
 862 void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
 863                                                PhaseIdealLoop* phase, int flags) {
 864   PhaseIterGVN& igvn = phase->igvn();
 865   Node* old_ctrl = ctrl;
 866 
 867   Node* thread          = new ThreadLocalNode();
 868   Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 869   Node* gc_state_addr   = new AddPNode(phase->C->top(), thread, gc_state_offset);
 870   Node* gc_state        = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
 871                                         DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
 872                                         TypeInt::BYTE, MemNode::unordered);
 873   Node* gc_state_and    = new AndINode(gc_state, igvn.intcon(flags));
 874   Node* gc_state_cmp    = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
 875   Node* gc_state_bool   = new BoolNode(gc_state_cmp, BoolTest::ne);
 876 
 877   IfNode* gc_state_iff  = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 878   ctrl                  = new IfTrueNode(gc_state_iff);
 879   test_fail_ctrl        = new IfFalseNode(gc_state_iff);
 880 
 881   IdealLoopTree* loop = phase->get_loop(old_ctrl);
 882   phase->register_control(gc_state_iff,   loop, old_ctrl);
 883   phase->register_control(ctrl,           loop, gc_state_iff);
 884   phase->register_control(test_fail_ctrl, loop, gc_state_iff);
 885 
 886   phase->register_new_node(thread,        old_ctrl);
 887   phase->register_new_node(gc_state_addr, old_ctrl);
 888   phase->register_new_node(gc_state,      old_ctrl);
 889   phase->register_new_node(gc_state_and,  old_ctrl);
 890   phase->register_new_node(gc_state_cmp,  old_ctrl);
 891   phase->register_new_node(gc_state_bool, old_ctrl);
 892 
 893   phase->set_ctrl(gc_state_offset, phase->C->root());
 894 
 895   assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
 896 }
 897 
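// If 'val' may be NULL according to its type, emits an explicit NULL check:
// 'ctrl' becomes the non-NULL path and 'null_ctrl' the NULL path.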
 898 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
 899   Node* old_ctrl = ctrl;
 900   PhaseIterGVN& igvn = phase->igvn();
 901 
 902   const Type* val_t = igvn.type(val);
 903   if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
 904     Node* null_cmp   = new CmpPNode(val, igvn.zerocon(T_OBJECT));
 905     Node* null_test  = new BoolNode(null_cmp, BoolTest::ne);
 906 
 907     IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
 908     ctrl             = new IfTrueNode(null_iff);
 909     null_ctrl        = new IfFalseNode(null_iff);
 910 
 911     IdealLoopTree* loop = phase->get_loop(old_ctrl);
 912     phase->register_control(null_iff,  loop, old_ctrl);
 913     phase->register_control(ctrl,      loop, null_iff);
 914     phase->register_control(null_ctrl, loop, null_iff);
 915 
 916     phase->register_new_node(null_cmp,  old_ctrl);
 917     phase->register_new_node(null_test, old_ctrl);
 918   }
 919 }
 920 
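// Emits the collection-set membership check: computes the region index from the object
// address, loads the corresponding byte from the in-cset fast-test table and branches on
// it. 'ctrl' becomes the in-cset path and 'not_cset_ctrl' the other one.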
 921 void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
 922   Node* old_ctrl = ctrl;
 923   PhaseIterGVN& igvn = phase->igvn();
 924 
 925   Node* raw_val        = new CastP2XNode(old_ctrl, val);
 926   Node* cset_idx       = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
 927 
 928   // Figure out the target cset address with raw pointer math.
 929   // This avoids matching AddP+LoadB that would emit inefficient code.
 930   // See JDK-8245465.
 931   Node* cset_addr_ptr  = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
 932   Node* cset_addr      = new CastP2XNode(old_ctrl, cset_addr_ptr);
 933   Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
 934   Node* cset_load_ptr  = new CastX2PNode(cset_load_addr);
 935 
 936   Node* cset_load      = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
 937                                        DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
 938                                        TypeInt::BYTE, MemNode::unordered);
 939   Node* cset_cmp       = new CmpINode(cset_load, igvn.zerocon(T_INT));
 940   Node* cset_bool      = new BoolNode(cset_cmp, BoolTest::ne);
 941 
 942   IfNode* cset_iff     = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 943   ctrl                 = new IfTrueNode(cset_iff);
 944   not_cset_ctrl        = new IfFalseNode(cset_iff);
 945 
 946   IdealLoopTree *loop = phase->get_loop(old_ctrl);
 947   phase->register_control(cset_iff,      loop, old_ctrl);
 948   phase->register_control(ctrl,          loop, cset_iff);
 949   phase->register_control(not_cset_ctrl, loop, cset_iff);
 950 
 951   phase->set_ctrl(cset_addr_ptr, phase->C->root());
 952 
 953   phase->register_new_node(raw_val,        old_ctrl);
 954   phase->register_new_node(cset_idx,       old_ctrl);
 955   phase->register_new_node(cset_addr,      old_ctrl);
 956   phase->register_new_node(cset_load_addr, old_ctrl);
 957   phase->register_new_node(cset_load_ptr,  old_ctrl);
 958   phase->register_new_node(cset_load,      old_ctrl);
 959   phase->register_new_node(cset_cmp,       old_ctrl);
 960   phase->register_new_node(cset_bool,      old_ctrl);
 961 }
 962 
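// Emits the load-reference-barrier slow-path call. Builds a MergeMem that adds the raw
// memory slice to the memory state found at 'ctrl', selects the runtime entry point
// (narrow-oop, normal or native variant) and wires up the CallLeaf with 'val' and
// 'load_addr' as arguments. On return, 'ctrl', 'val' and 'result_mem' refer to the call's
// control projection, (cast) result and memory projection.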
 963 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) {
 964   IdealLoopTree*loop = phase->get_loop(ctrl);
 965   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
 966 
 967   // The slow path stub consumes and produces raw memory in addition
 968   // to the existing memory edges
 969   Node* base = find_bottom_mem(ctrl, phase);
 970   MergeMemNode* mm = MergeMemNode::make(base);
 971   mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
 972   phase->register_new_node(mm, ctrl);
 973 
 974   address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
 975           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) :
 976           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier);
 977 
 978   address calladdr = is_native ? CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native)
 979                                : target;
 980   const char* name = is_native ? "load_reference_barrier_native" : "load_reference_barrier";
 981   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
 982 
 983   call->init_req(TypeFunc::Control, ctrl);
 984   call->init_req(TypeFunc::I_O, phase->C->top());
 985   call->init_req(TypeFunc::Memory, mm);
 986   call->init_req(TypeFunc::FramePtr, phase->C->top());
 987   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
 988   call->init_req(TypeFunc::Parms, val);
 989   call->init_req(TypeFunc::Parms+1, load_addr);
 990   phase->register_control(call, loop, ctrl);
 991   ctrl = new ProjNode(call, TypeFunc::Control);
 992   phase->register_control(ctrl, loop, call);
 993   result_mem = new ProjNode(call, TypeFunc::Memory);
 994   phase->register_new_node(result_mem, call);
 995   val = new ProjNode(call, TypeFunc::Parms);
 996   phase->register_new_node(val, call);
 997   val = new CheckCastPPNode(ctrl, val, obj_type);
 998   phase->register_new_node(val, ctrl);
 999 }
1000 
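// After a barrier has been expanded into explicit control flow ending at 'region',
// re-attaches nodes that were control dependent on the barrier's original control so that
// they now depend on 'region', while keeping the input raw memory above the barrier.
// Data nodes whose control was updated are collected into 'uses'.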
1001 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
1002   Node* ctrl = phase->get_ctrl(barrier);
1003   Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
1004 
1005   // Update the control of all nodes that should be after the
1006   // barrier control flow
1007   uses.clear();
1008   // Every node that is control dependent on the barrier's input
1009   // control will be after the expanded barrier. The raw memory (if
1010   // its memory is control dependent on the barrier's input control)
1011   // must stay above the barrier.
1012   uses_to_ignore.clear();
1013   if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1014     uses_to_ignore.push(init_raw_mem);
1015   }
1016   for (uint next = 0; next < uses_to_ignore.size(); next++) {
1017     Node *n = uses_to_ignore.at(next);
1018     for (uint i = 0; i < n->req(); i++) {
1019       Node* in = n->in(i);
1020       if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
1021         uses_to_ignore.push(in);
1022       }
1023     }
1024   }
1025   for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
1026     Node* u = ctrl->fast_out(i);
1027     if (u->_idx < last &&
1028         u != barrier &&
1029         !uses_to_ignore.member(u) &&
1030         (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
1031         (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
1032       Node* old_c = phase->ctrl_or_self(u);
1033       Node* c = old_c;
1034       if (c != ctrl ||
1035           is_dominator_same_ctrl(old_c, barrier, u, phase) ||
1036           ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
1037         phase->igvn().rehash_node_delayed(u);
1038         int nb = u->replace_edge(ctrl, region);
1039         if (u->is_CFG()) {
1040           if (phase->idom(u) == ctrl) {
1041             phase->set_idom(u, region, phase->dom_depth(region));
1042           }
1043         } else if (phase->get_ctrl(u) == ctrl) {
1044           assert(u != init_raw_mem, "should leave input raw mem above the barrier");
1045           uses.push(u);
1046         }
1047         assert(nb == 1, "more than 1 ctrl input?");
1048         --i, imax -= nb;
1049       }
1050     }
1051   }
1052 }
1053 
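// When a node 'n' and its clone live on the fall-through and catch-all paths of a call,
// builds, region by region, the Phi that merges the two versions at the point where those
// paths meet, and returns it.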
1054 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1055   Node* region = NULL;
1056   while (c != ctrl) {
1057     if (c->is_Region()) {
1058       region = c;
1059     }
1060     c = phase->idom(c);
1061   }
1062   assert(region != NULL, "");
1063   Node* phi = new PhiNode(region, n->bottom_type());
1064   for (uint j = 1; j < region->req(); j++) {
1065     Node* in = region->in(j);
1066     if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1067       phi->init_req(j, n);
1068     } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1069       phi->init_req(j, n_clone);
1070     } else {
1071       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1072     }
1073   }
1074   phase->register_new_node(phi, region);
1075   return phi;
1076 }
1077 
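// Main driver for late barrier expansion: walks the recorded enqueue and load-reference
// barriers, un-strip-mines loops that expansion would otherwise break, moves barriers out
// of awkward positions (above rethrow calls, or duplicated across a Java call's
// fall-through and exception paths), and then expands them into explicit control flow
// using the gc-state, null and cset tests and the slow-path call above.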
1078 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1079   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1080 
1081   Unique_Node_List uses;
1082   for (int i = 0; i < state->enqueue_barriers_count(); i++) {
1083     Node* barrier = state->enqueue_barrier(i);
1084     Node* ctrl = phase->get_ctrl(barrier);
1085     IdealLoopTree* loop = phase->get_loop(ctrl);
1086     if (loop->_head->is_OuterStripMinedLoop()) {
1087       // Expanding a barrier here will break loop strip mining
1088       // verification. Transform the loop so the loop nest doesn't
1089       // appear as strip mined.
1090       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1091       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1092     }
1093   }
1094 
1095   Node_Stack stack(0);
1096   Node_List clones;
1097   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1098     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1099     if (lrb->is_redundant()) {
1100       continue;
1101     }
1102 
1103     Node* ctrl = phase->get_ctrl(lrb);
1104     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1105 
1106     CallStaticJavaNode* unc = NULL;
1107     Node* unc_ctrl = NULL;
1108     Node* uncasted_val = val;
1109 
1110     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1111       Node* u = lrb->fast_out(i);
1112       if (u->Opcode() == Op_CastPP &&
1113           u->in(0) != NULL &&
1114           phase->is_dominator(u->in(0), ctrl)) {
1115         const Type* u_t = phase->igvn().type(u);
1116 
1117         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1118             u->in(0)->Opcode() == Op_IfTrue &&
1119             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1120             u->in(0)->in(0)->is_If() &&
1121             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1122             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1123             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1124             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1125             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1126           IdealLoopTree* loop = phase->get_loop(ctrl);
1127           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1128 
1129           if (!unc_loop->is_member(loop)) {
1130             continue;
1131           }
1132 
1133           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1134           assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1135           if (branch == NodeSentinel) {
1136             continue;
1137           }
1138 
1139           Node* iff = u->in(0)->in(0);
1140           Node* bol = iff->in(1)->clone();
1141           Node* cmp = bol->in(1)->clone();
1142           cmp->set_req(1, lrb);
1143           bol->set_req(1, cmp);
1144           phase->igvn().replace_input_of(iff, 1, bol);
1145           phase->set_ctrl(lrb, iff->in(0));
1146           phase->register_new_node(cmp, iff->in(0));
1147           phase->register_new_node(bol, iff->in(0));
1148           break;
1149         }
1150       }
1151     }
1152     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1153       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1154       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1155         // The rethrow call may have too many projections to be
1156         // properly handled here. Given there's no reason for a
1157         // barrier to depend on the call, move it above the call
1158         stack.push(lrb, 0);
1159         do {
1160           Node* n = stack.node();
1161           uint idx = stack.index();
1162           if (idx < n->req()) {
1163             Node* in = n->in(idx);
1164             stack.set_index(idx+1);
1165             if (in != NULL) {
1166               if (phase->has_ctrl(in)) {
1167                 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1168 #ifdef ASSERT
1169                   for (uint i = 0; i < stack.size(); i++) {
1170                     assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1171                   }
1172 #endif
1173                   stack.push(in, 0);
1174                 }
1175               } else {
1176                 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1177               }
1178             }
1179           } else {
1180             phase->set_ctrl(n, call->in(0));
1181             stack.pop();
1182           }
1183         } while(stack.size() > 0);
1184         continue;
1185       }
1186       CallProjections projs;
1187       call->extract_projections(&projs, false, false);
1188 
1189 #ifdef ASSERT
1190       VectorSet cloned(Thread::current()->resource_area());
1191 #endif
1192       Node* lrb_clone = lrb->clone();
1193       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1194       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1195 
1196       stack.push(lrb, 0);
1197       clones.push(lrb_clone);
1198 
1199       do {
1200         assert(stack.size() == clones.size(), "");
1201         Node* n = stack.node();
1202 #ifdef ASSERT
1203         if (n->is_Load()) {
1204           Node* mem = n->in(MemNode::Memory);
1205           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1206             Node* u = mem->fast_out(j);
1207             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1208           }
1209         }
1210 #endif
1211         uint idx = stack.index();
1212         Node* n_clone = clones.at(clones.size()-1);
1213         if (idx < n->outcnt()) {
1214           Node* u = n->raw_out(idx);
1215           Node* c = phase->ctrl_or_self(u);
1216           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1217             stack.set_index(idx+1);
1218             assert(!u->is_CFG(), "");
1219             stack.push(u, 0);
1220             assert(!cloned.test_set(u->_idx), "only one clone");
1221             Node* u_clone = u->clone();
1222             int nb = u_clone->replace_edge(n, n_clone);
1223             assert(nb > 0, "should have replaced some uses");
1224             phase->register_new_node(u_clone, projs.catchall_catchproj);
1225             clones.push(u_clone);
1226             phase->set_ctrl(u, projs.fallthrough_catchproj);
1227           } else {
1228             bool replaced = false;
1229             if (u->is_Phi()) {
1230               for (uint k = 1; k < u->req(); k++) {
1231                 if (u->in(k) == n) {
1232                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1233                     phase->igvn().replace_input_of(u, k, n_clone);
1234                     replaced = true;
1235                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1236                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1237                     replaced = true;
1238                   }
1239                 }
1240               }
1241             } else {
1242               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1243                 phase->igvn().rehash_node_delayed(u);
1244                 int nb = u->replace_edge(n, n_clone);
1245                 assert(nb > 0, "should have replaced some uses");
1246                 replaced = true;
1247               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1248                 if (u->is_If()) {
1249                   // Can't break If/Bool/Cmp chain
1250                   assert(n->is_Bool(), "unexpected If shape");
1251                   assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
1252                   assert(n_clone->is_Bool(), "unexpected clone");
1253                   assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
1254                   Node* bol_clone = n->clone();
1255                   Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
1256                   bol_clone->set_req(1, cmp_clone);
1257 
1258                   Node* nn = stack.node_at(stack.size()-3);
1259                   Node* nn_clone = clones.at(clones.size()-3);
1260                   assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");
1261 
1262                   int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase));
1263                   assert(nb > 0, "should have replaced some uses");
1264 
1265                   phase->register_new_node(bol_clone, u->in(0));
1266                   phase->register_new_node(cmp_clone, u->in(0));
1267 
1268                   phase->igvn().replace_input_of(u, 1, bol_clone);
1269 
1270                 } else {
1271                   phase->igvn().rehash_node_delayed(u);
1272                   int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1273                   assert(nb > 0, "should have replaced some uses");
1274                 }
1275                 replaced = true;
1276               }
1277             }
1278             if (!replaced) {
1279               stack.set_index(idx+1);
1280             }
1281           }
1282         } else {
1283           stack.pop();
1284           clones.pop();
1285         }
1286       } while (stack.size() > 0);
1287       assert(stack.size() == 0 && clones.size() == 0, "");
1288     }
1289   }
1290 
1291   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1292     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1293     if (lrb->is_redundant()) {
1294       continue;
1295     }
1296     Node* ctrl = phase->get_ctrl(lrb);
1297     IdealLoopTree* loop = phase->get_loop(ctrl);
1298     if (loop->_head->is_OuterStripMinedLoop()) {
1299       // Expanding a barrier here will break loop strip mining
1300       // verification. Transform the loop so the loop nest doesn't
1301       // appear as strip mined.
1302       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1303       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1304     }
1305   }
1306 
1307   // Expand load-reference-barriers
1308   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1309   Unique_Node_List uses_to_ignore;
1310   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1311     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1312     if (lrb->is_redundant()) {
1313       phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1314       continue;
1315     }
1316     uint last = phase->C->unique();
1317     Node* ctrl = phase->get_ctrl(lrb);
1318     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1319 
1320 
1321     Node* orig_ctrl = ctrl;
1322 
1323     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1324     Node* init_raw_mem = raw_mem;
1325     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1326 
1327     IdealLoopTree *loop = phase->get_loop(ctrl);
1328 
1329     Node* heap_stable_ctrl = NULL;
1330     Node* null_ctrl = NULL;
1331 
1332     assert(val->bottom_type()->make_oopptr(), "need oop");
1333     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1334 
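         // Expand into a region with three incoming paths: the heap-stable
         // fast path, the not-in-collection-set path, and the evacuation
         // slow path that calls the LRB stub. val_phi merges the (possibly
         // forwarded) value, raw_mem_phi the raw memory state.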
1335     enum { _heap_stable = 1, _not_cset, _evac_path, PATH_LIMIT };
1336     Node* region = new RegionNode(PATH_LIMIT);
1337     Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());
1338     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1339 
1340     // Stable path.
1341     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::HAS_FORWARDED);
1342     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1343 
1344     // Heap stable case
1345     region->init_req(_heap_stable, heap_stable_ctrl);
1346     val_phi->init_req(_heap_stable, val);
1347     raw_mem_phi->init_req(_heap_stable, raw_mem);
1348 
1349     // Test for in-cset.
1350     // Wires !in_cset(obj) to slot 2 of region and phis
1351     Node* not_cset_ctrl = NULL;
1352     test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
1353     if (not_cset_ctrl != NULL) {
1354       region->init_req(_not_cset, not_cset_ctrl);
1355       val_phi->init_req(_not_cset, val);
1356       raw_mem_phi->init_req(_not_cset, raw_mem);
1357     }
1358 
1359     // Resolve object when orig-value is in cset.
1360     // Make the unconditional resolve for fwdptr.
1361 
1362     // Call lrb-stub and wire up that path in the _evac_path slot
1363     Node* result_mem = NULL;
1364 
1365     Node* addr;
1366     if (ShenandoahSelfFixing) {
1367       VectorSet visited(Thread::current()->resource_area());
1368       addr = get_load_addr(phase, visited, lrb);
1369     } else {
1370       addr = phase->igvn().zerocon(T_OBJECT);
1371     }
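         // When the load address is an AddP, rebuild it on a copy of its
         // base that carries a control dependency on ctrl (CheckCastPP), and
         // register the rebuilt address chain at the barrier's control.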
1372     if (addr->Opcode() == Op_AddP) {
1373       Node* orig_base = addr->in(AddPNode::Base);
1374       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), true);
1375       phase->register_new_node(base, ctrl);
1376       if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1377         // Field access
1378         addr = addr->clone();
1379         addr->set_req(AddPNode::Base, base);
1380         addr->set_req(AddPNode::Address, base);
1381         phase->register_new_node(addr, ctrl);
1382       } else {
1383         Node* addr2 = addr->in(AddPNode::Address);
1384         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1385               addr2->in(AddPNode::Base) == orig_base) {
1386           addr2 = addr2->clone();
1387           addr2->set_req(AddPNode::Base, base);
1388           addr2->set_req(AddPNode::Address, base);
1389           phase->register_new_node(addr2, ctrl);
1390           addr = addr->clone();
1391           addr->set_req(AddPNode::Base, base);
1392           addr->set_req(AddPNode::Address, addr2);
1393           phase->register_new_node(addr, ctrl);
1394         }
1395       }
1396     }
1397     call_lrb_stub(ctrl, val, addr, result_mem, raw_mem, lrb->is_native(), phase);
1398     region->init_req(_evac_path, ctrl);
1399     val_phi->init_req(_evac_path, val);
1400     raw_mem_phi->init_req(_evac_path, result_mem);
1401 
1402     phase->register_control(region, loop, heap_stable_iff);
1403     Node* out_val = val_phi;
1404     phase->register_new_node(val_phi, region);
1405     phase->register_new_node(raw_mem_phi, region);
1406 
1407     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1408 
1409     ctrl = orig_ctrl;
1410 
1411     phase->igvn().replace_node(lrb, out_val);
1412 
1413     follow_barrier_uses(out_val, ctrl, uses, phase);
1414 
1415     for (uint next = 0; next < uses.size(); next++) {
1416       Node *n = uses.at(next);
1417       assert(phase->get_ctrl(n) == ctrl, "bad control");
1418       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1419       phase->set_ctrl(n, region);
1420       follow_barrier_uses(n, ctrl, uses, phase);
1421     }
1422 
1423     // The slow path call produces memory: hook the raw memory phi
1424     // from the expanded load reference barrier into the rest of the
1425     // graph, which may require adding memory phis at every post-dominated
1426     // region and at enclosing loop heads. Use the memory state collected
1427     // in memory_nodes to fix the memory graph, updating that state as
1428     // we go.
1429     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1430   }
1431   // Done expanding load-reference-barriers.
1432   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1433 
1434   for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
1435     Node* barrier = state->enqueue_barrier(i);
1436     Node* pre_val = barrier->in(1);
1437 
1438     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1439       ShouldNotReachHere();
1440       continue;
1441     }
1442 
1443     Node* ctrl = phase->get_ctrl(barrier);
1444 
1445     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1446       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1447       ctrl = ctrl->in(0)->in(0);
1448       phase->set_ctrl(barrier, ctrl);
1449     } else if (ctrl->is_CallRuntime()) {
1450       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1451       ctrl = ctrl->in(0);
1452       phase->set_ctrl(barrier, ctrl);
1453     }
1454 
1455     Node* init_ctrl = ctrl;
1456     IdealLoopTree* loop = phase->get_loop(ctrl);
1457     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1458     Node* init_raw_mem = raw_mem;
1459     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1460     Node* heap_stable_ctrl = NULL;
1461     Node* null_ctrl = NULL;
1462     uint last = phase->C->unique();
1463 
1464     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1465     Node* region = new RegionNode(PATH_LIMIT);
1466     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1467 
1468     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1469     Node* region2 = new RegionNode(PATH_LIMIT2);
1470     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1471 
1472     // Stable path.
1473     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
1474     region->init_req(_heap_stable, heap_stable_ctrl);
1475     phi->init_req(_heap_stable, raw_mem);
1476 
1477     // Null path
1478     Node* reg2_ctrl = NULL;
1479     test_null(ctrl, pre_val, null_ctrl, phase);
1480     if (null_ctrl != NULL) {
1481       reg2_ctrl = null_ctrl->in(0);
1482       region2->init_req(_null_path, null_ctrl);
1483       phi2->init_req(_null_path, raw_mem);
1484     } else {
1485       region2->del_req(_null_path);
1486       phi2->del_req(_null_path);
1487     }
1488 
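         // SATB queue fast path: load the thread-local queue index and
         // buffer; if the queue is not full (index != 0), store pre_val in
         // the next slot and update the index, otherwise fall through to the
         // runtime call below.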
1489     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1490     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1491     Node* thread = new ThreadLocalNode();
1492     phase->register_new_node(thread, ctrl);
1493     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1494     phase->register_new_node(buffer_adr, ctrl);
1495     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1496     phase->register_new_node(index_adr, ctrl);
1497 
1498     BasicType index_bt = TypeX_X->basic_type();
1499     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
1500     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1501     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1502     phase->register_new_node(index, ctrl);
1503     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1504     phase->register_new_node(index_cmp, ctrl);
1505     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1506     phase->register_new_node(index_test, ctrl);
1507     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1508     if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1509     phase->register_control(queue_full_iff, loop, ctrl);
1510     Node* not_full = new IfTrueNode(queue_full_iff);
1511     phase->register_control(not_full, loop, queue_full_iff);
1512     Node* full = new IfFalseNode(queue_full_iff);
1513     phase->register_control(full, loop, queue_full_iff);
1514 
1515     ctrl = not_full;
1516 
1517     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1518     phase->register_new_node(next_index, ctrl);
1519 
1520     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1521     phase->register_new_node(buffer, ctrl);
1522     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1523     phase->register_new_node(log_addr, ctrl);
1524     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1525     phase->register_new_node(log_store, ctrl);
1526     // update the index
1527     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1528     phase->register_new_node(index_update, ctrl);
1529 
1530     // Fast-path case
1531     region2->init_req(_fast_path, ctrl);
1532     phi2->init_req(_fast_path, index_update);
1533 
1534     ctrl = full;
1535 
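         // Queue is full: call the write_ref_field_pre runtime stub with
         // pre_val and the current thread, merging raw memory into the
         // call's memory state.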
1536     Node* base = find_bottom_mem(ctrl, phase);
1537 
1538     MergeMemNode* mm = MergeMemNode::make(base);
1539     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1540     phase->register_new_node(mm, ctrl);
1541 
1542     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1543     call->init_req(TypeFunc::Control, ctrl);
1544     call->init_req(TypeFunc::I_O, phase->C->top());
1545     call->init_req(TypeFunc::Memory, mm);
1546     call->init_req(TypeFunc::FramePtr, phase->C->top());
1547     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1548     call->init_req(TypeFunc::Parms, pre_val);
1549     call->init_req(TypeFunc::Parms+1, thread);
1550     phase->register_control(call, loop, ctrl);
1551 
1552     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1553     phase->register_control(ctrl_proj, loop, call);
1554     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1555     phase->register_new_node(mem_proj, call);
1556 
1557     // Slow-path case
1558     region2->init_req(_slow_path, ctrl_proj);
1559     phi2->init_req(_slow_path, mem_proj);
1560 
1561     phase->register_control(region2, loop, reg2_ctrl);
1562     phase->register_new_node(phi2, region2);
1563 
1564     region->init_req(_heap_unstable, region2);
1565     phi->init_req(_heap_unstable, phi2);
1566 
1567     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1568     phase->register_new_node(phi, region);
1569 
1570     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1571     for (uint next = 0; next < uses.size(); next++) {
1572       Node *n = uses.at(next);
1573       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1574       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1575       phase->set_ctrl(n, region);
1576       follow_barrier_uses(n, init_ctrl, uses, phase);
1577     }
1578     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1579 
1580     phase->igvn().replace_node(barrier, pre_val);
1581   }
1582   assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1583 
1584 }
1585 
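     // Walk back from a barrier's value input to the address the value was
     // loaded from. Returns a zero (NULL) pointer constant when no single
     // load address can be determined.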
1586 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1587   if (visited.test_set(in->_idx)) {
1588     return NULL;
1589   }
1590   switch (in->Opcode()) {
1591     case Op_Proj:
1592       return get_load_addr(phase, visited, in->in(0));
1593     case Op_CastPP:
1594     case Op_CheckCastPP:
1595     case Op_DecodeN:
1596     case Op_EncodeP:
1597       return get_load_addr(phase, visited, in->in(1));
1598     case Op_LoadN:
1599     case Op_LoadP:
1600       return in->in(MemNode::Address);
1601     case Op_CompareAndExchangeN:
1602     case Op_CompareAndExchangeP:
1603     case Op_GetAndSetN:
1604     case Op_GetAndSetP:
1605     case Op_ShenandoahCompareAndExchangeP:
1606     case Op_ShenandoahCompareAndExchangeN:
1607       // Those instructions would just have stored a different
1608       // value into the field. No use to attempt to fix it at this point.
1609       return phase->igvn().zerocon(T_OBJECT);
1610     case Op_CMoveP:
1611     case Op_CMoveN: {
1612       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1613       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1614       // Handle unambiguous cases: single address reported on both branches.
1615       if (t != NULL && f == NULL) return t;
1616       if (t == NULL && f != NULL) return f;
1617       if (t != NULL && t == f)    return t;
1618       // Ambiguity.
1619       return phase->igvn().zerocon(T_OBJECT);
1620     }
1621     case Op_Phi: {
1622       Node* addr = NULL;
1623       for (uint i = 1; i < in->req(); i++) {
1624         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1625         if (addr == NULL) {
1626           addr = addr1;
1627         }
1628         if (addr != addr1) {
1629           return phase->igvn().zerocon(T_OBJECT);
1630         }
1631       }
1632       return addr;
1633     }
1634     case Op_ShenandoahLoadReferenceBarrier:
1635       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1636     case Op_ShenandoahEnqueueBarrier:
1637       return get_load_addr(phase, visited, in->in(1));
1638     case Op_CallDynamicJava:
1639     case Op_CallLeaf:
1640     case Op_CallStaticJava:
1641     case Op_ConN:
1642     case Op_ConP:
1643     case Op_Parm:
1644     case Op_CreateEx:
1645       return phase->igvn().zerocon(T_OBJECT);
1646     default:
1647 #ifdef ASSERT
1648       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1649 #endif
1650       return phase->igvn().zerocon(T_OBJECT);
1651   }
1652 
1653 }
1654 
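     // If the gc state load feeding this test is not available at the loop
     // entry, clone the load/And/Cmp/Bool chain at the entry control so the
     // test becomes loop invariant.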
1655 void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1656   IdealLoopTree *loop = phase->get_loop(iff);
1657   Node* loop_head = loop->_head;
1658   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1659 
1660   Node* bol = iff->in(1);
1661   Node* cmp = bol->in(1);
1662   Node* andi = cmp->in(1);
1663   Node* load = andi->in(1);
1664 
1665   assert(is_gc_state_load(load), "broken");
1666   if (!phase->is_dominator(load->in(0), entry_c)) {
1667     Node* mem_ctrl = NULL;
1668     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1669     load = load->clone();
1670     load->set_req(MemNode::Memory, mem);
1671     load->set_req(0, entry_c);
1672     phase->register_new_node(load, entry_c);
1673     andi = andi->clone();
1674     andi->set_req(1, load);
1675     phase->register_new_node(andi, entry_c);
1676     cmp = cmp->clone();
1677     cmp->set_req(1, andi);
1678     phase->register_new_node(cmp, entry_c);
1679     bol = bol->clone();
1680     bol->set_req(1, cmp);
1681     phase->register_new_node(bol, entry_c);
1682 
1683     phase->igvn().replace_input_of(iff, 1, bol);
1684   }
1685 }
1686 
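     // Returns true when n is a heap stable test whose controlling region is
     // dominated by another heap stable test and every path into the region
     // comes out of one of the dominating test's projections.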
1687 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1688   if (!n->is_If() || n->is_CountedLoopEnd()) {
1689     return false;
1690   }
1691   Node* region = n->in(0);
1692 
1693   if (!region->is_Region()) {
1694     return false;
1695   }
1696   Node* dom = phase->idom(region);
1697   if (!dom->is_If()) {
1698     return false;
1699   }
1700 
1701   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1702     return false;
1703   }
1704 
1705   IfNode* dom_if = dom->as_If();
1706   Node* proj_true = dom_if->proj_out(1);
1707   Node* proj_false = dom_if->proj_out(0);
1708 
1709   for (uint i = 1; i < region->req(); i++) {
1710     if (phase->is_dominator(proj_true, region->in(i))) {
1711       continue;
1712     }
1713     if (phase->is_dominator(proj_false, region->in(i))) {
1714       continue;
1715     }
1716     return false;
1717   }
1718 
1719   return true;
1720 }
1721 
1722 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1723   assert(is_heap_stable_test(n), "no other tests");
1724   if (identical_backtoback_ifs(n, phase)) {
1725     Node* n_ctrl = n->in(0);
1726     if (phase->can_split_if(n_ctrl)) {
1727       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1728       if (is_heap_stable_test(n)) {
1729         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1730         assert(is_gc_state_load(gc_state_load), "broken");
1731         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1732         assert(is_gc_state_load(dom_gc_state_load), "broken");
1733         if (gc_state_load != dom_gc_state_load) {
1734           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1735         }
1736       }
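           // Replace the test's condition with a phi over the region that
           // records, per path, which projection of the dominating test the
           // path comes from; do_split_if() can then fold the duplicated
           // test on each path.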
1737       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1738       Node* proj_true = dom_if->proj_out(1);
1739       Node* proj_false = dom_if->proj_out(0);
1740       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1741       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1742 
1743       for (uint i = 1; i < n_ctrl->req(); i++) {
1744         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1745           bolphi->init_req(i, con_true);
1746         } else {
1747           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1748           bolphi->init_req(i, con_false);
1749         }
1750       }
1751       phase->register_new_node(bolphi, n_ctrl);
1752       phase->igvn().replace_input_of(n, 1, bolphi);
1753       phase->do_split_if(n);
1754     }
1755   }
1756 }
1757 
1758 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1759   // Find first invariant test that doesn't exit the loop
1760   LoopNode *head = loop->_head->as_Loop();
1761   IfNode* unswitch_iff = NULL;
1762   Node* n = head->in(LoopNode::LoopBackControl);
1763   int loop_has_sfpts = -1;
1764   while (n != head) {
1765     Node* n_dom = phase->idom(n);
1766     if (n->is_Region()) {
1767       if (n_dom->is_If()) {
1768         IfNode* iff = n_dom->as_If();
1769         if (iff->in(1)->is_Bool()) {
1770           BoolNode* bol = iff->in(1)->as_Bool();
1771           if (bol->in(1)->is_Cmp()) {
1772             // If the condition is invariant and not a loop exit,
1773             // then we have found a reason to unswitch.
1774             if (is_heap_stable_test(iff) &&
1775                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1776               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1777               if (loop_has_sfpts == -1) {
1778                 for(uint i = 0; i < loop->_body.size(); i++) {
1779                   Node *m = loop->_body[i];
1780                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1781                     loop_has_sfpts = 1;
1782                     break;
1783                   }
1784                 }
1785                 if (loop_has_sfpts == -1) {
1786                   loop_has_sfpts = 0;
1787                 }
1788               }
1789               if (!loop_has_sfpts) {
1790                 unswitch_iff = iff;
1791               }
1792             }
1793           }
1794         }
1795       }
1796     }
1797     n = n_dom;
1798   }
1799   return unswitch_iff;
1800 }
1801 
1802 
1803 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1804   Node_List heap_stable_tests;
1805   stack.push(phase->C->start(), 0);
1806   do {
1807     Node* n = stack.node();
1808     uint i = stack.index();
1809 
1810     if (i < n->outcnt()) {
1811       Node* u = n->raw_out(i);
1812       stack.set_index(i+1);
1813       if (!visited.test_set(u->_idx)) {
1814         stack.push(u, 0);
1815       }
1816     } else {
1817       stack.pop();
1818       if (n->is_If() && is_heap_stable_test(n)) {
1819         heap_stable_tests.push(n);
1820       }
1821     }
1822   } while (stack.size() > 0);
1823 
1824   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1825     Node* n = heap_stable_tests.at(i);
1826     assert(is_heap_stable_test(n), "only evacuation test");
1827     merge_back_to_back_tests(n, phase);
1828   }
1829 
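       // When no further loop optimization pass is expected, try to unswitch
       // innermost reducible loops on their heap stable test.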
1830   if (!phase->C->major_progress()) {
1831     VectorSet seen(Thread::current()->resource_area());
1832     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1833       Node* n = heap_stable_tests.at(i);
1834       IdealLoopTree* loop = phase->get_loop(n);
1835       if (loop != phase->ltree_root() &&
1836           loop->_child == NULL &&
1837           !loop->_irreducible) {
1838         Node* head = loop->_head;
1839         if (head->is_Loop() &&
1840             (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1841             !seen.test_set(head->_idx)) {
1842           IfNode* iff = find_unswitching_candidate(loop, phase);
1843           if (iff != NULL) {
1844             Node* bol = iff->in(1);
1845             if (head->as_Loop()->is_strip_mined()) {
1846               head->as_Loop()->verify_strip_mined(0);
1847             }
1848             move_gc_state_test_out_of_loop(iff, phase);
1849 
1850             AutoNodeBudget node_budget(phase);
1851 
1852             if (loop->policy_unswitching(phase)) {
1853               if (head->as_Loop()->is_strip_mined()) {
1854                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
1855                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
1856               }
1857               phase->do_unswitching(loop, old_new);
1858             } else {
1859               // Not proceeding with unswitching. Move load back in
1860               // the loop.
1861               phase->igvn().replace_input_of(iff, 1, bol);
1862             }
1863           }
1864         }
1865       }
1866     }
1867   }
1868 }
1869 
1870 #ifdef ASSERT
1871 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
1872   const bool trace = false;
1873   ResourceMark rm;
1874   Unique_Node_List nodes;
1875   Unique_Node_List controls;
1876   Unique_Node_List memories;
1877 
1878   nodes.push(root);
1879   for (uint next = 0; next < nodes.size(); next++) {
1880     Node *n  = nodes.at(next);
1881     if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
1882       controls.push(n);
1883       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
1884       for (uint next2 = 0; next2 < controls.size(); next2++) {
1885         Node *m = controls.at(next2);
1886         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
1887           Node* u = m->fast_out(i);
1888           if (u->is_CFG() && !u->is_Root() &&
1889               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
1890               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
1891             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
1892             controls.push(u);
1893           }
1894         }
1895       }
1896       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
1897       for (uint next2 = 0; next2 < memories.size(); next2++) {
1898         Node *m = memories.at(next2);
1899         assert(m->bottom_type() == Type::MEMORY, "");
1900         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
1901           Node* u = m->fast_out(i);
1902           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
1903             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
1904             memories.push(u);
1905           } else if (u->is_LoadStore()) {
1906             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
1907             memories.push(u->find_out_with(Op_SCMemProj));
1908           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
1909             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
1910             memories.push(u);
1911           } else if (u->is_Phi()) {
1912             assert(u->bottom_type() == Type::MEMORY, "");
1913             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
1914               assert(controls.member(u->in(0)), "");
1915               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
1916               memories.push(u);
1917             }
1918           } else if (u->is_SafePoint() || u->is_MemBar()) {
1919             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
1920               Node* uu = u->fast_out(j);
1921               if (uu->bottom_type() == Type::MEMORY) {
1922                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
1923                 memories.push(uu);
1924               }
1925             }
1926           }
1927         }
1928       }
1929       for (uint next2 = 0; next2 < controls.size(); next2++) {
1930         Node *m = controls.at(next2);
1931         if (m->is_Region()) {
1932           bool all_in = true;
1933           for (uint i = 1; i < m->req(); i++) {
1934             if (!controls.member(m->in(i))) {
1935               all_in = false;
1936               break;
1937             }
1938           }
1939           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
1940           bool found_phi = false;
1941           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
1942             Node* u = m->fast_out(j);
1943             if (u->is_Phi() && memories.member(u)) {
1944               found_phi = true;
1945               for (uint i = 1; i < u->req() && found_phi; i++) {
1946                 Node* k = u->in(i);
1947                 if (memories.member(k) != controls.member(m->in(i))) {
1948                   found_phi = false;
1949                 }
1950               }
1951             }
1952           }
1953           assert(found_phi || all_in, "");
1954         }
1955       }
1956       controls.clear();
1957       memories.clear();
1958     }
1959     for (uint i = 0; i < n->len(); ++i) {
1960       Node *m = n->in(i);
1961       if (m != NULL) {
1962         nodes.push(m);
1963       }
1964     }
1965   }
1966 }
1967 #endif
1968 
1969 ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
1970   ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
1971 }
1972 
1973 const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
1974   if (in(1) == NULL || in(1)->is_top()) {
1975     return Type::TOP;
1976   }
1977   const Type* t = in(1)->bottom_type();
1978   if (t == TypePtr::NULL_PTR) {
1979     return t;
1980   }
1981   return t->is_oopptr();
1982 }
1983 
1984 const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
1985   if (in(1) == NULL) {
1986     return Type::TOP;
1987   }
1988   const Type* t = phase->type(in(1));
1989   if (t == Type::TOP) {
1990     return Type::TOP;
1991   }
1992   if (t == TypePtr::NULL_PTR) {
1993     return t;
1994   }
1995   return t->is_oopptr();
1996 }
1997 
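     // Classify whether value n still needs an enqueue (SATB pre) barrier:
     // nulls, constants, fresh allocations and already enqueued values do
     // not; for Phi and CMove every input has to be examined.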
1998 int ShenandoahEnqueueBarrierNode::needed(Node* n) {
1999   if (n == NULL ||
2000       n->is_Allocate() ||
2001       n->Opcode() == Op_ShenandoahEnqueueBarrier ||
2002       n->bottom_type() == TypePtr::NULL_PTR ||
2003       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2004     return NotNeeded;
2005   }
2006   if (n->is_Phi() ||
2007       n->is_CMove()) {
2008     return MaybeNeeded;
2009   }
2010   return Needed;
2011 }
2012 
2013 Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
2014   for (;;) {
2015     if (n == NULL) {
2016       return n;
2017     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2018       return n;
2019     } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2020       return n;
2021     } else if (n->is_ConstraintCast() ||
2022                n->Opcode() == Op_DecodeN ||
2023                n->Opcode() == Op_EncodeP) {
2024       n = n->in(1);
2025     } else if (n->is_Proj()) {
2026       n = n->in(0);
2027     } else {
2028       return n;
2029     }
2030   }
2031   ShouldNotReachHere();
2032   return NULL;
2033 }
2034 
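     // Try to prove the enqueue barrier redundant: follow casts, projections,
     // Phis and CMoves back from the input and return the bare input if
     // every reaching value is one that does not need a barrier.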
2035 Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
2036   PhaseIterGVN* igvn = phase->is_IterGVN();
2037 
2038   Node* n = next(in(1));
2039 
2040   int cont = needed(n);
2041 
2042   if (cont == NotNeeded) {
2043     return in(1);
2044   } else if (cont == MaybeNeeded) {
2045     if (igvn == NULL) {
2046       phase->record_for_igvn(this);
2047       return this;
2048     } else {
2049       ResourceMark rm;
2050       Unique_Node_List wq;
2051       uint wq_i = 0;
2052 
2053       for (;;) {
2054         if (n->is_Phi()) {
2055           for (uint i = 1; i < n->req(); i++) {
2056             Node* m = n->in(i);
2057             if (m != NULL) {
2058               wq.push(m);
2059             }
2060           }
2061         } else {
2062           assert(n->is_CMove(), "nothing else here");
2063           Node* m = n->in(CMoveNode::IfFalse);
2064           wq.push(m);
2065           m = n->in(CMoveNode::IfTrue);
2066           wq.push(m);
2067         }
2068         Node* orig_n = NULL;
2069         do {
2070           if (wq_i >= wq.size()) {
2071             return in(1);
2072           }
2073           n = wq.at(wq_i);
2074           wq_i++;
2075           orig_n = n;
2076           n = next(n);
2077           cont = needed(n);
2078           if (cont == Needed) {
2079             return this;
2080           }
2081         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2082       }
2083     }
2084   }
2085 
2086   return this;
2087 }
2088 
2089 #ifdef ASSERT
2090 static bool has_never_branch(Node* root) {
2091   for (uint i = 1; i < root->req(); i++) {
2092     Node* in = root->in(i);
2093     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2094       return true;
2095     }
2096   }
2097   return false;
2098 }
2099 #endif
2100 
2101 void MemoryGraphFixer::collect_memory_nodes() {
2102   Node_Stack stack(0);
2103   VectorSet visited(Thread::current()->resource_area());
2104   Node_List regions;
2105 
2106   // Walk the raw memory graph and create a mapping from CFG node to
2107   // memory node. Exclude phis for now.
2108   stack.push(_phase->C->root(), 1);
2109   do {
2110     Node* n = stack.node();
2111     int opc = n->Opcode();
2112     uint i = stack.index();
2113     if (i < n->req()) {
2114       Node* mem = NULL;
2115       if (opc == Op_Root) {
2116         Node* in = n->in(i);
2117         int in_opc = in->Opcode();
2118         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2119           mem = in->in(TypeFunc::Memory);
2120         } else if (in_opc == Op_Halt) {
2121           if (in->in(0)->is_Region()) {
2122             Node* r = in->in(0);
2123             for (uint j = 1; j < r->req(); j++) {
2124               assert(r->in(j)->Opcode() != Op_NeverBranch, "");
2125             }
2126           } else {
2127             Node* proj = in->in(0);
2128             assert(proj->is_Proj(), "");
2129             Node* in = proj->in(0);
2130             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2131             if (in->is_CallStaticJava()) {
2132               mem = in->in(TypeFunc::Memory);
2133             } else if (in->Opcode() == Op_Catch) {
2134               Node* call = in->in(0)->in(0);
2135               assert(call->is_Call(), "");
2136               mem = call->in(TypeFunc::Memory);
2137             } else if (in->Opcode() == Op_NeverBranch) {
2138               Node* head = in->in(0);
2139               assert(head->is_Region(), "unexpected infinite loop graph shape");
2140 
2141               Node* phi_mem = NULL;
2142               for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
2143                 Node* u = head->fast_out(j);
2144                 if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
2145                   if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2146                     assert(phi_mem == NULL || phi_mem->adr_type() == TypePtr::BOTTOM, "");
2147                     phi_mem = u;
2148                   } else if (u->adr_type() == TypePtr::BOTTOM) {
2149                     assert(phi_mem == NULL || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
2150                     if (phi_mem == NULL) {
2151                       phi_mem = u;
2152                     }
2153                   }
2154                 }
2155               }
2156               if (phi_mem == NULL) {
2157                 for (uint j = 1; j < head->req(); j++) {
2158                   Node* tail = head->in(j);
2159                   if (!_phase->is_dominator(head, tail)) {
2160                     continue;
2161                   }
2162                   Node* c = tail;
2163                   while (c != head) {
2164                     if (c->is_SafePoint() && !c->is_CallLeaf()) {
2165                       Node* m = c->in(TypeFunc::Memory);
2166                       if (m->is_MergeMem()) {
2167                         m = m->as_MergeMem()->memory_at(_alias);
2168                       }
2169                       assert(mem == NULL || mem == m, "several memory states");
2170                       mem = m;
2171                     }
2172                     c = _phase->idom(c);
2173                   }
2174                   assert(mem != NULL, "should have found safepoint");
2175                 }
2176                 assert(mem != NULL, "should have found safepoint");
2177               } else {
2178                 mem = phi_mem;
2179               }
2180             }
2181           }
2182         } else {
2183 #ifdef ASSERT
2184           n->dump();
2185           in->dump();
2186 #endif
2187           ShouldNotReachHere();
2188         }
2189       } else {
2190         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2191         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2192         mem = n->in(i);
2193       }
2194       i++;
2195       stack.set_index(i);
2196       if (mem == NULL) {
2197         continue;
2198       }
2199       for (;;) {
2200         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2201           break;
2202         }
2203         if (mem->is_Phi()) {
2204           stack.push(mem, 2);
2205           mem = mem->in(1);
2206         } else if (mem->is_Proj()) {
2207           stack.push(mem, mem->req());
2208           mem = mem->in(0);
2209         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2210           mem = mem->in(TypeFunc::Memory);
2211         } else if (mem->is_MergeMem()) {
2212           MergeMemNode* mm = mem->as_MergeMem();
2213           mem = mm->memory_at(_alias);
2214         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2215           assert(_alias == Compile::AliasIdxRaw, "");
2216           stack.push(mem, mem->req());
2217           mem = mem->in(MemNode::Memory);
2218         } else {
2219 #ifdef ASSERT
2220           mem->dump();
2221 #endif
2222           ShouldNotReachHere();
2223         }
2224       }
2225     } else {
2226       if (n->is_Phi()) {
2227         // Nothing
2228       } else if (!n->is_Root()) {
2229         Node* c = get_ctrl(n);
2230         _memory_nodes.map(c->_idx, n);
2231       }
2232       stack.pop();
2233     }
2234   } while (stack.is_nonempty());
2235 
2236   // Iterate over CFG nodes in rpo and propagate memory state to
2237   // compute memory state at regions, creating new phis if needed.
2238   Node_List rpo_list;
2239   visited.clear();
2240   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2241   Node* root = rpo_list.pop();
2242   assert(root == _phase->C->root(), "");
2243 
2244   const bool trace = false;
2245 #ifdef ASSERT
2246   if (trace) {
2247     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2248       Node* c = rpo_list.at(i);
2249       if (_memory_nodes[c->_idx] != NULL) {
2250         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2251       }
2252     }
2253   }
2254 #endif
2255   uint last = _phase->C->unique();
2256 
2257 #ifdef ASSERT
2258   uint8_t max_depth = 0;
2259   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2260     IdealLoopTree* lpt = iter.current();
2261     max_depth = MAX2(max_depth, lpt->_nest);
2262   }
2263 #endif
2264 
2265   bool progress = true;
2266   int iteration = 0;
2267   Node_List dead_phis;
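       // Iterate to a fixed point: the memory state at loop back edges is
       // not known on the first pass, so the number of iterations is bounded
       // by the loop nesting depth (see the assert below).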
2268   while (progress) {
2269     progress = false;
2270     iteration++;
2271     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2272     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2273 
2274     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2275       Node* c = rpo_list.at(i);
2276 
2277       Node* prev_mem = _memory_nodes[c->_idx];
2278       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2279         Node* prev_region = regions[c->_idx];
2280         Node* unique = NULL;
2281         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2282           Node* m = _memory_nodes[c->in(j)->_idx];
2283           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2284           if (m != NULL) {
2285             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2286               assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2287               // continue
2288             } else if (unique == NULL) {
2289               unique = m;
2290             } else if (m == unique) {
2291               // continue
2292             } else {
2293               unique = NodeSentinel;
2294             }
2295           }
2296         }
2297         assert(unique != NULL, "empty phi???");
2298         if (unique != NodeSentinel) {
2299           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2300             dead_phis.push(prev_region);
2301           }
2302           regions.map(c->_idx, unique);
2303         } else {
2304           Node* phi = NULL;
2305           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2306             phi = prev_region;
2307             for (uint k = 1; k < c->req(); k++) {
2308               Node* m = _memory_nodes[c->in(k)->_idx];
2309               assert(m != NULL, "expect memory state");
2310               phi->set_req(k, m);
2311             }
2312           } else {
2313             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2314               Node* u = c->fast_out(j);
2315               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2316                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2317                 phi = u;
2318                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2319                   Node* m = _memory_nodes[c->in(k)->_idx];
2320                   assert(m != NULL, "expect memory state");
2321                   if (u->in(k) != m) {
2322                     phi = NULL;
2323                   }
2324                 }
2325               }
2326             }
2327             if (phi == NULL) {
2328               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2329               for (uint k = 1; k < c->req(); k++) {
2330                 Node* m = _memory_nodes[c->in(k)->_idx];
2331                 assert(m != NULL, "expect memory state");
2332                 phi->init_req(k, m);
2333               }
2334             }
2335           }
2336           assert(phi != NULL, "");
2337           regions.map(c->_idx, phi);
2338         }
2339         Node* current_region = regions[c->_idx];
2340         if (current_region != prev_region) {
2341           progress = true;
2342           if (prev_region == prev_mem) {
2343             _memory_nodes.map(c->_idx, current_region);
2344           }
2345         }
2346       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2347         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2348         assert(m != NULL, "expect memory state");
2349         if (m != prev_mem) {
2350           _memory_nodes.map(c->_idx, m);
2351           progress = true;
2352         }
2353       }
2354 #ifdef ASSERT
2355       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2356 #endif
2357     }
2358   }
2359 
2360   // Replace existing phi with computed memory state for that region
2361   // if different (could be a new phi or a dominating memory node if
2362   // that phi was found to be useless).
2363   while (dead_phis.size() > 0) {
2364     Node* n = dead_phis.pop();
2365     n->replace_by(_phase->C->top());
2366     n->destruct();
2367   }
2368   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2369     Node* c = rpo_list.at(i);
2370     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2371       Node* n = regions[c->_idx];
2372       if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2373         _phase->register_new_node(n, c);
2374       }
2375     }
2376   }
2377   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2378     Node* c = rpo_list.at(i);
2379     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2380       Node* n = regions[c->_idx];
2381       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2382         Node* u = c->fast_out(i);
2383         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2384             u != n) {
2385           if (u->adr_type() == TypePtr::BOTTOM) {
2386             fix_memory_uses(u, n, n, c);
2387           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2388             _phase->lazy_replace(u, n);
2389             --i; --imax;
2390           }
2391         }
2392       }
2393     }
2394   }
2395 }
2396 
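     // Control for n, except that memory projections of a call with an
     // exception handler are attributed to the matching catch projection, so
     // memory state is tracked separately on the fall-through and exception
     // paths.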
2397 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2398   Node* c = _phase->get_ctrl(n);
2399   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2400     assert(c == n->in(0), "");
2401     CallNode* call = c->as_Call();
2402     CallProjections projs;
2403     call->extract_projections(&projs, true, false);
2404     if (projs.catchall_memproj != NULL) {
2405       if (projs.fallthrough_memproj == n) {
2406         c = projs.fallthrough_catchproj;
2407       } else {
2408         assert(projs.catchall_memproj == n, "");
2409         c = projs.catchall_catchproj;
2410       }
2411     }
2412   }
2413   return c;
2414 }
2415 
2416 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2417   if (_phase->has_ctrl(n)) {
2418     return get_ctrl(n);
2419   } else {
2420     assert(n->is_CFG(), "must be a CFG node");
2421     return n;
2422   }
2423 }
2424 
2425 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2426   return m != NULL && get_ctrl(m) == c;
2427 }
2428 
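     // Find the memory state recorded for this fixer's alias that is valid
     // at ctrl (for use by n, which may be NULL), walking up the dominator
     // tree when no state is recorded at ctrl itself.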
2429 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2430   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
2431   Node* mem = _memory_nodes[ctrl->_idx];
2432   Node* c = ctrl;
2433   while (!mem_is_valid(mem, c) &&
2434          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2435     c = _phase->idom(c);
2436     mem = _memory_nodes[c->_idx];
2437   }
2438   if (n != NULL && mem_is_valid(mem, c)) {
2439     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2440       mem = next_mem(mem, _alias);
2441     }
2442     if (mem->is_MergeMem()) {
2443       mem = mem->as_MergeMem()->memory_at(_alias);
2444     }
2445     if (!mem_is_valid(mem, c)) {
2446       do {
2447         c = _phase->idom(c);
2448         mem = _memory_nodes[c->_idx];
2449       } while (!mem_is_valid(mem, c) &&
2450                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2451     }
2452   }
2453   assert(mem->bottom_type() == Type::MEMORY, "");
2454   return mem;
2455 }
2456 
2457 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2458   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2459     Node* use = region->fast_out(i);
2460     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2461         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2462       return true;
2463     }
2464   }
2465   return false;
2466 }
2467 
2468 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2469   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2470   const bool trace = false;
2471   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2472   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2473   GrowableArray<Node*> phis;
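       // Two cases: if the memory state at ctrl differs from the barrier's
       // input memory, splice new_mem into the local store chain between
       // them; otherwise walk the CFG below new_ctrl and create or reuse
       // memory phis at regions that merge paths with different memory
       // states.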
2474   if (mem_for_ctrl != mem) {
2475     Node* old = mem_for_ctrl;
2476     Node* prev = NULL;
2477     while (old != mem) {
2478       prev = old;
2479       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2480         assert(_alias == Compile::AliasIdxRaw, "");
2481         old = old->in(MemNode::Memory);
2482       } else if (old->Opcode() == Op_SCMemProj) {
2483         assert(_alias == Compile::AliasIdxRaw, "");
2484         old = old->in(0);
2485       } else {
2486         ShouldNotReachHere();
2487       }
2488     }
2489     assert(prev != NULL, "");
2490     if (new_ctrl != ctrl) {
2491       _memory_nodes.map(ctrl->_idx, mem);
2492       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2493     }
2494     uint input = (uint)MemNode::Memory;
2495     _phase->igvn().replace_input_of(prev, input, new_mem);
2496   } else {
2497     uses.clear();
2498     _memory_nodes.map(new_ctrl->_idx, new_mem);
2499     uses.push(new_ctrl);
2500     for (uint next = 0; next < uses.size(); next++) {
2501       Node *n = uses.at(next);
2502       assert(n->is_CFG(), "");
2503       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2504       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2505         Node* u = n->fast_out(i);
2506         if (!u->is_Root() && u->is_CFG() && u != n) {
2507           Node* m = _memory_nodes[u->_idx];
2508           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2509               !has_mem_phi(u) &&
2510               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2511             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2512             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2513 
2514             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2515               bool push = true;
2516               bool create_phi = true;
2517               if (_phase->is_dominator(new_ctrl, u)) {
2518                 create_phi = false;
2519               }
2520               if (create_phi) {
2521                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2522                 _phase->register_new_node(phi, u);
2523                 phis.push(phi);
2524                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2525                 if (!mem_is_valid(m, u)) {
2526                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2527                   _memory_nodes.map(u->_idx, phi);
2528                 } else {
2529                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2530                   for (;;) {
2531                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2532                     Node* next = NULL;
2533                     if (m->is_Proj()) {
2534                       next = m->in(0);
2535                     } else {
2536                       assert(m->is_Mem() || m->is_LoadStore(), "");
2537                       assert(_alias == Compile::AliasIdxRaw, "");
2538                       next = m->in(MemNode::Memory);
2539                     }
2540                     if (_phase->get_ctrl(next) != u) {
2541                       break;
2542                     }
2543                     if (next->is_MergeMem()) {
2544                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2545                       break;
2546                     }
2547                     if (next->is_Phi()) {
2548                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2549                       break;
2550                     }
2551                     m = next;
2552                   }
2553 
2554                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2555                   assert(m->is_Mem() || m->is_LoadStore(), "");
2556                   uint input = (uint)MemNode::Memory;
2557                   _phase->igvn().replace_input_of(m, input, phi);
2558                   push = false;
2559                 }
2560               } else {
2561                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2562               }
2563               if (push) {
2564                 uses.push(u);
2565               }
2566             }
2567           } else if (!mem_is_valid(m, u) &&
2568                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2569             uses.push(u);
2570           }
2571         }
2572       }
2573     }
2574     for (int i = 0; i < phis.length(); i++) {
2575       Node* n = phis.at(i);
2576       Node* r = n->in(0);
2577       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2578       for (uint j = 1; j < n->req(); j++) {
2579         Node* m = find_mem(r->in(j), NULL);
2580         _phase->igvn().replace_input_of(n, j, m);
2581         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2582       }
2583     }
2584   }
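       // Re-point memory uses of the original memory state to the state now
       // valid at their control.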
2585   uint last = _phase->C->unique();
2586   MergeMemNode* mm = NULL;
2587   int alias = _alias;
2588   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2589   // Process loads first so that no anti-dependency is missed: if the
2590   // memory edge of a store were updated before a load is processed,
2591   // the anti-dependency between them could be lost.
2592   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2593     Node* u = mem->out(i);
2594     if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2595       Node* m = find_mem(_phase->get_ctrl(u), u);
2596       if (m != mem) {
2597         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2598         _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2599         --i;
2600       }
2601     }
2602   }
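  // Now rewire the remaining memory users: other memory nodes on this slice, MergeMems, memory Phis, and wide (bottom) memory users such as calls and safepoints.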
2603   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2604     Node* u = mem->out(i);
2605     if (u->_idx < last) {
2606       if (u->is_Mem()) {
2607         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2608           Node* m = find_mem(_phase->get_ctrl(u), u);
2609           if (m != mem) {
2610             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2611             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2612             --i;
2613           }
2614         }
2615       } else if (u->is_MergeMem()) {
2616         MergeMemNode* u_mm = u->as_MergeMem();
2617         if (u_mm->memory_at(alias) == mem) {
2618           MergeMemNode* newmm = NULL;
2619           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2620             Node* uu = u->fast_out(j);
2621             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2622             if (uu->is_Phi()) {
2623               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2624               Node* region = uu->in(0);
2625               int nb = 0;
2626               for (uint k = 1; k < uu->req(); k++) {
2627                 if (uu->in(k) == u) {
2628                   Node* m = find_mem(region->in(k), NULL);
2629                   if (m != mem) {
2630                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2631                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2632                     if (newmm != u) {
2633                       _phase->igvn().replace_input_of(uu, k, newmm);
2634                       nb++;
2635                       --jmax;
2636                     }
2637                   }
2638                 }
2639               }
2640               if (nb > 0) {
2641                 --j;
2642               }
2643             } else {
2644               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2645               if (m != mem) {
2646                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2647                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2648                 if (newmm != u) {
2649                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2650                   --j, --jmax;
2651                 }
2652               }
2653             }
2654           }
2655         }
2656       } else if (u->is_Phi()) {
2657         assert(u->bottom_type() == Type::MEMORY, "what else?");
2658         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2659           Node* region = u->in(0);
2660           bool replaced = false;
2661           for (uint j = 1; j < u->req(); j++) {
2662             if (u->in(j) == mem) {
2663               Node* m = find_mem(region->in(j), NULL);
2664               Node* nnew = m;
2665               if (m != mem) {
2666                 if (u->adr_type() == TypePtr::BOTTOM) {
2667                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2668                   nnew = mm;
2669                 }
2670                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2671                 _phase->igvn().replace_input_of(u, j, nnew);
2672                 replaced = true;
2673               }
2674             }
2675           }
2676           if (replaced) {
2677             --i;
2678           }
2679         }
2680       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2681                  u->adr_type() == NULL) {
2682         assert(u->adr_type() != NULL ||
2683                u->Opcode() == Op_Rethrow ||
2684                u->Opcode() == Op_Return ||
2685                u->Opcode() == Op_SafePoint ||
2686                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2687                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2688                u->Opcode() == Op_CallLeaf, "");
2689         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2690         if (m != mem) {
2691           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2692           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2693           --i;
2694         }
2695       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2696         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2697         if (m != mem) {
2698           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2699           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2700           --i;
2701         }
2702       } else if (u->adr_type() != TypePtr::BOTTOM &&
2703                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2704         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2705         assert(m != mem, "");
2706         // u is on the wrong slice...
2707         assert(u->is_ClearArray(), "");
2708         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2709         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2710         --i;
2711       }
2712     }
2713   }
2714 #ifdef ASSERT
2715   assert(new_mem->outcnt() > 0, "");
2716   for (int i = 0; i < phis.length(); i++) {
2717     Node* n = phis.at(i);
2718     assert(n->outcnt() > 0, "new phi must have uses now");
2719   }
2720 #endif
2721 }
2722 
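// Build a MergeMem based on 'mem' with the slice for _alias replaced by rep_proj, and register it at rep_ctrl.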
2723 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2724   MergeMemNode* mm = MergeMemNode::make(mem);
2725   mm->set_memory_at(_alias, rep_proj);
2726   _phase->register_new_node(mm, rep_ctrl);
2727   return mm;
2728 }
2729 
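// Make MergeMem 'u' provide rep_proj on the _alias slice: when 'u' has a single use it is updated in place, otherwise a clone is created so that other users keep seeing the original memory state.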
2730 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2731   MergeMemNode* newmm = NULL;
2732   MergeMemNode* u_mm = u->as_MergeMem();
2733   Node* c = _phase->get_ctrl(u);
2734   if (_phase->is_dominator(c, rep_ctrl)) {
2735     c = rep_ctrl;
2736   } else {
2737     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2738   }
2739   if (u->outcnt() == 1) {
2740     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2741       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2742       --i;
2743     } else {
2744       _phase->igvn().rehash_node_delayed(u);
2745       u_mm->set_memory_at(_alias, rep_proj);
2746     }
2747     newmm = u_mm;
2748     _phase->set_ctrl_and_loop(u, c);
2749   } else {
2750     // Can't simply clone u and then change one of its inputs, because
2751     // that adds and then removes an edge, which messes with the
2752     // DUIterator.
2753     newmm = MergeMemNode::make(u_mm->base_memory());
2754     for (uint j = 0; j < u->req(); j++) {
2755       if (j < newmm->req()) {
2756         if (j == (uint)_alias) {
2757           newmm->set_req(j, rep_proj);
2758         } else if (newmm->in(j) != u->in(j)) {
2759           newmm->set_req(j, u->in(j));
2760         }
2761       } else if (j == (uint)_alias) {
2762         newmm->add_req(rep_proj);
2763       } else {
2764         newmm->add_req(u->in(j));
2765       }
2766     }
2767     if ((uint)_alias >= u->req()) {
2768       newmm->set_memory_at(_alias, rep_proj);
2769     }
2770     _phase->register_new_node(newmm, c);
2771   }
2772   return newmm;
2773 }
2774 
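// A memory Phi is processed if it is on this alias slice, or if it merges wide (bottom) memory and its region has no narrower memory Phi already covering this slice.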
2775 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2776   if (phi->adr_type() == TypePtr::BOTTOM) {
2777     Node* region = phi->in(0);
2778     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2779       Node* uu = region->fast_out(j);
2780       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2781         return false;
2782       }
2783     }
2784     return true;
2785   }
2786   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2787 }
2788 
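// Redirect memory uses of 'mem' that are dominated by rep_ctrl so they use rep_proj instead, wrapping rep_proj in a MergeMem for users of wide (bottom) memory.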
2789 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2790   uint last = _phase->C->unique();
2791   MergeMemNode* mm = NULL;
2792   assert(mem->bottom_type() == Type::MEMORY, "");
2793   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2794     Node* u = mem->out(i);
2795     if (u != replacement && u->_idx < last) {
2796       if (u->is_MergeMem()) {
2797         MergeMemNode* u_mm = u->as_MergeMem();
2798         if (u_mm->memory_at(_alias) == mem) {
2799           MergeMemNode* newmm = NULL;
2800           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2801             Node* uu = u->fast_out(j);
2802             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2803             if (uu->is_Phi()) {
2804               if (should_process_phi(uu)) {
2805                 Node* region = uu->in(0);
2806                 int nb = 0;
2807                 for (uint k = 1; k < uu->req(); k++) {
2808                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2809                     if (newmm == NULL) {
2810                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2811                     }
2812                     if (newmm != u) {
2813                       _phase->igvn().replace_input_of(uu, k, newmm);
2814                       nb++;
2815                       --jmax;
2816                     }
2817                   }
2818                 }
2819                 if (nb > 0) {
2820                   --j;
2821                 }
2822               }
2823             } else {
2824               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2825                 if (newmm == NULL) {
2826                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2827                 }
2828                 if (newmm != u) {
2829                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2830                   --j, --jmax;
2831                 }
2832               }
2833             }
2834           }
2835         }
2836       } else if (u->is_Phi()) {
2837         assert(u->bottom_type() == Type::MEMORY, "what else?");
2838         Node* region = u->in(0);
2839         if (should_process_phi(u)) {
2840           bool replaced = false;
2841           for (uint j = 1; j < u->req(); j++) {
2842             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2843               Node* nnew = rep_proj;
2844               if (u->adr_type() == TypePtr::BOTTOM) {
2845                 if (mm == NULL) {
2846                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2847                 }
2848                 nnew = mm;
2849               }
2850               _phase->igvn().replace_input_of(u, j, nnew);
2851               replaced = true;
2852             }
2853           }
2854           if (replaced) {
2855             --i;
2856           }
2857 
2858         }
2859       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2860                  u->adr_type() == NULL) {
2861         assert(u->adr_type() != NULL ||
2862                u->Opcode() == Op_Rethrow ||
2863                u->Opcode() == Op_Return ||
2864                u->Opcode() == Op_SafePoint ||
2865                u->Opcode() == Op_StoreIConditional ||
2866                u->Opcode() == Op_StoreLConditional ||
2867                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2868                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2869                u->Opcode() == Op_CallLeaf, "%s", u->Name());
2870         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2871           if (mm == NULL) {
2872             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2873           }
2874           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2875           --i;
2876         }
2877       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2878         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2879           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
2880           --i;
2881         }
2882       }
2883     }
2884   }
2885 }
2886 
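// The load reference barrier makes sure a loaded reference is safe to use (points to the up-to-date copy of the object). New nodes register themselves with the barrier set state so they can be found and expanded later.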
2887 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, bool native)
2888 : Node(ctrl, obj), _native(native) {
2889   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
2890 }
2891 
2892 bool ShenandoahLoadReferenceBarrierNode::is_native() const {
2893   return _native;
2894 }
2895 
2896 uint ShenandoahLoadReferenceBarrierNode::size_of() const {
2897   return sizeof(*this);
2898 }
2899 
2900 uint ShenandoahLoadReferenceBarrierNode::hash() const {
2901   return Node::hash() + (_native ? 1 : 0);
2902 }
2903 
2904 bool ShenandoahLoadReferenceBarrierNode::cmp(const Node& n) const {
2905   return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
2906          _native == ((const ShenandoahLoadReferenceBarrierNode&)n)._native;
2907 }
2908 
2909 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2910   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
2911     return Type::TOP;
2912   }
2913   const Type* t = in(ValueIn)->bottom_type();
2914   if (t == TypePtr::NULL_PTR) {
2915     return t;
2916   }
2917   return t->is_oopptr();
2918 }
2919 
2920 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
2921   // Either input is TOP ==> the result is TOP
2922   const Type *t2 = phase->type(in(ValueIn));
2923   if (t2 == Type::TOP) return Type::TOP;
2924 
2925   if (t2 == TypePtr::NULL_PTR) {
2926     return t2;
2927   }
2928 
2929   const Type* type = t2->is_oopptr();
2930   return type;
2931 }
2932 
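// The barrier folds away when its input value provably never needs a barrier (e.g. null, constants, freshly allocated objects).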
2933 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
2934   Node* value = in(ValueIn);
2935   if (!needs_barrier(phase, value)) {
2936     return value;
2937   }
2938   return this;
2939 }
2940 
2941 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
2942   Unique_Node_List visited;
2943   return needs_barrier_impl(phase, n, visited);
2944 }
2945 
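// Recursive walk over the definitions feeding 'n': returns false for values known not to need a barrier (fresh allocations, call results, constants, nulls, parameters, other barriers), true for loads, address arithmetic and anything unrecognized.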
2946 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
2947   if (n == NULL) return false;
2948   if (visited.member(n)) {
2949     return false; // Been there.
2950   }
2951   visited.push(n);
2952 
2953   if (n->is_Allocate()) {
2954     // tty->print_cr("optimize barrier on alloc");
2955     return false;
2956   }
2957   if (n->is_Call()) {
2958     // tty->print_cr("optimize barrier on call");
2959     return false;
2960   }
2961 
2962   const Type* type = phase->type(n);
2963   if (type == Type::TOP) {
2964     return false;
2965   }
2966   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
2967     // tty->print_cr("optimize barrier on null");
2968     return false;
2969   }
2970   if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
2971     // tty->print_cr("optimize barrier on constant");
2972     return false;
2973   }
2974 
2975   switch (n->Opcode()) {
2976     case Op_AddP:
2977       return true; // TODO: Can refine?
2978     case Op_LoadP:
2979     case Op_ShenandoahCompareAndExchangeN:
2980     case Op_ShenandoahCompareAndExchangeP:
2981     case Op_CompareAndExchangeN:
2982     case Op_CompareAndExchangeP:
2983     case Op_GetAndSetN:
2984     case Op_GetAndSetP:
2985       return true;
2986     case Op_Phi: {
2987       for (uint i = 1; i < n->req(); i++) {
2988         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
2989       }
2990       return false;
2991     }
2992     case Op_CheckCastPP:
2993     case Op_CastPP:
2994       return needs_barrier_impl(phase, n->in(1), visited);
2995     case Op_Proj:
2996       return needs_barrier_impl(phase, n->in(0), visited);
2997     case Op_ShenandoahLoadReferenceBarrier:
2998       // tty->print_cr("optimize barrier on barrier");
2999       return false;
3000     case Op_Parm:
3001       // tty->print_cr("optimize barrier on input arg");
3002       return false;
3003     case Op_DecodeN:
3004     case Op_EncodeP:
3005       return needs_barrier_impl(phase, n->in(1), visited);
3006     case Op_LoadN:
3007       return true;
3008     case Op_CMoveN:
3009     case Op_CMoveP:
3010       return needs_barrier_impl(phase, n->in(2), visited) ||
3011              needs_barrier_impl(phase, n->in(3), visited);
3012     case Op_ShenandoahEnqueueBarrier:
3013       return needs_barrier_impl(phase, n->in(1), visited);
3014     case Op_CreateEx:
3015       return false;
3016     default:
3017       break;
3018   }
3019 #ifdef ASSERT
3020   tty->print("need barrier on?: ");
3021   tty->print_cr("ins:");
3022   n->dump(2);
3023   tty->print_cr("outs:");
3024   n->dump(-2);
3025   ShouldNotReachHere();
3026 #endif
3027   return true;
3028 }
3029 
3030 bool ShenandoahLoadReferenceBarrierNode::is_redundant() {
3031   Unique_Node_List visited;
3032   Node_Stack stack(0);
3033   stack.push(this, 0);
3034 
3035   // Check if the barrier is actually useful: go over nodes looking for useful uses
3036   // (e.g. memory accesses). Stop once we detect a required use. Otherwise, walk
3037   // until we run out of nodes, and then declare the barrier redundant.
3038   while (stack.size() > 0) {
3039     Node* n = stack.node();
3040     if (visited.member(n)) {
3041       stack.pop();
3042       continue;
3043     }
3044     visited.push(n);
3045     bool visit_users = false;
3046     switch (n->Opcode()) {
3047       case Op_CallStaticJava:
3048       case Op_CallDynamicJava:
3049       case Op_CallLeaf:
3050       case Op_CallLeafNoFP:
3051       case Op_CompareAndSwapL:
3052       case Op_CompareAndSwapI:
3053       case Op_CompareAndSwapB:
3054       case Op_CompareAndSwapS:
3055       case Op_CompareAndSwapN:
3056       case Op_CompareAndSwapP:
3057       case Op_CompareAndExchangeL:
3058       case Op_CompareAndExchangeI:
3059       case Op_CompareAndExchangeB:
3060       case Op_CompareAndExchangeS:
3061       case Op_CompareAndExchangeN:
3062       case Op_CompareAndExchangeP:
3063       case Op_WeakCompareAndSwapL:
3064       case Op_WeakCompareAndSwapI:
3065       case Op_WeakCompareAndSwapB:
3066       case Op_WeakCompareAndSwapS:
3067       case Op_WeakCompareAndSwapN:
3068       case Op_WeakCompareAndSwapP:
3069       case Op_ShenandoahCompareAndSwapN:
3070       case Op_ShenandoahCompareAndSwapP:
3071       case Op_ShenandoahWeakCompareAndSwapN:
3072       case Op_ShenandoahWeakCompareAndSwapP:
3073       case Op_ShenandoahCompareAndExchangeN:
3074       case Op_ShenandoahCompareAndExchangeP:
3075       case Op_GetAndSetL:
3076       case Op_GetAndSetI:
3077       case Op_GetAndSetB:
3078       case Op_GetAndSetS:
3079       case Op_GetAndSetP:
3080       case Op_GetAndSetN:
3081       case Op_GetAndAddL:
3082       case Op_GetAndAddI:
3083       case Op_GetAndAddB:
3084       case Op_GetAndAddS:
3085       case Op_ShenandoahEnqueueBarrier:
3086       case Op_FastLock:
3087       case Op_FastUnlock:
3088       case Op_Rethrow:
3089       case Op_Return:
3090       case Op_StoreB:
3091       case Op_StoreC:
3092       case Op_StoreD:
3093       case Op_StoreF:
3094       case Op_StoreL:
3095       case Op_StoreLConditional:
3096       case Op_StoreI:
3097       case Op_StoreIConditional:
3098       case Op_StoreN:
3099       case Op_StoreP:
3100       case Op_StoreVector:
3101       case Op_StrInflatedCopy:
3102       case Op_StrCompressedCopy:
3103       case Op_EncodeP:
3104       case Op_CastP2X:
3105       case Op_SafePoint:
3106       case Op_EncodeISOArray:
3107       case Op_AryEq:
3108       case Op_StrEquals:
3109       case Op_StrComp:
3110       case Op_StrIndexOf:
3111       case Op_StrIndexOfChar:
3112       case Op_HasNegatives:
3113         // Known to require barriers
3114         return false;
3115       case Op_CmpP: {
3116         if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) ||
3117             n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
3118           // One of the sides is known null, no need for barrier.
3119         } else {
3120           return false;
3121         }
3122         break;
3123       }
3124       case Op_LoadB:
3125       case Op_LoadUB:
3126       case Op_LoadUS:
3127       case Op_LoadD:
3128       case Op_LoadF:
3129       case Op_LoadL:
3130       case Op_LoadI:
3131       case Op_LoadS:
3132       case Op_LoadN:
3133       case Op_LoadP:
3134       case Op_LoadVector: {
3135         const TypePtr* adr_type = n->adr_type();
3136         int alias_idx = Compile::current()->get_alias_index(adr_type);
3137         Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
3138         ciField* field = alias_type->field();
3139         bool is_static = field != NULL && field->is_static();
3140         bool is_final = field != NULL && field->is_final();
3141 
3142         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
3143           // Loading the constant does not require barriers: it should be handled
3144           // as part of GC roots already.
3145         } else {
3146           return false;
3147         }
3148         break;
3149       }
3150       case Op_Conv2B:
3151       case Op_LoadRange:
3152       case Op_LoadKlass:
3153       case Op_LoadNKlass:
3154         // Do not require barriers
3155         break;
3156       case Op_AddP:
3157       case Op_CheckCastPP:
3158       case Op_CastPP:
3159       case Op_CMoveP:
3160       case Op_Phi:
3161       case Op_ShenandoahLoadReferenceBarrier:
3162         // Whether or not these need barriers depends on their users
3163         visit_users = true;
3164         break;
3165       default: {
3166 #ifdef ASSERT
3167         fatal("Unknown node in is_redundant: %s", NodeClassNames[n->Opcode()]);
3168 #else
3169         // Default to having excess barriers, rather than missing some.
3170         return false;
3171 #endif
3172       }
3173     }
3174 
3175     stack.pop();
3176     if (visit_users) {
3177       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3178         Node* user = n->fast_out(i);
3179         if (user != NULL) {
3180           stack.push(user, 0);
3181         }
3182       }
3183     }
3184   }
3185 
3186   // No need for barrier found.
3187   return true;
3188 }