1 /*
   2  * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  28 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  30 #include "gc/shenandoah/shenandoahForwarding.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  33 #include "gc/shenandoah/shenandoahRuntime.hpp"
  34 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/block.hpp"
  37 #include "opto/callnode.hpp"
  38 #include "opto/castnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/phaseX.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/runtime.hpp"
  43 #include "opto/subnode.hpp"
  44 
  45 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  46   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  47   if ((state->enqueue_barriers_count() +
  48        state->load_reference_barriers_count()) > 0) {
  49     bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
  50     C->clear_major_progress();
  51     PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
  52     if (C->failing()) return false;
  53     PhaseIdealLoop::verify(igvn);
  54     DEBUG_ONLY(verify_raw_mem(C->root());)
  55     if (attempt_more_loopopts) {
  56       C->set_major_progress();
  57       if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
  58         return false;
  59       }
  60       C->clear_major_progress();
  61       if (C->range_check_cast_count() > 0) {
  62         // No more loop optimizations. Remove all range check dependent CastIINodes.
  63         C->remove_range_check_casts(igvn);
  64         igvn.optimize();
  65       }
  66     }
  67   }
  68   return true;
  69 }
  70 
  71 bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
  72   if (!UseShenandoahGC) {
  73     return false;
  74   }
  75   assert(iff->is_If(), "bad input");
  76   if (iff->Opcode() != Op_If) {
  77     return false;
  78   }
  79   Node* bol = iff->in(1);
  80   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  81     return false;
  82   }
  83   Node* cmp = bol->in(1);
  84   if (cmp->Opcode() != Op_CmpI) {
  85     return false;
  86   }
  87   Node* in1 = cmp->in(1);
  88   Node* in2 = cmp->in(2);
  89   if (in2->find_int_con(-1) != 0) {
  90     return false;
  91   }
  92   if (in1->Opcode() != Op_AndI) {
  93     return false;
  94   }
  95   in2 = in1->in(2);
  96   if (in2->find_int_con(-1) != mask) {
  97     return false;
  98   }
  99   in1 = in1->in(1);
 100 
 101   return is_gc_state_load(in1);
 102 }
 103 
 104 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
 105   return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
 106 }
 107 
 108 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
 109   if (!UseShenandoahGC) {
 110     return false;
 111   }
 112   if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
 113     return false;
 114   }
 115   Node* addp = n->in(MemNode::Address);
 116   if (!addp->is_AddP()) {
 117     return false;
 118   }
 119   Node* base = addp->in(AddPNode::Address);
 120   Node* off = addp->in(AddPNode::Offset);
 121   if (base->Opcode() != Op_ThreadLocal) {
 122     return false;
 123   }
 124   if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
 125     return false;
 126   }
 127   return true;
 128 }
 129 
 130 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
 131   assert(phase->is_dominator(stop, start), "bad inputs");
 132   ResourceMark rm;
 133   Unique_Node_List wq;
 134   wq.push(start);
 135   for (uint next = 0; next < wq.size(); next++) {
 136     Node *m = wq.at(next);
 137     if (m == stop) {
 138       continue;
 139     }
 140     if (m->is_SafePoint() && !m->is_CallLeaf()) {
 141       return true;
 142     }
 143     if (m->is_Region()) {
 144       for (uint i = 1; i < m->req(); i++) {
 145         wq.push(m->in(i));
 146       }
 147     } else {
 148       wq.push(m->in(0));
 149     }
 150   }
 151   return false;
 152 }
 153 
 154 #ifdef ASSERT
 155 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
 156   assert(phis.size() == 0, "");
 157 
 158   while (true) {
 159     if (in->bottom_type() == TypePtr::NULL_PTR) {
 160       if (trace) {tty->print_cr("NULL");}
 161     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
 162       if (trace) {tty->print_cr("Non oop");}
 163     } else {
 164       if (in->is_ConstraintCast()) {
 165         in = in->in(1);
 166         continue;
 167       } else if (in->is_AddP()) {
 168         assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
 169         in = in->in(AddPNode::Address);
 170         continue;
 171       } else if (in->is_Con()) {
 172         if (trace) {
 173           tty->print("Found constant");
 174           in->dump();
 175         }
 176       } else if (in->Opcode() == Op_Parm) {
 177         if (trace) {
 178           tty->print("Found argument");
 179         }
 180       } else if (in->Opcode() == Op_CreateEx) {
 181         if (trace) {
 182           tty->print("Found create-exception");
 183         }
 184       } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
 185         if (trace) {
 186           tty->print("Found raw LoadP (OSR argument?)");
 187         }
 188       } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 189         if (t == ShenandoahOopStore) {
 190           uint i = 0;
 191           for (; i < phis.size(); i++) {
 192             Node* n = phis.node_at(i);
 193             if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
 194               break;
 195             }
 196           }
 197           if (i == phis.size()) {
 198             return false;
 199           }
 200         }
 201         barriers_used.push(in);
 202         if (trace) {tty->print("Found barrier"); in->dump();}
 203       } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
 204         if (t != ShenandoahOopStore) {
 205           in = in->in(1);
 206           continue;
 207         }
 208         if (trace) {tty->print("Found enqueue barrier"); in->dump();}
 209         phis.push(in, in->req());
 210         in = in->in(1);
 211         continue;
 212       } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
 213         if (trace) {
 214           tty->print("Found alloc");
 215           in->in(0)->dump();
 216         }
 217       } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
 218         if (trace) {
 219           tty->print("Found Java call");
 220         }
 221       } else if (in->is_Phi()) {
 222         if (!visited.test_set(in->_idx)) {
 223           if (trace) {tty->print("Pushed phi:"); in->dump();}
 224           phis.push(in, 2);
 225           in = in->in(1);
 226           continue;
 227         }
 228         if (trace) {tty->print("Already seen phi:"); in->dump();}
 229       } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
 230         if (!visited.test_set(in->_idx)) {
 231           if (trace) {tty->print("Pushed cmovep:"); in->dump();}
 232           phis.push(in, CMoveNode::IfTrue);
 233           in = in->in(CMoveNode::IfFalse);
 234           continue;
 235         }
 236         if (trace) {tty->print("Already seen cmovep:"); in->dump();}
 237       } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
 238         in = in->in(1);
 239         continue;
 240       } else {
 241         return false;
 242       }
 243     }
 244     bool cont = false;
 245     while (phis.is_nonempty()) {
 246       uint idx = phis.index();
 247       Node* phi = phis.node();
 248       if (idx >= phi->req()) {
 249         if (trace) {tty->print("Popped phi:"); phi->dump();}
 250         phis.pop();
 251         continue;
 252       }
 253       if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
 254       in = phi->in(idx);
 255       phis.set_index(idx+1);
 256       cont = true;
 257       break;
 258     }
 259     if (!cont) {
 260       break;
 261     }
 262   }
 263   return true;
 264 }
 265 
 266 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
 267   if (n1 != NULL) {
 268     n1->dump(+10);
 269   }
 270   if (n2 != NULL) {
 271     n2->dump(+10);
 272   }
 273   fatal("%s", msg);
 274 }
 275 
 276 void ShenandoahBarrierC2Support::verify(RootNode* root) {
 277   ResourceMark rm;
 278   Unique_Node_List wq;
 279   GrowableArray<Node*> barriers;
 280   Unique_Node_List barriers_used;
 281   Node_Stack phis(0);
 282   VectorSet visited(Thread::current()->resource_area());
 283   const bool trace = false;
 284   const bool verify_no_useless_barrier = false;
 285 
 286   wq.push(root);
 287   for (uint next = 0; next < wq.size(); next++) {
 288     Node *n = wq.at(next);
 289     if (n->is_Load()) {
 290       const bool trace = false;
 291       if (trace) {tty->print("Verifying"); n->dump();}
 292       if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
 293         if (trace) {tty->print_cr("Load range/klass");}
 294       } else {
 295         const TypePtr* adr_type = n->as_Load()->adr_type();
 296 
 297         if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
 298           if (trace) {tty->print_cr("Mark load");}
 299         } else if (adr_type->isa_instptr() &&
 300                    adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
 301                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
 302           if (trace) {tty->print_cr("Reference.get()");}
 303         } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 304           report_verify_failure("Shenandoah verification: Load should have barriers", n);
 305         }
 306       }
 307     } else if (n->is_Store()) {
 308       const bool trace = false;
 309 
 310       if (trace) {tty->print("Verifying"); n->dump();}
 311       if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
 312         Node* adr = n->in(MemNode::Address);
 313         bool verify = true;
 314 
 315         if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
 316           adr = adr->in(AddPNode::Address);
 317           if (adr->is_AddP()) {
 318             assert(adr->in(AddPNode::Base)->is_top(), "");
 319             adr = adr->in(AddPNode::Address);
 320             if (adr->Opcode() == Op_LoadP &&
 321                 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
 322                 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 323                 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
 324               if (trace) {tty->print_cr("SATB prebarrier");}
 325               verify = false;
 326             }
 327           }
 328         }
 329 
 330         if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 331           report_verify_failure("Shenandoah verification: Store should have barriers", n);
 332         }
 333       }
 334       if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 335         report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
 336       }
 337     } else if (n->Opcode() == Op_CmpP) {
 338       const bool trace = false;
 339 
 340       Node* in1 = n->in(1);
 341       Node* in2 = n->in(2);
 342       if (in1->bottom_type()->isa_oopptr()) {
 343         if (trace) {tty->print("Verifying"); n->dump();}
 344 
 345         bool mark_inputs = false;
 346         if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
 347             (in1->is_Con() || in2->is_Con())) {
 348           if (trace) {tty->print_cr("Comparison against a constant");}
 349           mark_inputs = true;
 350         } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
 351                    (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
 352           if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
 353           mark_inputs = true;
 354         } else {
 355           assert(in2->bottom_type()->isa_oopptr(), "");
 356 
 357           if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
 358               !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
 359             report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
 360           }
 361         }
 362         if (verify_no_useless_barrier &&
 363             mark_inputs &&
 364             (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
 365              !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
 366           phis.clear();
 367           visited.reset();
 368         }
 369       }
 370     } else if (n->is_LoadStore()) {
 371       if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
 372           !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 373         report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
 374       }
 375 
 376       if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 377         report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
 378       }
 379     } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
 380       CallNode* call = n->as_Call();
 381 
 382       static struct {
 383         const char* name;
 384         struct {
 385           int pos;
 386           verify_type t;
 387         } args[6];
 388       } calls[] = {
 389         "aescrypt_encryptBlock",
 390         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 391           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 392         "aescrypt_decryptBlock",
 393         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 394           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 395         "multiplyToLen",
 396         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
 397           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 398         "squareToLen",
 399         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
 400           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 401         "montgomery_multiply",
 402         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 403           { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 404         "montgomery_square",
 405         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
 406           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 407         "mulAdd",
 408         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 409           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 410         "vectorizedMismatch",
 411         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 412           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 413         "updateBytesCRC32",
 414         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 415           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 416         "updateBytesAdler32",
 417         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 418           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 419         "updateBytesCRC32C",
 420         { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
 421           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 422         "counterMode_AESCrypt",
 423         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 424           { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
 425         "cipherBlockChaining_encryptAESCrypt",
 426         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 427           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 428         "cipherBlockChaining_decryptAESCrypt",
 429         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 430           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 431         "shenandoah_clone_barrier",
 432         { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 433           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 434         "ghash_processBlocks",
 435         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 436           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 437         "sha1_implCompress",
 438         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 439           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 440         "sha256_implCompress",
 441         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 442           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 443         "sha512_implCompress",
 444         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 445           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 446         "sha1_implCompressMB",
 447         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 448           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 449         "sha256_implCompressMB",
 450         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 451           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 452         "sha512_implCompressMB",
 453         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 454           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 455         "encodeBlock",
 456         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 457           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 458       };
 459 
 460       if (call->is_call_to_arraycopystub()) {
 461         Node* dest = NULL;
 462         const TypeTuple* args = n->as_Call()->_tf->domain();
 463         for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 464           if (args->field_at(i)->isa_ptr()) {
 465             j++;
 466             if (j == 2) {
 467               dest = n->in(i);
 468               break;
 469             }
 470           }
 471         }
 472         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
 473             !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
 474           report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
 475         }
 476       } else if (strlen(call->_name) > 5 &&
 477                  !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
 478         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
 479           report_verify_failure("Shenandoah verification: _fill should have barriers", n);
 480         }
 481       } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
 482         // skip
 483       } else {
 484         const int calls_len = sizeof(calls) / sizeof(calls[0]);
 485         int i = 0;
 486         for (; i < calls_len; i++) {
 487           if (!strcmp(calls[i].name, call->_name)) {
 488             break;
 489           }
 490         }
 491         if (i != calls_len) {
 492           const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
 493           for (uint j = 0; j < args_len; j++) {
 494             int pos = calls[i].args[j].pos;
 495             if (pos == -1) {
 496               break;
 497             }
 498             if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
 499               report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 500             }
 501           }
 502           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 503             if (call->in(j)->bottom_type()->make_ptr() &&
 504                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 505               uint k = 0;
 506               for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
 507               if (k == args_len) {
 508                 fatal("arg %d for call %s not covered", j, call->_name);
 509               }
 510             }
 511           }
 512         } else {
 513           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 514             if (call->in(j)->bottom_type()->make_ptr() &&
 515                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 516               fatal("%s not covered", call->_name);
 517             }
 518           }
 519         }
 520       }
 521     } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 522       // skip
 523     } else if (n->is_AddP()
 524                || n->is_Phi()
 525                || n->is_ConstraintCast()
 526                || n->Opcode() == Op_Return
 527                || n->Opcode() == Op_CMoveP
 528                || n->Opcode() == Op_CMoveN
 529                || n->Opcode() == Op_Rethrow
 530                || n->is_MemBar()
 531                || n->Opcode() == Op_Conv2B
 532                || n->Opcode() == Op_SafePoint
 533                || n->is_CallJava()
 534                || n->Opcode() == Op_Unlock
 535                || n->Opcode() == Op_EncodeP
 536                || n->Opcode() == Op_DecodeN) {
 537       // nothing to do
 538     } else {
 539       static struct {
 540         int opcode;
 541         struct {
 542           int pos;
 543           verify_type t;
 544         } inputs[2];
 545       } others[] = {
 546         Op_FastLock,
 547         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 548         Op_Lock,
 549         { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
 550         Op_ArrayCopy,
 551         { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
 552         Op_StrCompressedCopy,
 553         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 554         Op_StrInflatedCopy,
 555         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 556         Op_AryEq,
 557         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 558         Op_StrIndexOf,
 559         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 560         Op_StrComp,
 561         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 562         Op_StrEquals,
 563         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 564         Op_EncodeISOArray,
 565         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 566         Op_HasNegatives,
 567         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
 568         Op_CastP2X,
 569         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 570         Op_StrIndexOfChar,
 571         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 572       };
 573 
 574       const int others_len = sizeof(others) / sizeof(others[0]);
 575       int i = 0;
 576       for (; i < others_len; i++) {
 577         if (others[i].opcode == n->Opcode()) {
 578           break;
 579         }
 580       }
 581       uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
 582       if (i != others_len) {
 583         const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
 584         for (uint j = 0; j < inputs_len; j++) {
 585           int pos = others[i].inputs[j].pos;
 586           if (pos == -1) {
 587             break;
 588           }
 589           if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
 590             report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 591           }
 592         }
 593         for (uint j = 1; j < stop; j++) {
 594           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 595               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 596             uint k = 0;
 597             for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
 598             if (k == inputs_len) {
 599               fatal("arg %d for node %s not covered", j, n->Name());
 600             }
 601           }
 602         }
 603       } else {
 604         for (uint j = 1; j < stop; j++) {
 605           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 606               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 607             fatal("%s not covered", n->Name());
 608           }
 609         }
 610       }
 611     }
 612 
 613     if (n->is_SafePoint()) {
 614       SafePointNode* sfpt = n->as_SafePoint();
 615       if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
 616         for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
 617           if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 618             phis.clear();
 619             visited.reset();
 620           }
 621         }
 622       }
 623     }
 624   }
 625 
 626   if (verify_no_useless_barrier) {
 627     for (int i = 0; i < barriers.length(); i++) {
 628       Node* n = barriers.at(i);
 629       if (!barriers_used.member(n)) {
 630         tty->print("XXX useless barrier"); n->dump(-2);
 631         ShouldNotReachHere();
 632       }
 633     }
 634   }
 635 }
 636 #endif
 637 
 638 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
 639   // That both nodes have the same control is not sufficient to prove
 640   // domination, verify that there's no path from d to n
 641   ResourceMark rm;
 642   Unique_Node_List wq;
 643   wq.push(d);
 644   for (uint next = 0; next < wq.size(); next++) {
 645     Node *m = wq.at(next);
 646     if (m == n) {
 647       return false;
 648     }
 649     if (m->is_Phi() && m->in(0)->is_Loop()) {
 650       assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
 651     } else {
 652       if (m->is_Store() || m->is_LoadStore()) {
 653         // Take anti-dependencies into account
 654         Node* mem = m->in(MemNode::Memory);
 655         for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
 656           Node* u = mem->fast_out(i);
 657           if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
 658               phase->ctrl_or_self(u) == c) {
 659             wq.push(u);
 660           }
 661         }
 662       }
 663       for (uint i = 0; i < m->req(); i++) {
 664         if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
 665           wq.push(m->in(i));
 666         }
 667       }
 668     }
 669   }
 670   return true;
 671 }
 672 
 673 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
 674   if (d_c != n_c) {
 675     return phase->is_dominator(d_c, n_c);
 676   }
 677   return is_dominator_same_ctrl(d_c, d, n, phase);
 678 }
 679 
 680 Node* next_mem(Node* mem, int alias) {
 681   Node* res = NULL;
 682   if (mem->is_Proj()) {
 683     res = mem->in(0);
 684   } else if (mem->is_SafePoint() || mem->is_MemBar()) {
 685     res = mem->in(TypeFunc::Memory);
 686   } else if (mem->is_Phi()) {
 687     res = mem->in(1);
 688   } else if (mem->is_MergeMem()) {
 689     res = mem->as_MergeMem()->memory_at(alias);
 690   } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
 691     assert(alias = Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
 692     res = mem->in(MemNode::Memory);
 693   } else {
 694 #ifdef ASSERT
 695     mem->dump();
 696 #endif
 697     ShouldNotReachHere();
 698   }
 699   return res;
 700 }
 701 
 702 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
 703   Node* iffproj = NULL;
 704   while (c != dom) {
 705     Node* next = phase->idom(c);
 706     assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
 707     if (c->is_Region()) {
 708       ResourceMark rm;
 709       Unique_Node_List wq;
 710       wq.push(c);
 711       for (uint i = 0; i < wq.size(); i++) {
 712         Node *n = wq.at(i);
 713         if (n == next) {
 714           continue;
 715         }
 716         if (n->is_Region()) {
 717           for (uint j = 1; j < n->req(); j++) {
 718             wq.push(n->in(j));
 719           }
 720         } else {
 721           wq.push(n->in(0));
 722         }
 723       }
 724       for (uint i = 0; i < wq.size(); i++) {
 725         Node *n = wq.at(i);
 726         assert(n->is_CFG(), "");
 727         if (n->is_Multi()) {
 728           for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 729             Node* u = n->fast_out(j);
 730             if (u->is_CFG()) {
 731               if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 732                 return NodeSentinel;
 733               }
 734             }
 735           }
 736         }
 737       }
 738     } else  if (c->is_Proj()) {
 739       if (c->is_IfProj()) {
 740         if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
 741           // continue;
 742         } else {
 743           if (!allow_one_proj) {
 744             return NodeSentinel;
 745           }
 746           if (iffproj == NULL) {
 747             iffproj = c;
 748           } else {
 749             return NodeSentinel;
 750           }
 751         }
 752       } else if (c->Opcode() == Op_JumpProj) {
 753         return NodeSentinel; // unsupported
 754       } else if (c->Opcode() == Op_CatchProj) {
 755         return NodeSentinel; // unsupported
 756       } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
 757         return NodeSentinel; // unsupported
 758       } else {
 759         assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
 760       }
 761     }
 762     c = next;
 763   }
 764   return iffproj;
 765 }
 766 
 767 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
 768   ResourceMark rm;
 769   VectorSet wq(Thread::current()->resource_area());
 770   wq.set(mem->_idx);
 771   mem_ctrl = phase->ctrl_or_self(mem);
 772   while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
 773     mem = next_mem(mem, alias);
 774     if (wq.test_set(mem->_idx)) {
 775       return NULL;
 776     }
 777     mem_ctrl = phase->ctrl_or_self(mem);
 778   }
 779   if (mem->is_MergeMem()) {
 780     mem = mem->as_MergeMem()->memory_at(alias);
 781     mem_ctrl = phase->ctrl_or_self(mem);
 782   }
 783   return mem;
 784 }
 785 
 786 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
 787   Node* mem = NULL;
 788   Node* c = ctrl;
 789   do {
 790     if (c->is_Region()) {
 791       Node* phi_bottom = NULL;
 792       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
 793         Node* u = c->fast_out(i);
 794         if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
 795           if (u->adr_type() == TypePtr::BOTTOM) {
 796             mem = u;
 797           }
 798         }
 799       }
 800     } else {
 801       if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
 802         CallProjections projs;
 803         c->as_Call()->extract_projections(&projs, true, false);
 804         if (projs.fallthrough_memproj != NULL) {
 805           if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
 806             if (projs.catchall_memproj == NULL) {
 807               mem = projs.fallthrough_memproj;
 808             } else {
 809               if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
 810                 mem = projs.fallthrough_memproj;
 811               } else {
 812                 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
 813                 mem = projs.catchall_memproj;
 814               }
 815             }
 816           }
 817         } else {
 818           Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
 819           if (proj != NULL &&
 820               proj->adr_type() == TypePtr::BOTTOM) {
 821             mem = proj;
 822           }
 823         }
 824       } else {
 825         for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
 826           Node* u = c->fast_out(i);
 827           if (u->is_Proj() &&
 828               u->bottom_type() == Type::MEMORY &&
 829               u->adr_type() == TypePtr::BOTTOM) {
 830               assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
 831               assert(mem == NULL, "only one proj");
 832               mem = u;
 833           }
 834         }
 835         assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
 836       }
 837     }
 838     c = phase->idom(c);
 839   } while (mem == NULL);
 840   return mem;
 841 }
 842 
 843 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
 844   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 845     Node* u = n->fast_out(i);
 846     if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
 847       uses.push(u);
 848     }
 849   }
 850 }
 851 
 852 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
 853   OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
 854   Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
 855   phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
 856   Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
 857   phase->register_control(new_le, phase->get_loop(le), le->in(0));
 858   phase->lazy_replace(outer, new_outer);
 859   phase->lazy_replace(le, new_le);
 860   inner->clear_strip_mined();
 861 }
 862 
 863 void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
 864                                                PhaseIdealLoop* phase, int flags) {
 865   PhaseIterGVN& igvn = phase->igvn();
 866   Node* old_ctrl = ctrl;
 867 
 868   Node* thread          = new ThreadLocalNode();
 869   Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 870   Node* gc_state_addr   = new AddPNode(phase->C->top(), thread, gc_state_offset);
 871   Node* gc_state        = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
 872                                         DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
 873                                         TypeInt::BYTE, MemNode::unordered);
 874   Node* gc_state_and    = new AndINode(gc_state, igvn.intcon(flags));
 875   Node* gc_state_cmp    = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
 876   Node* gc_state_bool   = new BoolNode(gc_state_cmp, BoolTest::ne);
 877 
 878   IfNode* gc_state_iff  = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 879   ctrl                  = new IfTrueNode(gc_state_iff);
 880   test_fail_ctrl        = new IfFalseNode(gc_state_iff);
 881 
 882   IdealLoopTree* loop = phase->get_loop(ctrl);
 883   phase->register_control(gc_state_iff,   loop, old_ctrl);
 884   phase->register_control(ctrl,           loop, gc_state_iff);
 885   phase->register_control(test_fail_ctrl, loop, gc_state_iff);
 886 
 887   phase->register_new_node(thread,        old_ctrl);
 888   phase->register_new_node(gc_state_addr, old_ctrl);
 889   phase->register_new_node(gc_state,      old_ctrl);
 890   phase->register_new_node(gc_state_and,  old_ctrl);
 891   phase->register_new_node(gc_state_cmp,  old_ctrl);
 892   phase->register_new_node(gc_state_bool, old_ctrl);
 893 
 894   phase->set_ctrl(gc_state_offset, phase->C->root());
 895 
 896   assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
 897 }
 898 
 899 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
 900   Node* old_ctrl = ctrl;
 901   PhaseIterGVN& igvn = phase->igvn();
 902 
 903   const Type* val_t = igvn.type(val);
 904   if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
 905     Node* null_cmp   = new CmpPNode(val, igvn.zerocon(T_OBJECT));
 906     Node* null_test  = new BoolNode(null_cmp, BoolTest::ne);
 907 
 908     IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
 909     ctrl             = new IfTrueNode(null_iff);
 910     null_ctrl        = new IfFalseNode(null_iff);
 911 
 912     IdealLoopTree* loop = phase->get_loop(old_ctrl);
 913     phase->register_control(null_iff,  loop, old_ctrl);
 914     phase->register_control(ctrl,      loop, null_iff);
 915     phase->register_control(null_ctrl, loop, null_iff);
 916 
 917     phase->register_new_node(null_cmp,  old_ctrl);
 918     phase->register_new_node(null_test, old_ctrl);
 919   }
 920 }
 921 
 922 Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
 923   IdealLoopTree *loop = phase->get_loop(c);
 924   Node* iff = unc_ctrl->in(0);
 925   assert(iff->is_If(), "broken");
 926   Node* new_iff = iff->clone();
 927   new_iff->set_req(0, c);
 928   phase->register_control(new_iff, loop, c);
 929   Node* iffalse = new IfFalseNode(new_iff->as_If());
 930   phase->register_control(iffalse, loop, new_iff);
 931   Node* iftrue = new IfTrueNode(new_iff->as_If());
 932   phase->register_control(iftrue, loop, new_iff);
 933   c = iftrue;
 934   const Type *t = phase->igvn().type(val);
 935   assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
 936   Node* uncasted_val = val->in(1);
 937   val = new CastPPNode(uncasted_val, t);
 938   val->init_req(0, c);
 939   phase->register_new_node(val, c);
 940   return val;
 941 }
 942 
 943 void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
 944                                                 Unique_Node_List& uses, PhaseIdealLoop* phase) {
 945   IfNode* iff = unc_ctrl->in(0)->as_If();
 946   Node* proj = iff->proj_out(0);
 947   assert(proj != unc_ctrl, "bad projection");
 948   Node* use = proj->unique_ctrl_out();
 949 
 950   assert(use == unc || use->is_Region(), "what else?");
 951 
 952   uses.clear();
 953   if (use == unc) {
 954     phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
 955     for (uint i = 1; i < unc->req(); i++) {
 956       Node* n = unc->in(i);
 957       if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
 958         uses.push(n);
 959       }
 960     }
 961   } else {
 962     assert(use->is_Region(), "what else?");
 963     uint idx = 1;
 964     for (; use->in(idx) != proj; idx++);
 965     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
 966       Node* u = use->fast_out(i);
 967       if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
 968         uses.push(u->in(idx));
 969       }
 970     }
 971   }
 972   for(uint next = 0; next < uses.size(); next++ ) {
 973     Node *n = uses.at(next);
 974     assert(phase->get_ctrl(n) == proj, "bad control");
 975     phase->set_ctrl_and_loop(n, new_unc_ctrl);
 976     if (n->in(0) == proj) {
 977       phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
 978     }
 979     for (uint i = 0; i < n->req(); i++) {
 980       Node* m = n->in(i);
 981       if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
 982         uses.push(m);
 983       }
 984     }
 985   }
 986 
 987   phase->igvn().rehash_node_delayed(use);
 988   int nb = use->replace_edge(proj, new_unc_ctrl);
 989   assert(nb == 1, "only use expected");
 990 }
 991 
 992 void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
 993   Node* old_ctrl = ctrl;
 994   PhaseIterGVN& igvn = phase->igvn();
 995 
 996   Node* raw_val        = new CastP2XNode(old_ctrl, val);
 997   Node* cset_idx       = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
 998   Node* cset_addr      = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
 999   Node* cset_load_addr = new AddPNode(phase->C->top(), cset_addr, cset_idx);
1000   Node* cset_load      = new LoadBNode(old_ctrl, raw_mem, cset_load_addr,
1001                                        DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
1002                                        TypeInt::BYTE, MemNode::unordered);
1003   Node* cset_cmp       = new CmpINode(cset_load, igvn.zerocon(T_INT));
1004   Node* cset_bool      = new BoolNode(cset_cmp, BoolTest::ne);
1005 
1006   IfNode* cset_iff     = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
1007   ctrl                 = new IfTrueNode(cset_iff);
1008   not_cset_ctrl        = new IfFalseNode(cset_iff);
1009 
1010   IdealLoopTree *loop = phase->get_loop(old_ctrl);
1011   phase->register_control(cset_iff,      loop, old_ctrl);
1012   phase->register_control(ctrl,          loop, cset_iff);
1013   phase->register_control(not_cset_ctrl, loop, cset_iff);
1014 
1015   phase->set_ctrl(cset_addr, phase->C->root());
1016 
1017   phase->register_new_node(raw_val,        old_ctrl);
1018   phase->register_new_node(cset_idx,       old_ctrl);
1019   phase->register_new_node(cset_load_addr, old_ctrl);
1020   phase->register_new_node(cset_load,      old_ctrl);
1021   phase->register_new_node(cset_cmp,       old_ctrl);
1022   phase->register_new_node(cset_bool,      old_ctrl);
1023 }
1024 
1025 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) {
1026   IdealLoopTree*loop = phase->get_loop(ctrl);
1027   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
1028 
1029   // The slow path stub consumes and produces raw memory in addition
1030   // to the existing memory edges
1031   Node* base = find_bottom_mem(ctrl, phase);
1032   MergeMemNode* mm = MergeMemNode::make(base);
1033   mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1034   phase->register_new_node(mm, ctrl);
1035 
1036   address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
1037           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) :
1038           CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier);
1039 
1040   address calladdr = is_native ? CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native)
1041                                : target;
1042   const char* name = is_native ? "load_reference_barrier_native" : "load_reference_barrier";
1043   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
1044 
1045   call->init_req(TypeFunc::Control, ctrl);
1046   call->init_req(TypeFunc::I_O, phase->C->top());
1047   call->init_req(TypeFunc::Memory, mm);
1048   call->init_req(TypeFunc::FramePtr, phase->C->top());
1049   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1050   call->init_req(TypeFunc::Parms, val);
1051   call->init_req(TypeFunc::Parms+1, load_addr);
1052   phase->register_control(call, loop, ctrl);
1053   ctrl = new ProjNode(call, TypeFunc::Control);
1054   phase->register_control(ctrl, loop, call);
1055   result_mem = new ProjNode(call, TypeFunc::Memory);
1056   phase->register_new_node(result_mem, call);
1057   val = new ProjNode(call, TypeFunc::Parms);
1058   phase->register_new_node(val, call);
1059   val = new CheckCastPPNode(ctrl, val, obj_type);
1060   phase->register_new_node(val, ctrl);
1061 }
1062 
1063 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
1064   Node* ctrl = phase->get_ctrl(barrier);
1065   Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
1066 
1067   // Update the control of all nodes that should be after the
1068   // barrier control flow
1069   uses.clear();
1070   // Every node that is control dependent on the barrier's input
1071   // control will be after the expanded barrier. The raw memory (if
1072   // its memory is control dependent on the barrier's input control)
1073   // must stay above the barrier.
1074   uses_to_ignore.clear();
1075   if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1076     uses_to_ignore.push(init_raw_mem);
1077   }
1078   for (uint next = 0; next < uses_to_ignore.size(); next++) {
1079     Node *n = uses_to_ignore.at(next);
1080     for (uint i = 0; i < n->req(); i++) {
1081       Node* in = n->in(i);
1082       if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
1083         uses_to_ignore.push(in);
1084       }
1085     }
1086   }
1087   for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
1088     Node* u = ctrl->fast_out(i);
1089     if (u->_idx < last &&
1090         u != barrier &&
1091         !uses_to_ignore.member(u) &&
1092         (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
1093         (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
1094       Node* old_c = phase->ctrl_or_self(u);
1095       Node* c = old_c;
1096       if (c != ctrl ||
1097           is_dominator_same_ctrl(old_c, barrier, u, phase) ||
1098           ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
1099         phase->igvn().rehash_node_delayed(u);
1100         int nb = u->replace_edge(ctrl, region);
1101         if (u->is_CFG()) {
1102           if (phase->idom(u) == ctrl) {
1103             phase->set_idom(u, region, phase->dom_depth(region));
1104           }
1105         } else if (phase->get_ctrl(u) == ctrl) {
1106           assert(u != init_raw_mem, "should leave input raw mem above the barrier");
1107           uses.push(u);
1108         }
1109         assert(nb == 1, "more than 1 ctrl input?");
1110         --i, imax -= nb;
1111       }
1112     }
1113   }
1114 }
1115 
1116 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1117   Node* region = NULL;
1118   while (c != ctrl) {
1119     if (c->is_Region()) {
1120       region = c;
1121     }
1122     c = phase->idom(c);
1123   }
1124   assert(region != NULL, "");
1125   Node* phi = new PhiNode(region, n->bottom_type());
1126   for (uint j = 1; j < region->req(); j++) {
1127     Node* in = region->in(j);
1128     if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1129       phi->init_req(j, n);
1130     } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1131       phi->init_req(j, n_clone);
1132     } else {
1133       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1134     }
1135   }
1136   phase->register_new_node(phi, region);
1137   return phi;
1138 }
1139 
1140 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1141   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1142 
1143   Unique_Node_List uses;
1144   for (int i = 0; i < state->enqueue_barriers_count(); i++) {
1145     Node* barrier = state->enqueue_barrier(i);
1146     Node* ctrl = phase->get_ctrl(barrier);
1147     IdealLoopTree* loop = phase->get_loop(ctrl);
1148     if (loop->_head->is_OuterStripMinedLoop()) {
1149       // Expanding a barrier here will break loop strip mining
1150       // verification. Transform the loop so the loop nest doesn't
1151       // appear as strip mined.
1152       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1153       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1154     }
1155   }
1156 
1157   Node_Stack stack(0);
1158   Node_List clones;
1159   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1160     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1161     if (lrb->is_redundant()) {
1162       continue;
1163     }
1164 
1165     Node* ctrl = phase->get_ctrl(lrb);
1166     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1167 
1168     CallStaticJavaNode* unc = NULL;
1169     Node* unc_ctrl = NULL;
1170     Node* uncasted_val = val;
1171 
1172     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1173       Node* u = lrb->fast_out(i);
1174       if (u->Opcode() == Op_CastPP &&
1175           u->in(0) != NULL &&
1176           phase->is_dominator(u->in(0), ctrl)) {
1177         const Type* u_t = phase->igvn().type(u);
1178 
1179         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1180             u->in(0)->Opcode() == Op_IfTrue &&
1181             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1182             u->in(0)->in(0)->is_If() &&
1183             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1184             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1185             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1186             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1187             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1188           IdealLoopTree* loop = phase->get_loop(ctrl);
1189           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1190 
1191           if (!unc_loop->is_member(loop)) {
1192             continue;
1193           }
1194 
1195           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1196           assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1197           if (branch == NodeSentinel) {
1198             continue;
1199           }
1200 
1201           phase->igvn().replace_input_of(u, 1, val);
1202           phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
1203           phase->set_ctrl(u, u->in(0));
1204           phase->set_ctrl(lrb, u->in(0));
1205           unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1206           unc_ctrl = u->in(0);
1207           val = u;
1208 
1209           for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
1210             Node* u = val->fast_out(j);
1211             if (u == lrb) continue;
1212             phase->igvn().rehash_node_delayed(u);
1213             int nb = u->replace_edge(val, lrb);
1214             --j; jmax -= nb;
1215           }
1216 
1217           RegionNode* r = new RegionNode(3);
1218           IfNode* iff = unc_ctrl->in(0)->as_If();
1219 
1220           Node* ctrl_use = unc_ctrl->unique_ctrl_out();
1221           Node* unc_ctrl_clone = unc_ctrl->clone();
1222           phase->register_control(unc_ctrl_clone, loop, iff);
1223           Node* c = unc_ctrl_clone;
1224           Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
1225           r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
1226 
1227           phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
1228           phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
1229           phase->lazy_replace(c, unc_ctrl);
1230           c = NULL;;
1231           phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
1232           phase->set_ctrl(val, unc_ctrl_clone);
1233 
1234           IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
1235           fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
1236           Node* iff_proj = iff->proj_out(0);
1237           r->init_req(2, iff_proj);
1238           phase->register_control(r, phase->ltree_root(), iff);
1239 
1240           Node* new_bol = new_iff->in(1)->clone();
1241           Node* new_cmp = new_bol->in(1)->clone();
1242           assert(new_cmp->Opcode() == Op_CmpP, "broken");
1243           assert(new_cmp->in(1) == val->in(1), "broken");
1244           new_bol->set_req(1, new_cmp);
1245           new_cmp->set_req(1, lrb);
1246           phase->register_new_node(new_bol, new_iff->in(0));
1247           phase->register_new_node(new_cmp, new_iff->in(0));
1248           phase->igvn().replace_input_of(new_iff, 1, new_bol);
1249           phase->igvn().replace_input_of(new_cast, 1, lrb);
1250 
1251           for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1252             Node* u = lrb->fast_out(i);
1253             if (u == new_cast || u == new_cmp) {
1254               continue;
1255             }
1256             phase->igvn().rehash_node_delayed(u);
1257             int nb = u->replace_edge(lrb, new_cast);
1258             assert(nb > 0, "no update?");
1259             --i; imax -= nb;
1260           }
1261 
1262           for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1263             Node* u = val->fast_out(i);
1264             if (u == lrb) {
1265               continue;
1266             }
1267             phase->igvn().rehash_node_delayed(u);
1268             int nb = u->replace_edge(val, new_cast);
1269             assert(nb > 0, "no update?");
1270             --i; imax -= nb;
1271           }
1272 
1273           ctrl = unc_ctrl_clone;
1274           phase->set_ctrl_and_loop(lrb, ctrl);
1275           break;
1276         }
1277       }
1278     }
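         // Barriers controlled by a call (or one of its projections) need extra
         // care: either float the barrier above a rethrow stub, or duplicate it
         // along the call's exception path (below).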
1279     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1280       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1281       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1282         // The rethrow call may have too many projections to be
1283         // properly handled here. Given there's no reason for a
1284         // barrier to depend on the call, move it above the call.
1285         stack.push(lrb, 0);
1286         do {
1287           Node* n = stack.node();
1288           uint idx = stack.index();
1289           if (idx < n->req()) {
1290             Node* in = n->in(idx);
1291             stack.set_index(idx+1);
1292             if (in != NULL) {
1293               if (phase->has_ctrl(in)) {
1294                 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1295 #ifdef ASSERT
1296                   for (uint i = 0; i < stack.size(); i++) {
1297                     assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1298                   }
1299 #endif
1300                   stack.push(in, 0);
1301                 }
1302               } else {
1303                 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1304               }
1305             }
1306           } else {
1307             phase->set_ctrl(n, call->in(0));
1308             stack.pop();
1309           }
1310         } while(stack.size() > 0);
1311         continue;
1312       }
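           // For other Java calls, clone the barrier and the nodes it feeds (up
           // to the catch projections) so the fall-through and exceptional paths
           // each get their own copy.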
1313       CallProjections projs;
1314       call->extract_projections(&projs, false, false);
1315 
1316       Node* lrb_clone = lrb->clone();
1317       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1318       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1319 
1320       stack.push(lrb, 0);
1321       clones.push(lrb_clone);
1322 
1323       do {
1324         assert(stack.size() == clones.size(), "");
1325         Node* n = stack.node();
1326 #ifdef ASSERT
1327         if (n->is_Load()) {
1328           Node* mem = n->in(MemNode::Memory);
1329           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1330             Node* u = mem->fast_out(j);
1331             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1332           }
1333         }
1334 #endif
1335         uint idx = stack.index();
1336         Node* n_clone = clones.at(clones.size()-1);
1337         if (idx < n->outcnt()) {
1338           Node* u = n->raw_out(idx);
1339           Node* c = phase->ctrl_or_self(u);
1340           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1341             stack.set_index(idx+1);
1342             assert(!u->is_CFG(), "");
1343             stack.push(u, 0);
1344             Node* u_clone = u->clone();
1345             int nb = u_clone->replace_edge(n, n_clone);
1346             assert(nb > 0, "should have replaced some uses");
1347             phase->register_new_node(u_clone, projs.catchall_catchproj);
1348             clones.push(u_clone);
1349             phase->set_ctrl(u, projs.fallthrough_catchproj);
1350           } else {
1351             bool replaced = false;
1352             if (u->is_Phi()) {
1353               for (uint k = 1; k < u->req(); k++) {
1354                 if (u->in(k) == n) {
1355                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1356                     phase->igvn().replace_input_of(u, k, n_clone);
1357                     replaced = true;
1358                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1359                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1360                     replaced = true;
1361                   }
1362                 }
1363               }
1364             } else {
1365               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1366                 phase->igvn().rehash_node_delayed(u);
1367                 int nb = u->replace_edge(n, n_clone);
1368                 assert(nb > 0, "should have replaced some uses");
1369                 replaced = true;
1370               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1371                 phase->igvn().rehash_node_delayed(u);
1372                 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1373                 assert(nb > 0, "should have replaced some uses");
1374                 replaced = true;
1375               }
1376             }
1377             if (!replaced) {
1378               stack.set_index(idx+1);
1379             }
1380           }
1381         } else {
1382           stack.pop();
1383           clones.pop();
1384         }
1385       } while (stack.size() > 0);
1386       assert(stack.size() == 0 && clones.size() == 0, "");
1387     }
1388   }
1389 
1390   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1391     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1392     if (lrb->is_redundant()) {
1393       continue;
1394     }
1395     Node* ctrl = phase->get_ctrl(lrb);
1396     IdealLoopTree* loop = phase->get_loop(ctrl);
1397     if (loop->_head->is_OuterStripMinedLoop()) {
1398       // Expanding a barrier here will break loop strip mining
1399       // verification. Transform the loop so the loop nest doesn't
1400       // appear as strip mined.
1401       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1402       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1403     }
1404   }
1405 
1406   // Expand load-reference-barriers
1407   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1408   Unique_Node_List uses_to_ignore;
1409   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1410     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1411     if (lrb->is_redundant()) {
1412       phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1413       continue;
1414     }
1415     uint last = phase->C->unique();
1416     Node* ctrl = phase->get_ctrl(lrb);
1417     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1418 
1420     Node* orig_ctrl = ctrl;
1421 
1422     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1423     Node* init_raw_mem = raw_mem;
1424     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1425 
1426     IdealLoopTree *loop = phase->get_loop(ctrl);
1427     CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
1428     Node* unc_ctrl = NULL;
1429     if (unc != NULL) {
1430       if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
1431         unc = NULL;
1432       } else {
1433         unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
1434       }
1435     }
1436 
1437     Node* uncasted_val = val;
1438     if (unc != NULL) {
1439       uncasted_val = val->in(1);
1440     }
1441 
1442     Node* heap_stable_ctrl = NULL;
1443     Node* null_ctrl = NULL;
1444 
1445     assert(val->bottom_type()->make_oopptr(), "need oop");
1446     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1447 
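         // The expanded barrier splits control into (up to) four paths: heap
         // stable, value not in the collection set, null value, and the
         // evacuation slow path. The region and phis below merge them back.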
1448     enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT };
1449     Node* region = new RegionNode(PATH_LIMIT);
1450     Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
1451     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1452 
1453     // Stable path.
1454     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::HAS_FORWARDED);
1455     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1456 
1457     // Heap stable case
1458     region->init_req(_heap_stable, heap_stable_ctrl);
1459     val_phi->init_req(_heap_stable, uncasted_val);
1460     raw_mem_phi->init_req(_heap_stable, raw_mem);
1461 
1462     Node* reg2_ctrl = NULL;
1463     // Null case
1464     test_null(ctrl, val, null_ctrl, phase);
1465     if (null_ctrl != NULL) {
1466       reg2_ctrl = null_ctrl->in(0);
1467       region->init_req(_null_path, null_ctrl);
1468       val_phi->init_req(_null_path, uncasted_val);
1469       raw_mem_phi->init_req(_null_path, raw_mem);
1470     } else {
1471       region->del_req(_null_path);
1472       val_phi->del_req(_null_path);
1473       raw_mem_phi->del_req(_null_path);
1474     }
1475 
1476     // Test for in-cset.
1477     // Wires !in_cset(obj) to slot 2 of region and phis
1478     Node* not_cset_ctrl = NULL;
1479     test_in_cset(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
1480     if (not_cset_ctrl != NULL) {
1481       if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
1482       region->init_req(_not_cset, not_cset_ctrl);
1483       val_phi->init_req(_not_cset, uncasted_val);
1484       raw_mem_phi->init_req(_not_cset, raw_mem);
1485     }
1486 
1487     // Resolve object when orig-value is in cset.
1488     // Make the unconditional resolve for fwdptr.
1489     Node* new_val = uncasted_val;
1490     if (unc_ctrl != NULL) {
1491       // Clone the null check in this branch to allow implicit null check
1492       new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
1493       fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
1494 
1495       IfNode* iff = unc_ctrl->in(0)->as_If();
1496       phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
1497     }
1498 
1499     // Call the LRB stub and wire that path into the region and phis (_evac_path)
1500     Node* result_mem = NULL;
1501 
1502     Node* fwd = new_val;
1503     Node* addr;
1504     if (ShenandoahSelfFixing) {
1505       VectorSet visited(Thread::current()->resource_area());
1506       addr = get_load_addr(phase, visited, lrb);
1507     } else {
1508       addr = phase->igvn().zerocon(T_OBJECT);
1509     }
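         // If the load address is an AddP chain, re-base it on a CheckCastPP of
         // the original base so the address computation is pinned at the barrier
         // control.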
1510     if (addr->Opcode() == Op_AddP) {
1511       Node* orig_base = addr->in(AddPNode::Base);
1512       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), true);
1513       phase->register_new_node(base, ctrl);
1514       if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1515         // Field access
1516         addr = addr->clone();
1517         addr->set_req(AddPNode::Base, base);
1518         addr->set_req(AddPNode::Address, base);
1519         phase->register_new_node(addr, ctrl);
1520       } else {
1521         Node* addr2 = addr->in(AddPNode::Address);
1522         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1523               addr2->in(AddPNode::Base) == orig_base) {
1524           addr2 = addr2->clone();
1525           addr2->set_req(AddPNode::Base, base);
1526           addr2->set_req(AddPNode::Address, base);
1527           phase->register_new_node(addr2, ctrl);
1528           addr = addr->clone();
1529           addr->set_req(AddPNode::Base, base);
1530           addr->set_req(AddPNode::Address, addr2);
1531           phase->register_new_node(addr, ctrl);
1532         }
1533       }
1534     }
1535     call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, lrb->is_native(), phase);
1536     region->init_req(_evac_path, ctrl);
1537     val_phi->init_req(_evac_path, fwd);
1538     raw_mem_phi->init_req(_evac_path, result_mem);
1539 
1540     phase->register_control(region, loop, heap_stable_iff);
1541     Node* out_val = val_phi;
1542     phase->register_new_node(val_phi, region);
1543     phase->register_new_node(raw_mem_phi, region);
1544 
1545     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1546 
1547     ctrl = orig_ctrl;
1548 
1549     if (unc != NULL) {
1550       for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1551         Node* u = val->fast_out(i);
1552         Node* c = phase->ctrl_or_self(u);
1553         if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
1554           phase->igvn().rehash_node_delayed(u);
1555           int nb = u->replace_edge(val, out_val);
1556           --i; imax -= nb;
1557         }
1558       }
1559       if (val->outcnt() == 0) {
1560         phase->igvn()._worklist.push(val);
1561       }
1562     }
1563     phase->igvn().replace_node(lrb, out_val);
1564 
1565     follow_barrier_uses(out_val, ctrl, uses, phase);
1566 
1567     for (uint next = 0; next < uses.size(); next++) {
1568       Node *n = uses.at(next);
1569       assert(phase->get_ctrl(n) == ctrl, "bad control");
1570       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1571       phase->set_ctrl(n, region);
1572       follow_barrier_uses(n, ctrl, uses, phase);
1573     }
1574 
1575     // The slow path call produces memory: hook the raw memory phi
1576     // of the expanded load reference barrier up to the rest of the graph,
1577     // which may require adding memory phis at every post-dominated
1578     // region and at enclosing loop heads. Use the memory state
1579     // collected in memory_nodes to fix the memory graph. Update that
1580     // memory state as we go.
1581     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1582   }
1583   // Done expanding load-reference-barriers.
1584   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1585 
1586   for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
1587     Node* barrier = state->enqueue_barrier(i);
1588     Node* pre_val = barrier->in(1);
1589 
1590     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1591       ShouldNotReachHere();
1592       continue;
1593     }
1594 
1595     Node* ctrl = phase->get_ctrl(barrier);
1596 
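         // If the barrier is controlled by a call, move it right above the call:
         // the pre-value must be available there (asserted below).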
1597     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1598       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1599       ctrl = ctrl->in(0)->in(0);
1600       phase->set_ctrl(barrier, ctrl);
1601     } else if (ctrl->is_CallRuntime()) {
1602       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1603       ctrl = ctrl->in(0);
1604       phase->set_ctrl(barrier, ctrl);
1605     }
1606 
1607     Node* init_ctrl = ctrl;
1608     IdealLoopTree* loop = phase->get_loop(ctrl);
1609     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1610     Node* init_raw_mem = raw_mem;
1611     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1612     Node* heap_stable_ctrl = NULL;
1613     Node* null_ctrl = NULL;
1614     uint last = phase->C->unique();
1615 
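         // SATB pre-barrier expansion: the outer region merges the "not marking"
         // fast exit with the marking path; the inner region merges the queue
         // store fast path, the runtime call slow path and the null pre-value
         // path.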
1616     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1617     Node* region = new RegionNode(PATH_LIMIT);
1618     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1619 
1620     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1621     Node* region2 = new RegionNode(PATH_LIMIT2);
1622     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1623 
1624     // Stable path.
1625     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
1626     region->init_req(_heap_stable, heap_stable_ctrl);
1627     phi->init_req(_heap_stable, raw_mem);
1628 
1629     // Null path
1630     Node* reg2_ctrl = NULL;
1631     test_null(ctrl, pre_val, null_ctrl, phase);
1632     if (null_ctrl != NULL) {
1633       reg2_ctrl = null_ctrl->in(0);
1634       region2->init_req(_null_path, null_ctrl);
1635       phi2->init_req(_null_path, raw_mem);
1636     } else {
1637       region2->del_req(_null_path);
1638       phi2->del_req(_null_path);
1639     }
1640 
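         // Marking path: if the thread's SATB queue has room, store pre_val into
         // the queue buffer and decrement the index; otherwise call into the
         // runtime.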
1641     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1642     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1643     Node* thread = new ThreadLocalNode();
1644     phase->register_new_node(thread, ctrl);
1645     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1646     phase->register_new_node(buffer_adr, ctrl);
1647     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1648     phase->register_new_node(index_adr, ctrl);
1649 
1650     BasicType index_bt = TypeX_X->basic_type();
1651     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
1652     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1653     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1654     phase->register_new_node(index, ctrl);
1655     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1656     phase->register_new_node(index_cmp, ctrl);
1657     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1658     phase->register_new_node(index_test, ctrl);
1659     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1660     if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1661     phase->register_control(queue_full_iff, loop, ctrl);
1662     Node* not_full = new IfTrueNode(queue_full_iff);
1663     phase->register_control(not_full, loop, queue_full_iff);
1664     Node* full = new IfFalseNode(queue_full_iff);
1665     phase->register_control(full, loop, queue_full_iff);
1666 
1667     ctrl = not_full;
1668 
1669     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1670     phase->register_new_node(next_index, ctrl);
1671 
1672     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1673     phase->register_new_node(buffer, ctrl);
1674     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1675     phase->register_new_node(log_addr, ctrl);
1676     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1677     phase->register_new_node(log_store, ctrl);
1678     // update the index
1679     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1680     phase->register_new_node(index_update, ctrl);
1681 
1682     // Fast-path case
1683     region2->init_req(_fast_path, ctrl);
1684     phi2->init_req(_fast_path, index_update);
1685 
1686     ctrl = full;
1687 
1688     Node* base = find_bottom_mem(ctrl, phase);
1689 
1690     MergeMemNode* mm = MergeMemNode::make(base);
1691     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1692     phase->register_new_node(mm, ctrl);
1693 
1694     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1695     call->init_req(TypeFunc::Control, ctrl);
1696     call->init_req(TypeFunc::I_O, phase->C->top());
1697     call->init_req(TypeFunc::Memory, mm);
1698     call->init_req(TypeFunc::FramePtr, phase->C->top());
1699     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1700     call->init_req(TypeFunc::Parms, pre_val);
1701     call->init_req(TypeFunc::Parms+1, thread);
1702     phase->register_control(call, loop, ctrl);
1703 
1704     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1705     phase->register_control(ctrl_proj, loop, call);
1706     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1707     phase->register_new_node(mem_proj, call);
1708 
1709     // Slow-path case
1710     region2->init_req(_slow_path, ctrl_proj);
1711     phi2->init_req(_slow_path, mem_proj);
1712 
1713     phase->register_control(region2, loop, reg2_ctrl);
1714     phase->register_new_node(phi2, region2);
1715 
1716     region->init_req(_heap_unstable, region2);
1717     phi->init_req(_heap_unstable, phi2);
1718 
1719     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1720     phase->register_new_node(phi, region);
1721 
1722     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1723     for (uint next = 0; next < uses.size(); next++) {
1724       Node *n = uses.at(next);
1725       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1726       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1727       phase->set_ctrl(n, region);
1728       follow_barrier_uses(n, init_ctrl, uses, phase);
1729     }
1730     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1731 
1732     phase->igvn().replace_node(barrier, pre_val);
1733   }
1734   assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1736 }
1737 
1738 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1739   if (visited.test_set(in->_idx)) {
1740     return NULL;
1741   }
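       // Walk up the value graph to recover the address the value was loaded
       // from. A zero constant is returned when no single load address can be
       // determined.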
1742   switch (in->Opcode()) {
1743     case Op_Proj:
1744       return get_load_addr(phase, visited, in->in(0));
1745     case Op_CastPP:
1746     case Op_CheckCastPP:
1747     case Op_DecodeN:
1748     case Op_EncodeP:
1749       return get_load_addr(phase, visited, in->in(1));
1750     case Op_LoadN:
1751     case Op_LoadP:
1752       return in->in(MemNode::Address);
1753     case Op_CompareAndExchangeN:
1754     case Op_CompareAndExchangeP:
1755     case Op_GetAndSetN:
1756     case Op_GetAndSetP:
1757     case Op_ShenandoahCompareAndExchangeP:
1758     case Op_ShenandoahCompareAndExchangeN:
1759       // These instructions would just have stored a different
1760       // value into the field. There is no point trying to fix it here.
1761       return phase->igvn().zerocon(T_OBJECT);
1762     case Op_CMoveP:
1763     case Op_CMoveN: {
1764       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1765       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1766       // Handle unambiguous cases: single address reported on both branches.
1767       if (t != NULL && f == NULL) return t;
1768       if (t == NULL && f != NULL) return f;
1769       if (t != NULL && t == f)    return t;
1770       // Ambiguity.
1771       return phase->igvn().zerocon(T_OBJECT);
1772     }
1773     case Op_Phi: {
1774       Node* addr = NULL;
1775       for (uint i = 1; i < in->req(); i++) {
1776         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1777         if (addr == NULL) {
1778           addr = addr1;
1779         }
1780         if (addr != addr1) {
1781           return phase->igvn().zerocon(T_OBJECT);
1782         }
1783       }
1784       return addr;
1785     }
1786     case Op_ShenandoahLoadReferenceBarrier:
1787       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1788     case Op_ShenandoahEnqueueBarrier:
1789       return get_load_addr(phase, visited, in->in(1));
1790     case Op_CallDynamicJava:
1791     case Op_CallLeaf:
1792     case Op_CallStaticJava:
1793     case Op_ConN:
1794     case Op_ConP:
1795     case Op_Parm:
1796     case Op_CreateEx:
1797       return phase->igvn().zerocon(T_OBJECT);
1798     default:
1799 #ifdef ASSERT
1800       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1801 #endif
1802       return phase->igvn().zerocon(T_OBJECT);
1803   }
1805 }
1806 
1807 void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1808   IdealLoopTree *loop = phase->get_loop(iff);
1809   Node* loop_head = loop->_head;
1810   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1811 
1812   Node* bol = iff->in(1);
1813   Node* cmp = bol->in(1);
1814   Node* andi = cmp->in(1);
1815   Node* load = andi->in(1);
1816 
1817   assert(is_gc_state_load(load), "broken");
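       // If the gc state load is not already above the loop, clone the
       // load/and/cmp/bool chain at the loop entry so the test becomes loop
       // invariant.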
1818   if (!phase->is_dominator(load->in(0), entry_c)) {
1819     Node* mem_ctrl = NULL;
1820     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1821     load = load->clone();
1822     load->set_req(MemNode::Memory, mem);
1823     load->set_req(0, entry_c);
1824     phase->register_new_node(load, entry_c);
1825     andi = andi->clone();
1826     andi->set_req(1, load);
1827     phase->register_new_node(andi, entry_c);
1828     cmp = cmp->clone();
1829     cmp->set_req(1, andi);
1830     phase->register_new_node(cmp, entry_c);
1831     bol = bol->clone();
1832     bol->set_req(1, cmp);
1833     phase->register_new_node(bol, entry_c);
1834 
1835     Node* old_bol = iff->in(1);
1836     phase->igvn().replace_input_of(iff, 1, bol);
1837   }
1838 }
1839 
1840 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1841   if (!n->is_If() || n->is_CountedLoopEnd()) {
1842     return false;
1843   }
1844   Node* region = n->in(0);
1845 
1846   if (!region->is_Region()) {
1847     return false;
1848   }
1849   Node* dom = phase->idom(region);
1850   if (!dom->is_If()) {
1851     return false;
1852   }
1853 
1854   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1855     return false;
1856   }
1857 
1858   IfNode* dom_if = dom->as_If();
1859   Node* proj_true = dom_if->proj_out(1);
1860   Node* proj_false = dom_if->proj_out(0);
1861 
1862   for (uint i = 1; i < region->req(); i++) {
1863     if (phase->is_dominator(proj_true, region->in(i))) {
1864       continue;
1865     }
1866     if (phase->is_dominator(proj_false, region->in(i))) {
1867       continue;
1868     }
1869     return false;
1870   }
1871 
1872   return true;
1873 }
1874 
1875 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1876   assert(is_heap_stable_test(n), "no other tests");
1877   if (identical_backtoback_ifs(n, phase)) {
1878     Node* n_ctrl = n->in(0);
1879     if (phase->can_split_if(n_ctrl)) {
1880       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1881       if (is_heap_stable_test(n)) {
1882         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1883         assert(is_gc_state_load(gc_state_load), "broken");
1884         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1885         assert(is_gc_state_load(dom_gc_state_load), "broken");
1886         if (gc_state_load != dom_gc_state_load) {
1887           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1888         }
1889       }
1890       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1891       Node* proj_true = dom_if->proj_out(1);
1892       Node* proj_false = dom_if->proj_out(0);
1893       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1894       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1895 
1896       for (uint i = 1; i < n_ctrl->req(); i++) {
1897         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1898           bolphi->init_req(i, con_true);
1899         } else {
1900           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1901           bolphi->init_req(i, con_false);
1902         }
1903       }
1904       phase->register_new_node(bolphi, n_ctrl);
1905       phase->igvn().replace_input_of(n, 1, bolphi);
1906       phase->do_split_if(n);
1907     }
1908   }
1909 }
1910 
1911 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1912   // Find first invariant test that doesn't exit the loop
1913   LoopNode *head = loop->_head->as_Loop();
1914   IfNode* unswitch_iff = NULL;
1915   Node* n = head->in(LoopNode::LoopBackControl);
1916   int loop_has_sfpts = -1;
1917   while (n != head) {
1918     Node* n_dom = phase->idom(n);
1919     if (n->is_Region()) {
1920       if (n_dom->is_If()) {
1921         IfNode* iff = n_dom->as_If();
1922         if (iff->in(1)->is_Bool()) {
1923           BoolNode* bol = iff->in(1)->as_Bool();
1924           if (bol->in(1)->is_Cmp()) {
1925             // If the condition is invariant and not a loop exit,
1926             // we have found a reason to unswitch.
1927             if (is_heap_stable_test(iff) &&
1928                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1929               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1930               if (loop_has_sfpts == -1) {
1931                 for (uint i = 0; i < loop->_body.size(); i++) {
1932                   Node *m = loop->_body[i];
1933                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1934                     loop_has_sfpts = 1;
1935                     break;
1936                   }
1937                 }
1938                 if (loop_has_sfpts == -1) {
1939                   loop_has_sfpts = 0;
1940                 }
1941               }
1942               if (!loop_has_sfpts) {
1943                 unswitch_iff = iff;
1944               }
1945             }
1946           }
1947         }
1948       }
1949     }
1950     n = n_dom;
1951   }
1952   return unswitch_iff;
1953 }
1954 
1955 
1956 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1957   Node_List heap_stable_tests;
1958   stack.push(phase->C->start(), 0);
1959   do {
1960     Node* n = stack.node();
1961     uint i = stack.index();
1962 
1963     if (i < n->outcnt()) {
1964       Node* u = n->raw_out(i);
1965       stack.set_index(i+1);
1966       if (!visited.test_set(u->_idx)) {
1967         stack.push(u, 0);
1968       }
1969     } else {
1970       stack.pop();
1971       if (n->is_If() && is_heap_stable_test(n)) {
1972         heap_stable_tests.push(n);
1973       }
1974     }
1975   } while (stack.size() > 0);
1976 
1977   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1978     Node* n = heap_stable_tests.at(i);
1979     assert(is_heap_stable_test(n), "only evacuation test");
1980     merge_back_to_back_tests(n, phase);
1981   }
1982 
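       // If there was no major progress, try to unswitch innermost loops that
       // contain a heap stable test: move the gc state test to the loop entry
       // and unswitch on it.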
1983   if (!phase->C->major_progress()) {
1984     VectorSet seen(Thread::current()->resource_area());
1985     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1986       Node* n = heap_stable_tests.at(i);
1987       IdealLoopTree* loop = phase->get_loop(n);
1988       if (loop != phase->ltree_root() &&
1989           loop->_child == NULL &&
1990           !loop->_irreducible) {
1991         Node* head = loop->_head;
1992         if (head->is_Loop() &&
1993             (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1994             !seen.test_set(head->_idx)) {
1995           IfNode* iff = find_unswitching_candidate(loop, phase);
1996           if (iff != NULL) {
1997             Node* bol = iff->in(1);
1998             if (head->as_Loop()->is_strip_mined()) {
1999               head->as_Loop()->verify_strip_mined(0);
2000             }
2001             move_gc_state_test_out_of_loop(iff, phase);
2002 
2003             AutoNodeBudget node_budget(phase);
2004 
2005             if (loop->policy_unswitching(phase)) {
2006               if (head->as_Loop()->is_strip_mined()) {
2007                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
2008                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
2009               }
2010               phase->do_unswitching(loop, old_new);
2011             } else {
2012               // Not proceeding with unswitching. Move the load back
2013               // into the loop.
2014               phase->igvn().replace_input_of(iff, 1, bol);
2015             }
2016           }
2017         }
2018       }
2019     }
2020   }
2021 }
2022 
2023 #ifdef ASSERT
2024 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
2025   const bool trace = false;
2026   ResourceMark rm;
2027   Unique_Node_List nodes;
2028   Unique_Node_List controls;
2029   Unique_Node_List memories;
2030 
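       // For every LRB runtime call, collect the CFG nodes reachable from the
       // call and the raw memory states derived from its memory projection, then
       // check that every reachable region either lies entirely on paths from
       // the call or merges that memory state with a matching phi.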
2031   nodes.push(root);
2032   for (uint next = 0; next < nodes.size(); next++) {
2033     Node *n  = nodes.at(next);
2034     if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
2035       controls.push(n);
2036       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
2037       for (uint next2 = 0; next2 < controls.size(); next2++) {
2038         Node *m = controls.at(next2);
2039         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2040           Node* u = m->fast_out(i);
2041           if (u->is_CFG() && !u->is_Root() &&
2042               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
2043               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
2044             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
2045             controls.push(u);
2046           }
2047         }
2048       }
2049       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
2050       for (uint next2 = 0; next2 < memories.size(); next2++) {
2051         Node *m = memories.at(next2);
2052         assert(m->bottom_type() == Type::MEMORY, "");
2053         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2054           Node* u = m->fast_out(i);
2055           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
2056             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2057             memories.push(u);
2058           } else if (u->is_LoadStore()) {
2059             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
2060             memories.push(u->find_out_with(Op_SCMemProj));
2061           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
2062             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2063             memories.push(u);
2064           } else if (u->is_Phi()) {
2065             assert(u->bottom_type() == Type::MEMORY, "");
2066             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
2067               assert(controls.member(u->in(0)), "");
2068               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2069               memories.push(u);
2070             }
2071           } else if (u->is_SafePoint() || u->is_MemBar()) {
2072             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2073               Node* uu = u->fast_out(j);
2074               if (uu->bottom_type() == Type::MEMORY) {
2075                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
2076                 memories.push(uu);
2077               }
2078             }
2079           }
2080         }
2081       }
2082       for (uint next2 = 0; next2 < controls.size(); next2++) {
2083         Node *m = controls.at(next2);
2084         if (m->is_Region()) {
2085           bool all_in = true;
2086           for (uint i = 1; i < m->req(); i++) {
2087             if (!controls.member(m->in(i))) {
2088               all_in = false;
2089               break;
2090             }
2091           }
2092           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
2093           bool found_phi = false;
2094           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
2095             Node* u = m->fast_out(j);
2096             if (u->is_Phi() && memories.member(u)) {
2097               found_phi = true;
2098               for (uint i = 1; i < u->req() && found_phi; i++) {
2099                 Node* k = u->in(i);
2100                 if (memories.member(k) != controls.member(m->in(i))) {
2101                   found_phi = false;
2102                 }
2103               }
2104             }
2105           }
2106           assert(found_phi || all_in, "");
2107         }
2108       }
2109       controls.clear();
2110       memories.clear();
2111     }
2112     for (uint i = 0; i < n->len(); ++i) {
2113       Node *m = n->in(i);
2114       if (m != NULL) {
2115         nodes.push(m);
2116       }
2117     }
2118   }
2119 }
2120 #endif
2121 
2122 ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
2123   ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
2124 }
2125 
2126 const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
2127   if (in(1) == NULL || in(1)->is_top()) {
2128     return Type::TOP;
2129   }
2130   const Type* t = in(1)->bottom_type();
2131   if (t == TypePtr::NULL_PTR) {
2132     return t;
2133   }
2134   return t->is_oopptr();
2135 }
2136 
2137 const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
2138   if (in(1) == NULL) {
2139     return Type::TOP;
2140   }
2141   const Type* t = phase->type(in(1));
2142   if (t == Type::TOP) {
2143     return Type::TOP;
2144   }
2145   if (t == TypePtr::NULL_PTR) {
2146     return t;
2147   }
2148   return t->is_oopptr();
2149 }
2150 
2151 int ShenandoahEnqueueBarrierNode::needed(Node* n) {
2152   if (n == NULL ||
2153       n->is_Allocate() ||
2154       n->Opcode() == Op_ShenandoahEnqueueBarrier ||
2155       n->bottom_type() == TypePtr::NULL_PTR ||
2156       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2157     return NotNeeded;
2158   }
2159   if (n->is_Phi() ||
2160       n->is_CMove()) {
2161     return MaybeNeeded;
2162   }
2163   return Needed;
2164 }
2165 
2166 Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
2167   for (;;) {
2168     if (n == NULL) {
2169       return n;
2170     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2171       return n;
2172     } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2173       return n;
2174     } else if (n->is_ConstraintCast() ||
2175                n->Opcode() == Op_DecodeN ||
2176                n->Opcode() == Op_EncodeP) {
2177       n = n->in(1);
2178     } else if (n->is_Proj()) {
2179       n = n->in(0);
2180     } else {
2181       return n;
2182     }
2183   }
2184   ShouldNotReachHere();
2185   return NULL;
2186 }
2187 
2188 Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
2189   PhaseIterGVN* igvn = phase->is_IterGVN();
2190 
2191   Node* n = next(in(1));
2192 
2193   int cont = needed(n);
2194 
2195   if (cont == NotNeeded) {
2196     return in(1);
2197   } else if (cont == MaybeNeeded) {
2198     if (igvn == NULL) {
2199       phase->record_for_igvn(this);
2200       return this;
2201     } else {
2202       ResourceMark rm;
2203       Unique_Node_List wq;
2204       uint wq_i = 0;
2205 
2206       for (;;) {
2207         if (n->is_Phi()) {
2208           for (uint i = 1; i < n->req(); i++) {
2209             Node* m = n->in(i);
2210             if (m != NULL) {
2211               wq.push(m);
2212             }
2213           }
2214         } else {
2215           assert(n->is_CMove(), "nothing else here");
2216           Node* m = n->in(CMoveNode::IfFalse);
2217           wq.push(m);
2218           m = n->in(CMoveNode::IfTrue);
2219           wq.push(m);
2220         }
2221         Node* orig_n = NULL;
2222         do {
2223           if (wq_i >= wq.size()) {
2224             return in(1);
2225           }
2226           n = wq.at(wq_i);
2227           wq_i++;
2228           orig_n = n;
2229           n = next(n);
2230           cont = needed(n);
2231           if (cont == Needed) {
2232             return this;
2233           }
2234         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2235       }
2236     }
2237   }
2238 
2239   return this;
2240 }
2241 
2242 #ifdef ASSERT
2243 static bool has_never_branch(Node* root) {
2244   for (uint i = 1; i < root->req(); i++) {
2245     Node* in = root->in(i);
2246     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2247       return true;
2248     }
2249   }
2250   return false;
2251 }
2252 #endif
2253 
2254 void MemoryGraphFixer::collect_memory_nodes() {
2255   Node_Stack stack(0);
2256   VectorSet visited(Thread::current()->resource_area());
2257   Node_List regions;
2258 
2259   // Walk the raw memory graph and create a mapping from CFG node to
2260   // memory node. Exclude phis for now.
2261   stack.push(_phase->C->root(), 1);
2262   do {
2263     Node* n = stack.node();
2264     int opc = n->Opcode();
2265     uint i = stack.index();
2266     if (i < n->req()) {
2267       Node* mem = NULL;
2268       if (opc == Op_Root) {
2269         Node* in = n->in(i);
2270         int in_opc = in->Opcode();
2271         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2272           mem = in->in(TypeFunc::Memory);
2273         } else if (in_opc == Op_Halt) {
2274           if (in->in(0)->is_Region()) {
2275             Node* r = in->in(0);
2276             for (uint j = 1; j < r->req(); j++) {
2277               assert(r->in(j)->Opcode() != Op_NeverBranch, "");
2278             }
2279           } else {
2280             Node* proj = in->in(0);
2281             assert(proj->is_Proj(), "");
2282             Node* in = proj->in(0);
2283             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2284             if (in->is_CallStaticJava()) {
2285               mem = in->in(TypeFunc::Memory);
2286             } else if (in->Opcode() == Op_Catch) {
2287               Node* call = in->in(0)->in(0);
2288               assert(call->is_Call(), "");
2289               mem = call->in(TypeFunc::Memory);
2290             } else if (in->Opcode() == Op_NeverBranch) {
2291               Node* head = in->in(0);
2292               assert(head->is_Region() && head->req() == 3, "unexpected infinite loop graph shape");
2293               assert(_phase->is_dominator(head, head->in(1)) || _phase->is_dominator(head, head->in(2)), "no back branch?");
2294               Node* tail = _phase->is_dominator(head, head->in(1)) ? head->in(1) : head->in(2);
2295               Node* c = tail;
2296               while (c != head) {
2297                 if (c->is_SafePoint() && !c->is_CallLeaf()) {
2298                   mem = c->in(TypeFunc::Memory);
2299                 }
2300                 c = _phase->idom(c);
2301               }
2302               assert(mem != NULL, "should have found safepoint");
2303 
2304               Node* phi_mem = NULL;
2305               for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
2306                 Node* u = head->fast_out(j);
2307                 if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
2308                   if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2309                     assert(phi_mem == NULL || phi_mem->adr_type() == TypePtr::BOTTOM, "");
2310                     phi_mem = u;
2311                   } else if (u->adr_type() == TypePtr::BOTTOM) {
2312                     assert(phi_mem == NULL || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
2313                     if (phi_mem == NULL) {
2314                       phi_mem = u;
2315                     }
2316                   }
2317                 }
2318               }
2319               if (phi_mem != NULL) {
2320                 mem = phi_mem;
2321               }
2322             }
2323           }
2324         } else {
2325 #ifdef ASSERT
2326           n->dump();
2327           in->dump();
2328 #endif
2329           ShouldNotReachHere();
2330         }
2331       } else {
2332         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2333         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2334         mem = n->in(i);
2335       }
2336       i++;
2337       stack.set_index(i);
2338       if (mem == NULL) {
2339         continue;
2340       }
2341       for (;;) {
2342         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2343           break;
2344         }
2345         if (mem->is_Phi()) {
2346           stack.push(mem, 2);
2347           mem = mem->in(1);
2348         } else if (mem->is_Proj()) {
2349           stack.push(mem, mem->req());
2350           mem = mem->in(0);
2351         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2352           mem = mem->in(TypeFunc::Memory);
2353         } else if (mem->is_MergeMem()) {
2354           MergeMemNode* mm = mem->as_MergeMem();
2355           mem = mm->memory_at(_alias);
2356         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2357           assert(_alias == Compile::AliasIdxRaw, "");
2358           stack.push(mem, mem->req());
2359           mem = mem->in(MemNode::Memory);
2360         } else {
2361 #ifdef ASSERT
2362           mem->dump();
2363 #endif
2364           ShouldNotReachHere();
2365         }
2366       }
2367     } else {
2368       if (n->is_Phi()) {
2369         // Nothing
2370       } else if (!n->is_Root()) {
2371         Node* c = get_ctrl(n);
2372         _memory_nodes.map(c->_idx, n);
2373       }
2374       stack.pop();
2375     }
2376   } while(stack.is_nonempty());
2377 
2378   // Iterate over CFG nodes in rpo and propagate memory state to
2379   // compute memory state at regions, creating new phis if needed.
2380   Node_List rpo_list;
2381   visited.clear();
2382   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2383   Node* root = rpo_list.pop();
2384   assert(root == _phase->C->root(), "");
2385 
2386   const bool trace = false;
2387 #ifdef ASSERT
2388   if (trace) {
2389     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2390       Node* c = rpo_list.at(i);
2391       if (_memory_nodes[c->_idx] != NULL) {
2392         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2393       }
2394     }
2395   }
2396 #endif
2397   uint last = _phase->C->unique();
2398 
2399 #ifdef ASSERT
2400   uint8_t max_depth = 0;
2401   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2402     IdealLoopTree* lpt = iter.current();
2403     max_depth = MAX2(max_depth, lpt->_nest);
2404   }
2405 #endif
2406 
2407   bool progress = true;
2408   int iteration = 0;
2409   Node_List dead_phis;
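       // Iterate to a fixed point: for each region compute the unique incoming
       // memory state, or build/reuse a memory phi when the inputs disagree
       // (NodeSentinel); other CFG nodes without a memory state of their own
       // inherit it from their immediate dominator.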
2410   while (progress) {
2411     progress = false;
2412     iteration++;
2413     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2414     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2415     IdealLoopTree* last_updated_ilt = NULL;
2416     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2417       Node* c = rpo_list.at(i);
2418 
2419       Node* prev_mem = _memory_nodes[c->_idx];
2420       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2421         Node* prev_region = regions[c->_idx];
2422         Node* unique = NULL;
2423         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2424           Node* m = _memory_nodes[c->in(j)->_idx];
2425           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2426           if (m != NULL) {
2427             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2428               assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), "");
2429               // continue
2430             } else if (unique == NULL) {
2431               unique = m;
2432             } else if (m == unique) {
2433               // continue
2434             } else {
2435               unique = NodeSentinel;
2436             }
2437           }
2438         }
2439         assert(unique != NULL, "empty phi???");
2440         if (unique != NodeSentinel) {
2441           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2442             dead_phis.push(prev_region);
2443           }
2444           regions.map(c->_idx, unique);
2445         } else {
2446           Node* phi = NULL;
2447           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2448             phi = prev_region;
2449             for (uint k = 1; k < c->req(); k++) {
2450               Node* m = _memory_nodes[c->in(k)->_idx];
2451               assert(m != NULL, "expect memory state");
2452               phi->set_req(k, m);
2453             }
2454           } else {
2455             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2456               Node* u = c->fast_out(j);
2457               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2458                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2459                 phi = u;
2460                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2461                   Node* m = _memory_nodes[c->in(k)->_idx];
2462                   assert(m != NULL, "expect memory state");
2463                   if (u->in(k) != m) {
2464                     phi = NULL;
2465                   }
2466                 }
2467               }
2468             }
2469             if (phi == NULL) {
2470               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2471               for (uint k = 1; k < c->req(); k++) {
2472                 Node* m = _memory_nodes[c->in(k)->_idx];
2473                 assert(m != NULL, "expect memory state");
2474                 phi->init_req(k, m);
2475               }
2476             }
2477           }
2478           assert(phi != NULL, "");
2479           regions.map(c->_idx, phi);
2480         }
2481         Node* current_region = regions[c->_idx];
2482         if (current_region != prev_region) {
2483           progress = true;
2484           if (prev_region == prev_mem) {
2485             _memory_nodes.map(c->_idx, current_region);
2486           }
2487         }
2488       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2489         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2490         assert(m != NULL, "expect memory state");
2491         if (m != prev_mem) {
2492           _memory_nodes.map(c->_idx, m);
2493           progress = true;
2494         }
2495       }
2496 #ifdef ASSERT
2497       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2498 #endif
2499     }
2500   }
2501 
2502   // Replace existing phi with computed memory state for that region
2503   // if different (could be a new phi or a dominating memory node if
2504   // that phi was found to be useless).
2505   while (dead_phis.size() > 0) {
2506     Node* n = dead_phis.pop();
2507     n->replace_by(_phase->C->top());
2508     n->destruct();
2509   }
2510   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2511     Node* c = rpo_list.at(i);
2512     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2513       Node* n = regions[c->_idx];
2514       if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2515         _phase->register_new_node(n, c);
2516       }
2517     }
2518   }
2519   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2520     Node* c = rpo_list.at(i);
2521     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2522       Node* n = regions[c->_idx];
2523       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2524         Node* u = c->fast_out(i);
2525         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2526             u != n) {
2527           if (u->adr_type() == TypePtr::BOTTOM) {
2528             fix_memory_uses(u, n, n, c);
2529           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2530             _phase->lazy_replace(u, n);
2531             --i; --imax;
2532           }
2533         }
2534       }
2535     }
2536   }
2537 }
2538 
2539 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2540   Node* c = _phase->get_ctrl(n);
2541   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2542     assert(c == n->in(0), "");
2543     CallNode* call = c->as_Call();
2544     CallProjections projs;
2545     call->extract_projections(&projs, true, false);
2546     if (projs.catchall_memproj != NULL) {
2547       if (projs.fallthrough_memproj == n) {
2548         c = projs.fallthrough_catchproj;
2549       } else {
2550         assert(projs.catchall_memproj == n, "");
2551         c = projs.catchall_catchproj;
2552       }
2553     }
2554   }
2555   return c;
2556 }
2557 
2558 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2559   if (_phase->has_ctrl(n))
2560     return get_ctrl(n);
2561   else {
2562     assert (n->is_CFG(), "must be a CFG node");
2563     return n;
2564   }
2565 }
2566 
2567 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2568   return m != NULL && get_ctrl(m) == c;
2569 }
2570 
2571 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2572   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
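       // Walk up the dominator tree from ctrl until a memory state that is valid
       // at that control is found.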
2573   Node* mem = _memory_nodes[ctrl->_idx];
2574   Node* c = ctrl;
2575   while (!mem_is_valid(mem, c) &&
2576          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2577     c = _phase->idom(c);
2578     mem = _memory_nodes[c->_idx];
2579   }
2580   if (n != NULL && mem_is_valid(mem, c)) {
2581     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2582       mem = next_mem(mem, _alias);
2583     }
2584     if (mem->is_MergeMem()) {
2585       mem = mem->as_MergeMem()->memory_at(_alias);
2586     }
2587     if (!mem_is_valid(mem, c)) {
2588       do {
2589         c = _phase->idom(c);
2590         mem = _memory_nodes[c->_idx];
2591       } while (!mem_is_valid(mem, c) &&
2592                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2593     }
2594   }
2595   assert(mem->bottom_type() == Type::MEMORY, "");
2596   return mem;
2597 }
2598 
2599 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2600   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2601     Node* use = region->fast_out(i);
2602     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2603         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2604       return true;
2605     }
2606   }
2607   return false;
2608 }
2609 
2610 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2611   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2612   const bool trace = false;
2613   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2614   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2615   GrowableArray<Node*> phis;
2616   if (mem_for_ctrl != mem) {
2617     Node* old = mem_for_ctrl;
2618     Node* prev = NULL;
2619     while (old != mem) {
2620       prev = old;
2621       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2622         assert(_alias == Compile::AliasIdxRaw, "");
2623         old = old->in(MemNode::Memory);
2624       } else if (old->Opcode() == Op_SCMemProj) {
2625         assert(_alias == Compile::AliasIdxRaw, "");
2626         old = old->in(0);
2627       } else {
2628         ShouldNotReachHere();
2629       }
2630     }
2631     assert(prev != NULL, "");
2632     if (new_ctrl != ctrl) {
2633       _memory_nodes.map(ctrl->_idx, mem);
2634       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2635     }
2636     uint input = (uint)MemNode::Memory;
2637     _phase->igvn().replace_input_of(prev, input, new_mem);
2638   } else {
2639     uses.clear();
2640     _memory_nodes.map(new_ctrl->_idx, new_mem);
2641     uses.push(new_ctrl);
2642     for (uint next = 0; next < uses.size(); next++) {
2643       Node *n = uses.at(next);
2644       assert(n->is_CFG(), "");
2645       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2646       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2647         Node* u = n->fast_out(i);
2648         if (!u->is_Root() && u->is_CFG() && u != n) {
2649           Node* m = _memory_nodes[u->_idx];
2650           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2651               !has_mem_phi(u) &&
2652               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2653             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2654             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2655 
2656             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2657               bool push = true;
2658               bool create_phi = true;
2659               if (_phase->is_dominator(new_ctrl, u)) {
2660                 create_phi = false;
2661               }
2662               if (create_phi) {
2663                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2664                 _phase->register_new_node(phi, u);
2665                 phis.push(phi);
2666                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2667                 if (!mem_is_valid(m, u)) {
2668                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2669                   _memory_nodes.map(u->_idx, phi);
2670                 } else {
2671                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2672                   for (;;) {
2673                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2674                     Node* next = NULL;
2675                     if (m->is_Proj()) {
2676                       next = m->in(0);
2677                     } else {
2678                       assert(m->is_Mem() || m->is_LoadStore(), "");
2679                       assert(_alias == Compile::AliasIdxRaw, "");
2680                       next = m->in(MemNode::Memory);
2681                     }
2682                     if (_phase->get_ctrl(next) != u) {
2683                       break;
2684                     }
2685                     if (next->is_MergeMem()) {
2686                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2687                       break;
2688                     }
2689                     if (next->is_Phi()) {
2690                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2691                       break;
2692                     }
2693                     m = next;
2694                   }
2695 
2696                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2697                   assert(m->is_Mem() || m->is_LoadStore(), "");
2698                   uint input = (uint)MemNode::Memory;
2699                   _phase->igvn().replace_input_of(m, input, phi);
2700                   push = false;
2701                 }
2702               } else {
2703                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2704               }
2705               if (push) {
2706                 uses.push(u);
2707               }
2708             }
2709           } else if (!mem_is_valid(m, u) &&
2710                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2711             uses.push(u);
2712           }
2713         }
2714       }
2715     }
2716     for (int i = 0; i < phis.length(); i++) {
2717       Node* n = phis.at(i);
2718       Node* r = n->in(0);
2719       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2720       for (uint j = 1; j < n->req(); j++) {
2721         Node* m = find_mem(r->in(j), NULL);
2722         _phase->igvn().replace_input_of(n, j, m);
2723         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2724       }
2725     }
2726   }
2727   uint last = _phase->C->unique();
2728   MergeMemNode* mm = NULL;
2729   int alias = _alias;
2730   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2731   // Process loads first: if the memory edge of a store were updated
2732   // before a load is processed, the anti-dependency between the load and
2733   // the store could be missed.
2734   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2735     Node* u = mem->out(i);
2736     if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2737       Node* m = find_mem(_phase->get_ctrl(u), u);
2738       if (m != mem) {
2739         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2740         _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2741         --i;
2742       }
2743     }
2744   }
2745   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2746     Node* u = mem->out(i);
2747     if (u->_idx < last) {
2748       if (u->is_Mem()) {
2749         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2750           Node* m = find_mem(_phase->get_ctrl(u), u);
2751           if (m != mem) {
2752             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2753             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2754             --i;
2755           }
2756         }
2757       } else if (u->is_MergeMem()) {
2758         MergeMemNode* u_mm = u->as_MergeMem();
2759         if (u_mm->memory_at(alias) == mem) {
2760           MergeMemNode* newmm = NULL;
2761           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2762             Node* uu = u->fast_out(j);
2763             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2764             if (uu->is_Phi()) {
2765               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2766               Node* region = uu->in(0);
2767               int nb = 0;
2768               for (uint k = 1; k < uu->req(); k++) {
2769                 if (uu->in(k) == u) {
2770                   Node* m = find_mem(region->in(k), NULL);
2771                   if (m != mem) {
2772                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2773                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2774                     if (newmm != u) {
2775                       _phase->igvn().replace_input_of(uu, k, newmm);
2776                       nb++;
2777                       --jmax;
2778                     }
2779                   }
2780                 }
2781               }
2782               if (nb > 0) {
2783                 --j;
2784               }
2785             } else {
2786               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2787               if (m != mem) {
2788                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2789                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2790                 if (newmm != u) {
2791                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2792                   --j, --jmax;
2793                 }
2794               }
2795             }
2796           }
2797         }
2798       } else if (u->is_Phi()) {
2799         assert(u->bottom_type() == Type::MEMORY, "what else?");
2800         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2801           Node* region = u->in(0);
2802           bool replaced = false;
2803           for (uint j = 1; j < u->req(); j++) {
2804             if (u->in(j) == mem) {
2805               Node* m = find_mem(region->in(j), NULL);
2806               Node* nnew = m;
2807               if (m != mem) {
2808                 if (u->adr_type() == TypePtr::BOTTOM) {
2809                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2810                   nnew = mm;
2811                 }
2812                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2813                 _phase->igvn().replace_input_of(u, j, nnew);
2814                 replaced = true;
2815               }
2816             }
2817           }
2818           if (replaced) {
2819             --i;
2820           }
2821         }
2822       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2823                  u->adr_type() == NULL) {
2824         assert(u->adr_type() != NULL ||
2825                u->Opcode() == Op_Rethrow ||
2826                u->Opcode() == Op_Return ||
2827                u->Opcode() == Op_SafePoint ||
2828                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2829                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2830                u->Opcode() == Op_CallLeaf, "");
2831         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2832         if (m != mem) {
2833           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2834           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2835           --i;
2836         }
2837       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2838         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2839         if (m != mem) {
2840           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2841           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2842           --i;
2843         }
2844       } else if (u->adr_type() != TypePtr::BOTTOM &&
2845                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2846         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2847         assert(m != mem, "");
2848         // u is on the wrong slice...
2849         assert(u->is_ClearArray(), "");
2850         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2851         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2852         --i;
2853       }
2854     }
2855   }
2856 #ifdef ASSERT
2857   assert(new_mem->outcnt() > 0, "");
2858   for (int i = 0; i < phis.length(); i++) {
2859     Node* n = phis.at(i);
2860     assert(n->outcnt() > 0, "new phi must have uses now");
2861   }
2862 #endif
2863 }
2864 
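     // Builds a MergeMem based on 'mem' with the slice for this alias class
     // replaced by 'rep_proj', registered at 'rep_ctrl'.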
2865 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2866   MergeMemNode* mm = MergeMemNode::make(mem);
2867   mm->set_memory_at(_alias, rep_proj);
2868   _phase->register_new_node(mm, rep_ctrl);
2869   return mm;
2870 }
2871 
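     // Returns a MergeMem equivalent to 'u' but with the slice for this alias
     // class pointing at 'rep_proj'. If 'u' has a single use it is updated in
     // place; otherwise a new MergeMem is built with 'rep_proj' on this slice
     // and registered at the appropriate control.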
2872 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2873   MergeMemNode* newmm = NULL;
2874   MergeMemNode* u_mm = u->as_MergeMem();
2875   Node* c = _phase->get_ctrl(u);
2876   if (_phase->is_dominator(c, rep_ctrl)) {
2877     c = rep_ctrl;
2878   } else {
2879     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2880   }
2881   if (u->outcnt() == 1) {
2882     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2883       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2884       --i;
2885     } else {
2886       _phase->igvn().rehash_node_delayed(u);
2887       u_mm->set_memory_at(_alias, rep_proj);
2888     }
2889     newmm = u_mm;
2890     _phase->set_ctrl_and_loop(u, c);
2891   } else {
2892     // We can't simply clone u and then change one of its inputs: that
2893     // would add and then remove an edge, which messes with the
2894     // DUIterator.
2895     newmm = MergeMemNode::make(u_mm->base_memory());
2896     for (uint j = 0; j < u->req(); j++) {
2897       if (j < newmm->req()) {
2898         if (j == (uint)_alias) {
2899           newmm->set_req(j, rep_proj);
2900         } else if (newmm->in(j) != u->in(j)) {
2901           newmm->set_req(j, u->in(j));
2902         }
2903       } else if (j == (uint)_alias) {
2904         newmm->add_req(rep_proj);
2905       } else {
2906         newmm->add_req(u->in(j));
2907       }
2908     }
2909     if ((uint)_alias >= u->req()) {
2910       newmm->set_memory_at(_alias, rep_proj);
2911     }
2912     _phase->register_new_node(newmm, c);
2913   }
2914   return newmm;
2915 }
2916 
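     // A memory Phi is relevant here if it is on this alias class' slice, or
     // if it is a bottom (all slices) memory Phi and its region has no Phi
     // dedicated to this alias class.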
2917 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2918   if (phi->adr_type() == TypePtr::BOTTOM) {
2919     Node* region = phi->in(0);
2920     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2921       Node* uu = region->fast_out(j);
2922       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2923         return false;
2924       }
2925     }
2926     return true;
2927   }
2928   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2929 }
2930 
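     // Redirects uses of the memory state 'mem' that are dominated by
     // 'rep_ctrl' so they consume 'rep_proj' instead (wrapped in a MergeMem
     // where the use expects the full memory state); the use 'replacement'
     // itself is skipped.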
2931 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2932   uint last = _phase->C->unique();
2933   MergeMemNode* mm = NULL;
2934   assert(mem->bottom_type() == Type::MEMORY, "");
2935   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2936     Node* u = mem->out(i);
2937     if (u != replacement && u->_idx < last) {
2938       if (u->is_MergeMem()) {
2939         MergeMemNode* u_mm = u->as_MergeMem();
2940         if (u_mm->memory_at(_alias) == mem) {
2941           MergeMemNode* newmm = NULL;
2942           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2943             Node* uu = u->fast_out(j);
2944             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2945             if (uu->is_Phi()) {
2946               if (should_process_phi(uu)) {
2947                 Node* region = uu->in(0);
2948                 int nb = 0;
2949                 for (uint k = 1; k < uu->req(); k++) {
2950                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2951                     if (newmm == NULL) {
2952                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2953                     }
2954                     if (newmm != u) {
2955                       _phase->igvn().replace_input_of(uu, k, newmm);
2956                       nb++;
2957                       --jmax;
2958                     }
2959                   }
2960                 }
2961                 if (nb > 0) {
2962                   --j;
2963                 }
2964               }
2965             } else {
2966               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2967                 if (newmm == NULL) {
2968                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2969                 }
2970                 if (newmm != u) {
2971                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2972                   --j, --jmax;
2973                 }
2974               }
2975             }
2976           }
2977         }
2978       } else if (u->is_Phi()) {
2979         assert(u->bottom_type() == Type::MEMORY, "what else?");
2980         Node* region = u->in(0);
2981         if (should_process_phi(u)) {
2982           bool replaced = false;
2983           for (uint j = 1; j < u->req(); j++) {
2984             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2985               Node* nnew = rep_proj;
2986               if (u->adr_type() == TypePtr::BOTTOM) {
2987                 if (mm == NULL) {
2988                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2989                 }
2990                 nnew = mm;
2991               }
2992               _phase->igvn().replace_input_of(u, j, nnew);
2993               replaced = true;
2994             }
2995           }
2996           if (replaced) {
2997             --i;
2998           }
2999 
3000         }
3001       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
3002                  u->adr_type() == NULL) {
3003         assert(u->adr_type() != NULL ||
3004                u->Opcode() == Op_Rethrow ||
3005                u->Opcode() == Op_Return ||
3006                u->Opcode() == Op_SafePoint ||
3007                u->Opcode() == Op_StoreIConditional ||
3008                u->Opcode() == Op_StoreLConditional ||
3009                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
3010                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
3011                u->Opcode() == Op_CallLeaf, "%s", u->Name());
3012         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3013           if (mm == NULL) {
3014             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
3015           }
3016           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
3017           --i;
3018         }
3019       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
3020         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3021           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
3022           --i;
3023         }
3024       }
3025     }
3026   }
3027 }
3028 
3029 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, bool native)
3030 : Node(ctrl, obj), _native(native) {
3031   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
3032 }
3033 
3034 bool ShenandoahLoadReferenceBarrierNode::is_native() const {
3035   return _native;
3036 }
3037 
3038 uint ShenandoahLoadReferenceBarrierNode::size_of() const {
3039   return sizeof(*this);
3040 }
3041 
3042 uint ShenandoahLoadReferenceBarrierNode::hash() const {
3043   return Node::hash() + (_native ? 1 : 0);
3044 }
3045 
3046 bool ShenandoahLoadReferenceBarrierNode::cmp( const Node &n ) const {
3047   return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
3048          _native == ((const ShenandoahLoadReferenceBarrierNode&)n)._native;
3049 }
3050 
3051 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
3052   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
3053     return Type::TOP;
3054   }
3055   const Type* t = in(ValueIn)->bottom_type();
3056   if (t == TypePtr::NULL_PTR) {
3057     return t;
3058   }
3059   return t->is_oopptr();
3060 }
3061 
3062 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
3063   // Either input is TOP ==> the result is TOP
3064   const Type *t2 = phase->type(in(ValueIn));
3065   if (t2 == Type::TOP) return Type::TOP;
3066 
3067   if (t2 == TypePtr::NULL_PTR) {
3068     return t2;
3069   }
3070 
3071   const Type* type = t2->is_oopptr();
3072   return type;
3073 }
3074 
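     // The barrier is an identity transform (and can be elided) only when the
     // incoming value provably does not need a barrier.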
3075 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
3076   Node* value = in(ValueIn);
3077   if (!needs_barrier(phase, value)) {
3078     return value;
3079   }
3080   return this;
3081 }
3082 
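     // Conservative check on the value and its inputs: allocations, calls,
     // constants, nulls, method arguments and values already covered by
     // another barrier are treated as not needing a barrier; loads and atomic
     // accesses do need one, and anything unrecognized is assumed to need one.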
3083 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
3084   Unique_Node_List visited;
3085   return needs_barrier_impl(phase, n, visited);
3086 }
3087 
3088 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
3089   if (n == NULL) return false;
3090   if (visited.member(n)) {
3091     return false; // Been there.
3092   }
3093   visited.push(n);
3094 
3095   if (n->is_Allocate()) {
3096     // tty->print_cr("optimize barrier on alloc");
3097     return false;
3098   }
3099   if (n->is_Call()) {
3100     // tty->print_cr("optimize barrier on call");
3101     return false;
3102   }
3103 
3104   const Type* type = phase->type(n);
3105   if (type == Type::TOP) {
3106     return false;
3107   }
3108   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
3109     // tty->print_cr("optimize barrier on null");
3110     return false;
3111   }
3112   if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
3113     // tty->print_cr("optimize barrier on constant");
3114     return false;
3115   }
3116 
3117   switch (n->Opcode()) {
3118     case Op_AddP:
3119       return true; // TODO: Can refine?
3120     case Op_LoadP:
3121     case Op_ShenandoahCompareAndExchangeN:
3122     case Op_ShenandoahCompareAndExchangeP:
3123     case Op_CompareAndExchangeN:
3124     case Op_CompareAndExchangeP:
3125     case Op_GetAndSetN:
3126     case Op_GetAndSetP:
3127       return true;
3128     case Op_Phi: {
3129       for (uint i = 1; i < n->req(); i++) {
3130         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
3131       }
3132       return false;
3133     }
3134     case Op_CheckCastPP:
3135     case Op_CastPP:
3136       return needs_barrier_impl(phase, n->in(1), visited);
3137     case Op_Proj:
3138       return needs_barrier_impl(phase, n->in(0), visited);
3139     case Op_ShenandoahLoadReferenceBarrier:
3140       // tty->print_cr("optimize barrier on barrier");
3141       return false;
3142     case Op_Parm:
3143       // tty->print_cr("optimize barrier on input arg");
3144       return false;
3145     case Op_DecodeN:
3146     case Op_EncodeP:
3147       return needs_barrier_impl(phase, n->in(1), visited);
3148     case Op_LoadN:
3149       return true;
3150     case Op_CMoveN:
3151     case Op_CMoveP:
3152       return needs_barrier_impl(phase, n->in(2), visited) ||
3153              needs_barrier_impl(phase, n->in(3), visited);
3154     case Op_ShenandoahEnqueueBarrier:
3155       return needs_barrier_impl(phase, n->in(1), visited);
3156     case Op_CreateEx:
3157       return false;
3158     default:
3159       break;
3160   }
3161 #ifdef ASSERT
3162   tty->print("need barrier on?: ");
3163   tty->print_cr("ins:");
3164   n->dump(2);
3165   tty->print_cr("outs:");
3166   n->dump(-2);
3167   ShouldNotReachHere();
3168 #endif
3169   return true;
3170 }
3171 
3172 bool ShenandoahLoadReferenceBarrierNode::is_redundant() {
3173   Unique_Node_List visited;
3174   Node_Stack stack(0);
3175   stack.push(this, 0);
3176 
3177   // Check if the barrier is actually useful: go over nodes looking for useful uses
3178   // (e.g. memory accesses). Stop once we detect a required use. Otherwise, walk
3179   // until we run out of nodes, and then declare the barrier redundant.
3180   while (stack.size() > 0) {
3181     Node* n = stack.node();
3182     if (visited.member(n)) {
3183       stack.pop();
3184       continue;
3185     }
3186     visited.push(n);
3187     bool visit_users = false;
3188     switch (n->Opcode()) {
3189       case Op_CallStaticJava:
3190       case Op_CallDynamicJava:
3191       case Op_CallLeaf:
3192       case Op_CallLeafNoFP:
3193       case Op_CompareAndSwapL:
3194       case Op_CompareAndSwapI:
3195       case Op_CompareAndSwapB:
3196       case Op_CompareAndSwapS:
3197       case Op_CompareAndSwapN:
3198       case Op_CompareAndSwapP:
3199       case Op_CompareAndExchangeL:
3200       case Op_CompareAndExchangeI:
3201       case Op_CompareAndExchangeB:
3202       case Op_CompareAndExchangeS:
3203       case Op_CompareAndExchangeN:
3204       case Op_CompareAndExchangeP:
3205       case Op_WeakCompareAndSwapL:
3206       case Op_WeakCompareAndSwapI:
3207       case Op_WeakCompareAndSwapB:
3208       case Op_WeakCompareAndSwapS:
3209       case Op_WeakCompareAndSwapN:
3210       case Op_WeakCompareAndSwapP:
3211       case Op_ShenandoahCompareAndSwapN:
3212       case Op_ShenandoahCompareAndSwapP:
3213       case Op_ShenandoahWeakCompareAndSwapN:
3214       case Op_ShenandoahWeakCompareAndSwapP:
3215       case Op_ShenandoahCompareAndExchangeN:
3216       case Op_ShenandoahCompareAndExchangeP:
3217       case Op_GetAndSetL:
3218       case Op_GetAndSetI:
3219       case Op_GetAndSetB:
3220       case Op_GetAndSetS:
3221       case Op_GetAndSetP:
3222       case Op_GetAndSetN:
3223       case Op_GetAndAddL:
3224       case Op_GetAndAddI:
3225       case Op_GetAndAddB:
3226       case Op_GetAndAddS:
3227       case Op_ShenandoahEnqueueBarrier:
3228       case Op_FastLock:
3229       case Op_FastUnlock:
3230       case Op_Rethrow:
3231       case Op_Return:
3232       case Op_StoreB:
3233       case Op_StoreC:
3234       case Op_StoreD:
3235       case Op_StoreF:
3236       case Op_StoreL:
3237       case Op_StoreLConditional:
3238       case Op_StoreI:
3239       case Op_StoreIConditional:
3240       case Op_StoreN:
3241       case Op_StoreP:
3242       case Op_StoreVector:
3243       case Op_StrInflatedCopy:
3244       case Op_StrCompressedCopy:
3245       case Op_EncodeP:
3246       case Op_CastP2X:
3247       case Op_SafePoint:
3248       case Op_EncodeISOArray:
3249       case Op_AryEq:
3250       case Op_StrEquals:
3251       case Op_StrComp:
3252       case Op_StrIndexOf:
3253       case Op_StrIndexOfChar:
3254       case Op_HasNegatives:
3255         // Known to require barriers
3256         return false;
3257       case Op_CmpP: {
3258         if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) ||
3259             n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
3260           // One of the sides is known null; no need for a barrier.
3261         } else {
3262           return false;
3263         }
3264         break;
3265       }
3266       case Op_LoadB:
3267       case Op_LoadUB:
3268       case Op_LoadUS:
3269       case Op_LoadD:
3270       case Op_LoadF:
3271       case Op_LoadL:
3272       case Op_LoadI:
3273       case Op_LoadS:
3274       case Op_LoadN:
3275       case Op_LoadP:
3276       case Op_LoadVector: {
3277         const TypePtr* adr_type = n->adr_type();
3278         int alias_idx = Compile::current()->get_alias_index(adr_type);
3279         Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
3280         ciField* field = alias_type->field();
3281         bool is_static = field != NULL && field->is_static();
3282         bool is_final = field != NULL && field->is_final();
3283 
3284         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
3285           // Loading the constant does not require barriers: it should already
3286           // be handled as part of the GC roots.
3287         } else {
3288           return false;
3289         }
3290         break;
3291       }
3292       case Op_Conv2B:
3293       case Op_LoadRange:
3294       case Op_LoadKlass:
3295       case Op_LoadNKlass:
3296         // Do not require barriers
3297         break;
3298       case Op_AddP:
3299       case Op_CheckCastPP:
3300       case Op_CastPP:
3301       case Op_CMoveP:
3302       case Op_Phi:
3303       case Op_ShenandoahLoadReferenceBarrier:
3304         // Whether or not these need barriers depends on their users.
3305         visit_users = true;
3306         break;
3307       default: {
3308 #ifdef ASSERT
3309         fatal("Unknown node in is_redundant: %s", NodeClassNames[n->Opcode()]);
3310 #else
3311         // Default to keeping excess barriers, rather than missing some.
3312         return false;
3313 #endif
3314       }
3315     }
3316 
3317     stack.pop();
3318     if (visit_users) {
3319       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3320         Node* user = n->fast_out(i);
3321         if (user != NULL) {
3322           stack.push(user, 0);
3323         }
3324       }
3325     }
3326   }
3327 
3328   // No need for barrier found.
3329   return true;
3330 }
3331 
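     // Recognizes a value that is the result of a null check whose failing
     // path goes to an uncommon trap (a CastPP on the non-null branch of a
     // CmpP-against-NULL test) and returns that uncommon trap call, or NULL
     // if the pattern does not match.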
3332 CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
3333   Node* val = in(ValueIn);
3334 
3335   const Type* val_t = igvn.type(val);
3336 
3337   if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
3338       val->Opcode() == Op_CastPP &&
3339       val->in(0) != NULL &&
3340       val->in(0)->Opcode() == Op_IfTrue &&
3341       val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
3342       val->in(0)->in(0)->is_If() &&
3343       val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
3344       val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
3345       val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
3346       val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
3347       val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
3348     assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
3349     CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
3350     return unc;
3351   }
3352   return NULL;
3353 }