1 /*
   2  * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "opto/compile.hpp"
  26 #include "opto/castnode.hpp"
  27 #include "opto/graphKit.hpp"
  28 #include "opto/idealKit.hpp"
  29 #include "opto/loopnode.hpp"
  30 #include "opto/macro.hpp"
  31 #include "opto/node.hpp"
  32 #include "opto/type.hpp"
  33 #include "utilities/macros.hpp"
  34 #include "gc/z/c2/zBarrierSetC2.hpp"
  35 #include "gc/z/zThreadLocalData.hpp"
  36 #include "gc/z/zBarrierSetRuntime.hpp"
  37 
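     // == ZBarrierSetC2State ==
     // Compile-time bookkeeping: keeps track of every unexpanded LoadBarrierNode so
     // that the barriers can be found again for optimization and macro expansion.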
  38 ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena)
  39   : _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8,  0, NULL)) {}
  40 
  41 int ZBarrierSetC2State::load_barrier_count() const {
  42   return _load_barrier_nodes->length();
  43 }
  44 
  45 void ZBarrierSetC2State::add_load_barrier_node(LoadBarrierNode* n) {
  46   assert(!_load_barrier_nodes->contains(n), "duplicate entry in expand list");
  47   _load_barrier_nodes->append(n);
  48 }
  49 
  50 void ZBarrierSetC2State::remove_load_barrier_node(LoadBarrierNode* n) {
  51   // This function may be called twice for a node, so check that
  52   // the node is in the array before attempting to remove it.
  53   if (_load_barrier_nodes->contains(n)) {
  54     _load_barrier_nodes->remove(n);
  55   }
  56 }
  57 
  58 LoadBarrierNode* ZBarrierSetC2State::load_barrier_node(int idx) const {
  59   return _load_barrier_nodes->at(idx);
  60 }
  61 
  62 void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  63   return new(comp_arena) ZBarrierSetC2State(comp_arena);
  64 }
  65 
  66 ZBarrierSetC2State* ZBarrierSetC2::state() const {
  67   return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
  68 }
  69 
  70 bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
  71   // 1. This step follows potential oop projections of a load barrier before expansion
  72   if (node->is_Proj()) {
  73     node = node->in(0);
  74   }
  75 
  76   // 2. This step checks for unexpanded load barriers
  77   if (node->is_LoadBarrier()) {
  78     return true;
  79   }
  80 
  81   // 3. This step checks for the phi corresponding to an optimized load barrier expansion
  82   if (node->is_Phi()) {
  83     PhiNode* phi = node->as_Phi();
  84     Node* n = phi->in(1);
  85     if (n != NULL && (n->is_LoadBarrierSlowReg() ||  n->is_LoadBarrierWeakSlowReg())) {
  86       return true;
  87     }
  88   }
  89 
  90   return false;
  91 }
  92 
  93 void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
  94   if (node->is_LoadBarrier()) {
  95     state()->add_load_barrier_node(node->as_LoadBarrier());
  96   }
  97 }
  98 
  99 void ZBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
 100   if (node->is_LoadBarrier()) {
 101     state()->remove_load_barrier_node(node->as_LoadBarrier());
 102   }
 103 }
 104 
 105 void ZBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {
 106   // Remove useless LoadBarrier nodes
 107   ZBarrierSetC2State* s = state();
 108   for (int i = s->load_barrier_count()-1; i >= 0; i--) {
 109     LoadBarrierNode* n = s->load_barrier_node(i);
 110     if (!useful.member(n)) {
 111       unregister_potential_barrier_node(n);
 112     }
 113   }
 114 }
 115 
 116 void ZBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
 117   if (node->is_LoadBarrier() && !node->as_LoadBarrier()->has_true_uses()) {
 118     igvn->_worklist.push(node);
 119   }
 120 }
 121 
 122 void ZBarrierSetC2::find_dominating_barriers(PhaseIterGVN& igvn) {
 123   // Look for dominating barriers on the same address only once all
 124   // other loop opts are over: loop opts may cause a safepoint to be
 125   // inserted between a barrier and its dominating barrier.
 126   Compile* C = Compile::current();
 127   ZBarrierSetC2* bs = (ZBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2();
 128   ZBarrierSetC2State* s = bs->state();
 129   if (s->load_barrier_count() >= 2) {
 130     Compile::TracePhase tp("idealLoop", &C->timers[Phase::_t_idealLoop]);
 131     PhaseIdealLoop ideal_loop(igvn, LoopOptsLastRound);
 132     if (C->major_progress()) C->print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
 133   }
 134 }
 135 
 136 void ZBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {
 137   // Permanent temporary workaround
 138   // Load barriers may have non-obvious dead uses keeping them alive during parsing. The use is
 139   // removed by RemoveUseless (after parsing, before optimize), but the barriers won't be added to
 140   // the worklist. Unless we add them explicitly, they are not guaranteed to end up there.
 141   ZBarrierSetC2State* s = state();
 142 
 143   for (int i = 0; i < s->load_barrier_count(); i++) {
 144     LoadBarrierNode* n = s->load_barrier_node(i);
 145     worklist->push(n);
 146   }
 147 }
 148 
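     // Signature of the slow-path runtime call used by the basic barrier expansion:
     // (oop, field address) -> healed oop. Passing a null address means the healed
     // oop will not be written back to the field.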
 149 const TypeFunc* ZBarrierSetC2::load_barrier_Type() const {
 150   const Type** fields;
 151 
 152   // Create input types (domain)
 153   fields = TypeTuple::fields(2);
 154   fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
 155   fields[TypeFunc::Parms+1] = TypeOopPtr::BOTTOM;
 156   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
 157 
 158   // Create result type (range)
 159   fields = TypeTuple::fields(1);
 160   fields[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM;
 161   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
 162 
 163   return TypeFunc::make(domain, range);
 164 }
 165 
 166 // == LoadBarrierNode ==
 167 
 168 LoadBarrierNode::LoadBarrierNode(Compile* C,
 169                                  Node* c,
 170                                  Node* mem,
 171                                  Node* val,
 172                                  Node* adr,
 173                                  bool weak,
 174                                  bool writeback,
 175                                  bool oop_reload_allowed) :
 176     MultiNode(Number_of_Inputs),
 177     _weak(weak),
 178     _writeback(writeback),
 179     _oop_reload_allowed(oop_reload_allowed) {
 180   init_req(Control, c);
 181   init_req(Memory, mem);
 182   init_req(Oop, val);
 183   init_req(Address, adr);
 184   init_req(Similar, C->top());
 185 
 186   init_class_id(Class_LoadBarrier);
 187   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 188   bs->register_potential_barrier_node(this);
 189 }
 190 
 191 const Type *LoadBarrierNode::bottom_type() const {
 192   const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
 193   Node* in_oop = in(Oop);
 194   floadbarrier[Control] = Type::CONTROL;
 195   floadbarrier[Memory] = Type::MEMORY;
 196   floadbarrier[Oop] = in_oop == NULL ? Type::TOP : in_oop->bottom_type();
 197   return TypeTuple::make(Number_of_Outputs, floadbarrier);
 198 }
 199 
 200 const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
 201   const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
 202   const Type* val_t = phase->type(in(Oop));
 203   floadbarrier[Control] = Type::CONTROL;
 204   floadbarrier[Memory] = Type::MEMORY;
 205   floadbarrier[Oop] = val_t;
 206   return TypeTuple::make(Number_of_Outputs, floadbarrier);
 207 }
 208 
 209 bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n) {
 210   if (phase != NULL) {
 211     return phase->is_dominator(d, n);
 212   }
 213 
 214   for (int i = 0; i < 10 && n != NULL; i++) {
 215     n = IfNode::up_one_dom(n, linear_only);
 216     if (n == d) {
 217       return true;
 218     }
 219   }
 220 
 221   return false;
 222 }
 223 
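     // Look for another load barrier on the same oop (or, when look_for_similar is
     // set, on the same address) whose control dominates this barrier's control.
     // Such a barrier makes this barrier redundant.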
 224 LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) {
 225   Node* val = in(LoadBarrierNode::Oop);
 226   if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) {
 227     LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier();
 228     assert(lb->in(Address) == in(Address), "");
 229     // The load barrier on the Similar edge dominates, so if it now has the same Oop input it can replace this barrier.
 230     if (lb->in(Oop) == in(Oop)) {
 231       return lb;
 232     }
 233     // Follow chain of load barrier through Similar edges
 234     while (!lb->in(Similar)->is_top()) {
 235       lb = lb->in(Similar)->in(0)->as_LoadBarrier();
 236       assert(lb->in(Address) == in(Address), "");
 237     }
 238     if (lb != in(Similar)->in(0)) {
 239       return lb;
 240     }
 241   }
 242   for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
 243     Node* u = val->fast_out(i);
 244     if (u != this && u->is_LoadBarrier() && u->in(Oop) == val && u->as_LoadBarrier()->has_true_uses()) {
 245       Node* this_ctrl = in(LoadBarrierNode::Control);
 246       Node* other_ctrl = u->in(LoadBarrierNode::Control);
 247       if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
 248         return u->as_LoadBarrier();
 249       }
 250     }
 251   }
 252 
 253   if (ZVerifyLoadBarriers || can_be_eliminated()) {
 254     return NULL;
 255   }
 256 
 257   if (!look_for_similar) {
 258     return NULL;
 259   }
 260 
 261   Node* addr = in(LoadBarrierNode::Address);
 262   for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
 263     Node* u = addr->fast_out(i);
 264     if (u != this && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
 265       Node* this_ctrl = in(LoadBarrierNode::Control);
 266       Node* other_ctrl = u->in(LoadBarrierNode::Control);
 267       if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
 268         ResourceMark rm;
 269         Unique_Node_List wq;
 270         wq.push(in(LoadBarrierNode::Control));
 271         bool ok = true;
 272         bool dom_found = false;
 273         for (uint next = 0; next < wq.size(); ++next) {
 274           Node *n = wq.at(next);
 275           if (n->is_top()) {
 276             return NULL;
 277           }
 278           assert(n->is_CFG(), "");
 279           if (n->is_SafePoint()) {
 280             ok = false;
 281             break;
 282           }
 283           if (n == u) {
 284             dom_found = true;
 285             continue;
 286           }
 287           if (n->is_Region()) {
 288             for (uint i = 1; i < n->req(); i++) {
 289               Node* m = n->in(i);
 290               if (m != NULL) {
 291                 wq.push(m);
 292               }
 293             }
 294           } else {
 295             Node* m = n->in(0);
 296             if (m != NULL) {
 297               wq.push(m);
 298             }
 299           }
 300         }
 301         if (ok) {
 302           assert(dom_found, "");
 303           return u->as_LoadBarrier();
 304         }
 305         break;
 306       }
 307     }
 308   }
 309 
 310   return NULL;
 311 }
 312 
 313 void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
 314   // A change to this barrier may affect a dominated barrier, so re-push those
 315   Node* val = in(LoadBarrierNode::Oop);
 316 
 317   for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
 318     Node* u = val->fast_out(i);
 319     if (u != this && u->is_LoadBarrier() && u->in(Oop) == val) {
 320       Node* this_ctrl = in(Control);
 321       Node* other_ctrl = u->in(Control);
 322       if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
 323         igvn->_worklist.push(u);
 324       }
 325     }
 326 
 327     Node* addr = in(LoadBarrierNode::Address);
 328     for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
 329       Node* u = addr->fast_out(i);
 330       if (u != this && u->is_LoadBarrier() && u->in(Similar)->is_top()) {
 331         Node* this_ctrl = in(Control);
 332         Node* other_ctrl = u->in(Control);
 333         if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
 334           igvn->_worklist.push(u);
 335         }
 336       }
 337     }
 338   }
 339 }
 340 
 341 Node *LoadBarrierNode::Identity(PhaseGVN *phase) {
 342   if (!phase->C->directive()->ZOptimizeLoadBarriersOption) {
 343     return this;
 344   }
 345 
 346   // Look for an identical dominating barrier (linear control walk only)
 347   LoadBarrierNode* dominating_barrier = has_dominating_barrier(NULL, true, false);
 348   if (dominating_barrier != NULL) {
 349     assert(dominating_barrier->in(Oop) == in(Oop), "");
 350     return dominating_barrier;
 351   }
 352 
 353   return this;
 354 }
 355 
 356 Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 357   if (remove_dead_region(phase, can_reshape)) {
 358     return this;
 359   }
 360 
 361   Node* val = in(Oop);
 362   Node* mem = in(Memory);
 363   Node* ctrl = in(Control);
 364   Node* adr = in(Address);
 365   assert(val->Opcode() != Op_LoadN, "");
 366 
 367   if (mem->is_MergeMem()) {
 368     Node* new_mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
 369     set_req(Memory, new_mem);
 370     if (mem->outcnt() == 0 && can_reshape) {
 371       phase->is_IterGVN()->_worklist.push(mem);
 372     }
 373 
 374     return this;
 375   }
 376 
 377   bool optimizeLoadBarriers = phase->C->directive()->ZOptimizeLoadBarriersOption;
 378   LoadBarrierNode* dominating_barrier = optimizeLoadBarriers ? has_dominating_barrier(NULL, !can_reshape, !phase->C->major_progress()) : NULL;
 379   if (dominating_barrier != NULL && dominating_barrier->in(Oop) != in(Oop)) {
 380     assert(in(Address) == dominating_barrier->in(Address), "");
 381     set_req(Similar, dominating_barrier->proj_out(Oop));
 382     return this;
 383   }
 384 
 385   bool eliminate = (optimizeLoadBarriers && !(val->is_Phi() || val->Opcode() == Op_LoadP || val->Opcode() == Op_GetAndSetP || val->is_DecodeN())) ||
 386                    (can_reshape && (dominating_barrier != NULL || !has_true_uses()));
 387 
 388   if (eliminate) {
 389     if (can_reshape) {
 390       PhaseIterGVN* igvn = phase->is_IterGVN();
 391       Node* out_ctrl = proj_out_or_null(Control);
 392       Node* out_res = proj_out_or_null(Oop);
 393 
 394       if (out_ctrl != NULL) {
 395         igvn->replace_node(out_ctrl, ctrl);
 396       }
 397 
 398       // That transformation may cause the Similar edge on the load barrier to be invalid
 399       fix_similar_in_uses(igvn);
 400       if (out_res != NULL) {
 401         if (dominating_barrier != NULL) {
 402           igvn->replace_node(out_res, dominating_barrier->proj_out(Oop));
 403         } else {
 404           igvn->replace_node(out_res, val);
 405         }
 406       }
 407     }
 408 
 409     return new ConINode(TypeInt::ZERO);
 410   }
 411 
 412   // If the Similar edge is no longer a load barrier, clear it
 413   Node* similar = in(Similar);
 414   if (!similar->is_top() && !(similar->is_Proj() && similar->in(0)->is_LoadBarrier())) {
 415     set_req(Similar, phase->C->top());
 416     return this;
 417   }
 418 
 419   if (can_reshape) {
 420     // If this barrier is linked through the Similar edge by a
 421     // dominated barrier and both barriers have the same Oop field,
 422     // the dominated barrier can go away, so push it for reprocessing.
 423     // We also want to avoid having a barrier depend on another dominating
 424     // barrier through its Similar edge that itself depends on yet another
 425     // barrier through its Similar edge; the first should rather depend
 426     // directly on the third.
 427     PhaseIterGVN* igvn = phase->is_IterGVN();
 428     Node* out_res = proj_out(Oop);
 429     for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
 430       Node* u = out_res->fast_out(i);
 431       if (u->is_LoadBarrier() && u->in(Similar) == out_res &&
 432           (u->in(Oop) == val || !u->in(Similar)->is_top())) {
 433         igvn->_worklist.push(u);
 434       }
 435     }
 436 
 437     push_dominated_barriers(igvn);
 438   }
 439 
 440   return NULL;
 441 }
 442 
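     // Reset the Similar edge of any barrier that references this barrier's Oop
     // projection, typically because that projection is about to be removed or
     // replaced and the Similar edge would otherwise become invalid.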
 443 void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
 444   Node* out_res = proj_out_or_null(Oop);
 445   if (out_res == NULL) {
 446     return;
 447   }
 448 
 449   for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
 450     Node* u = out_res->fast_out(i);
 451     if (u->is_LoadBarrier() && u->in(Similar) == out_res) {
 452       igvn->replace_input_of(u, Similar, igvn->C->top());
 453       --i;
 454       --imax;
 455     }
 456   }
 457 }
 458 
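     // A "true" use is any use of the Oop projection other than the Similar edge
     // of another load barrier. A barrier without true uses can be eliminated.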
 459 bool LoadBarrierNode::has_true_uses() const {
 460   Node* out_res = proj_out_or_null(Oop);
 461   if (out_res == NULL) {
 462     return false;
 463   }
 464 
 465   for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
 466     Node* u = out_res->fast_out(i);
 467     if (!u->is_LoadBarrier() || u->in(Similar) != out_res) {
 468       return true;
 469     }
 470   }
 471 
 472   return false;
 473 }
 474 
 475 // == Accesses ==
 476 
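     // Expand an oop CompareAndSwap with barrier semantics. A failing CAS may have
     // failed only because the field still holds a stale (unhealed) oop, so the
     // expansion is roughly:
     //
     //   if (!CAS(addr, expected, new_val)) {
     //     healed = load_barrier(load(addr), addr);
     //     if (healed == expected) {
     //       CAS(addr, expected, new_val);  // retry once against the healed field
     //     }
     //   }
     //
     // The returned phi is the boolean result of the operation.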
 477 Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicParseAccess& access) const {
 478   assert(!UseCompressedOops, "Not allowed");
 479   CompareAndSwapNode* cas = (CompareAndSwapNode*)access.raw_access();
 480   PhaseGVN& gvn = access.gvn();
 481   Compile* C = Compile::current();
 482   GraphKit* kit = access.kit();
 483 
 484   Node* in_ctrl     = cas->in(MemNode::Control);
 485   Node* in_mem      = cas->in(MemNode::Memory);
 486   Node* in_adr      = cas->in(MemNode::Address);
 487   Node* in_val      = cas->in(MemNode::ValueIn);
 488   Node* in_expected = cas->in(LoadStoreConditionalNode::ExpectedIn);
 489 
 490   float likely                   = PROB_LIKELY(0.999);
 491 
 492   const TypePtr *adr_type        = gvn.type(in_adr)->isa_ptr();
 493   Compile::AliasType* alias_type = C->alias_type(adr_type);
 494   int alias_idx                  = C->get_alias_index(adr_type);
 495 
 496   // Outer check - true: continue, false: load and check
 497   Node* region   = new RegionNode(3);
 498   Node* phi      = new PhiNode(region, TypeInt::BOOL);
 499   Node* phi_mem  = new PhiNode(region, Type::MEMORY, adr_type);
 500 
 501   // Inner check - is the healed ref equal to the expected
 502   Node* region2  = new RegionNode(3);
 503   Node* phi2     = new PhiNode(region2, TypeInt::BOOL);
 504   Node* phi_mem2 = new PhiNode(region2, Type::MEMORY, adr_type);
 505 
 506   // CAS node returns 0 or 1
 507   Node* cmp     = gvn.transform(new CmpINode(cas, kit->intcon(0)));
 508   Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
 509   IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
 510   Node* then    = gvn.transform(new IfTrueNode(iff));
 511   Node* elsen   = gvn.transform(new IfFalseNode(iff));
 512 
 513   Node* scmemproj1   = gvn.transform(new SCMemProjNode(cas));
 514 
 515   kit->set_memory(scmemproj1, alias_idx);
 516   phi_mem->init_req(1, scmemproj1);
 517   phi_mem2->init_req(2, scmemproj1);
 518 
 519   // CAS fail - reload and heal oop
 520   Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
 521   Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
 522   Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
 523   Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
 524 
 525   // Check load
 526   Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
 527   Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
 528   Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
 529   Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
 530   IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
 531   Node* then2   = gvn.transform(new IfTrueNode(iff2));
 532   Node* elsen2  = gvn.transform(new IfFalseNode(iff2));
 533 
 534   // redo CAS
 535   Node* cas2       = gvn.transform(new CompareAndSwapPNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, cas->order()));
 536   Node* scmemproj2 = gvn.transform(new SCMemProjNode(cas2));
 537   kit->set_control(elsen2);
 538   kit->set_memory(scmemproj2, alias_idx);
 539 
 540   // Merge inner flow - check if the healed oop was equal to the expected value.
 541   region2->set_req(1, kit->control());
 542   region2->set_req(2, then2);
 543   phi2->set_req(1, cas2);
 544   phi2->set_req(2, kit->intcon(0));
 545   phi_mem2->init_req(1, scmemproj2);
 546   kit->set_memory(phi_mem2, alias_idx);
 547 
 548   // Merge outer flow - then check if first CAS succeeded
 549   region->set_req(1, then);
 550   region->set_req(2, region2);
 551   phi->set_req(1, kit->intcon(1));
 552   phi->set_req(2, phi2);
 553   phi_mem->init_req(2, phi_mem2);
 554   kit->set_memory(phi_mem, alias_idx);
 555 
 556   gvn.transform(region2);
 557   gvn.transform(phi2);
 558   gvn.transform(phi_mem2);
 559   gvn.transform(region);
 560   gvn.transform(phi);
 561   gvn.transform(phi_mem);
 562 
 563   kit->set_control(region);
 564   kit->insert_mem_bar(Op_MemBarCPUOrder);
 565 
 566   return phi;
 567 }
 568 
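     // Same expansion as make_cas_loadbarrier(), but for CompareAndExchangeP: the
     // merged result is the witnessed oop value rather than a boolean.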
 569 Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicParseAccess& access) const {
 570   CompareAndExchangePNode* cmpx = (CompareAndExchangePNode*)access.raw_access();
 571   GraphKit* kit = access.kit();
 572   PhaseGVN& gvn = kit->gvn();
 573   Compile* C = Compile::current();
 574 
 575   Node* in_ctrl     = cmpx->in(MemNode::Control);
 576   Node* in_mem      = cmpx->in(MemNode::Memory);
 577   Node* in_adr      = cmpx->in(MemNode::Address);
 578   Node* in_val      = cmpx->in(MemNode::ValueIn);
 579   Node* in_expected = cmpx->in(LoadStoreConditionalNode::ExpectedIn);
 580 
 581   float likely                   = PROB_LIKELY(0.999);
 582 
 583   const TypePtr *adr_type        = cmpx->get_ptr_type();
 584   Compile::AliasType* alias_type = C->alias_type(adr_type);
 585   int alias_idx                  = C->get_alias_index(adr_type);
 586 
 587   // Outer check - true: continue, false: load and check
 588   Node* region  = new RegionNode(3);
 589   Node* phi     = new PhiNode(region, adr_type);
 590 
 591   // Inner check - is the healed ref equal to the expected
 592   Node* region2 = new RegionNode(3);
 593   Node* phi2    = new PhiNode(region2, adr_type);
 594 
 595   // Check if cmpx succeeded
 596   Node* cmp     = gvn.transform(new CmpPNode(cmpx, in_expected));
 597   Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::eq))->as_Bool();
 598   IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
 599   Node* then    = gvn.transform(new IfTrueNode(iff));
 600   Node* elsen   = gvn.transform(new IfFalseNode(iff));
 601 
 602   Node* scmemproj1  = gvn.transform(new SCMemProjNode(cmpx));
 603   kit->set_memory(scmemproj1, alias_idx);
 604 
 605   // CAS fail - reload and heal oop
 606   Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
 607   Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
 608   Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
 609   Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
 610 
 611   // Check load
 612   Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
 613   Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
 614   Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
 615   Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
 616   IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
 617   Node* then2   = gvn.transform(new IfTrueNode(iff2));
 618   Node* elsen2  = gvn.transform(new IfFalseNode(iff2));
 619 
 620   // Redo CAS
 621   Node* cmpx2      = gvn.transform(new CompareAndExchangePNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, adr_type, cmpx->get_ptr_type(), cmpx->order()));
 622   Node* scmemproj2 = gvn.transform(new SCMemProjNode(cmpx2));
 623   kit->set_control(elsen2);
 624   kit->set_memory(scmemproj2, alias_idx);
 625 
 626   // Merge inner flow - check if the healed oop was equal to the expected value.
 627   region2->set_req(1, kit->control());
 628   region2->set_req(2, then2);
 629   phi2->set_req(1, cmpx2);
 630   phi2->set_req(2, barrierdata);
 631 
 632   // Merge outer flow - then check if the first CAS succeeded
 633   region->set_req(1, then);
 634   region->set_req(2, region2);
 635   phi->set_req(1, cmpx);
 636   phi->set_req(2, phi2);
 637 
 638   gvn.transform(region2);
 639   gvn.transform(phi2);
 640   gvn.transform(region);
 641   gvn.transform(phi);
 642 
 643   kit->set_control(region);
 644   kit->set_memory(in_mem, alias_idx);
 645   kit->insert_mem_bar(Op_MemBarCPUOrder);
 646 
 647   return phi;
 648 }
 649 
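     // Attach a LoadBarrierNode to a loaded oop and return the healed oop
     // projection, or the original value if the barrier is folded away by GVN.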
 650 Node* ZBarrierSetC2::load_barrier(GraphKit* kit, Node* val, Node* adr, bool weak, bool writeback, bool oop_reload_allowed) const {
 651   PhaseGVN& gvn = kit->gvn();
 652   Node* barrier = new LoadBarrierNode(Compile::current(), kit->control(), kit->memory(TypeRawPtr::BOTTOM), val, adr, weak, writeback, oop_reload_allowed);
 653   Node* transformed_barrier = gvn.transform(barrier);
 654 
 655   if (transformed_barrier->is_LoadBarrier()) {
 656     if (barrier == transformed_barrier) {
 657       kit->set_control(gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control)));
 658     }
 659     Node* result = gvn.transform(new ProjNode(transformed_barrier, LoadBarrierNode::Oop));
 660     assert(is_gc_barrier_node(result), "sanity");
 661     assert(step_over_gc_barrier(result) == val, "sanity");
 662     return result;
 663   } else {
 664     return val;
 665   }
 666 }
 667 
 668 static bool barrier_needed(C2Access& access) {
 669   return ZBarrierSet::barrier_needed(access.decorators(), access.type());
 670 }
 671 
 672 Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
 673   Node* p = BarrierSetC2::load_at_resolved(access, val_type);
 674   if (!barrier_needed(access)) {
 675     return p;
 676   }
 677 
 678   bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;
 679 
 680   assert(access.is_parse_access(), "entry not supported at optimization time");
 681   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 682   GraphKit* kit = parse_access.kit();
 683   PhaseGVN& gvn = kit->gvn();
 684   Node* adr = access.addr().node();
 685   Node* heap_base_oop = access.base();
 686   bool unsafe = (access.decorators() & C2_UNSAFE_ACCESS) != 0;
 687   if (unsafe) {
 688     if (!ZVerifyLoadBarriers) {
 689       p = load_barrier(kit, p, adr);
 690     } else {
 691       if (!TypePtr::NULL_PTR->higher_equal(gvn.type(heap_base_oop))) {
 692         p = load_barrier(kit, p, adr);
 693       } else {
 694         IdealKit ideal(kit);
 695         IdealVariable res(ideal);
 696 #define __ ideal.
 697         __ declarations_done();
 698         __ set(res, p);
 699         __ if_then(heap_base_oop, BoolTest::ne, kit->null(), PROB_UNLIKELY(0.999)); {
 700           kit->sync_kit(ideal);
 701           p = load_barrier(kit, p, adr);
 702           __ set(res, p);
 703           __ sync_kit(kit);
 704         } __ end_if();
 705         kit->final_sync(ideal);
 706         p = __ value(res);
 707 #undef __
 708       }
 709     }
 710     return p;
 711   } else {
 712     return load_barrier(parse_access.kit(), p, access.addr().node(), weak, true, true);
 713   }
 714 }
 715 
 716 Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
 717                                                     Node* new_val, const Type* val_type) const {
 718   Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
 719   if (!barrier_needed(access)) {
 720     return result;
 721   }
 722 
 723   access.set_needs_pinning(false);
 724   return make_cmpx_loadbarrier(access);
 725 }
 726 
 727 Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
 728                                                      Node* new_val, const Type* value_type) const {
 729   Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
 730   if (!barrier_needed(access)) {
 731     return result;
 732   }
 733 
 734   Node* load_store = access.raw_access();
 735   bool weak_cas = (access.decorators() & C2_WEAK_CMPXCHG) != 0;
 736   bool expected_is_null = (expected_val->get_ptr_type() == TypePtr::NULL_PTR);
 737 
 738   if (!expected_is_null) {
 739     if (weak_cas) {
 740       access.set_needs_pinning(false);
 741       load_store = make_cas_loadbarrier(access);
 742     } else {
 743       access.set_needs_pinning(false);
 744       load_store = make_cas_loadbarrier(access);
 745     }
 746   }
 747 
 748   return load_store;
 749 }
 750 
 751 Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
 752   Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
 753   if (!barrier_needed(access)) {
 754     return result;
 755   }
 756 
 757   Node* load_store = access.raw_access();
 758   Node* adr = access.addr().node();
 759 
 760   assert(access.is_parse_access(), "entry not supported at optimization time");
 761   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 762   return load_barrier(parse_access.kit(), load_store, adr, false, false, false);
 763 }
 764 
 765 // == Macro Expansion ==
 766 
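     // Decide how to expand a single load barrier: eliminate it by cloning and
     // pinning its load below the dominating barrier when possible, otherwise
     // expand it into either the basic (runtime call) or the optimized (stub) form.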
 767 void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const {
 768   Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
 769   Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
 770   Node* in_val  = barrier->in(LoadBarrierNode::Oop);
 771   Node* in_adr  = barrier->in(LoadBarrierNode::Address);
 772 
 773   Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
 774   Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);
 775 
 776   PhaseIterGVN &igvn = phase->igvn();
 777 
 778   if (ZVerifyLoadBarriers) {
 779     igvn.replace_node(out_res, in_val);
 780     igvn.replace_node(out_ctrl, in_ctrl);
 781     return;
 782   }
 783 
 784   if (barrier->can_be_eliminated()) {
 785     // Clone and pin the load for this barrier below the dominating
 786     // barrier: the load cannot be allowed to float above the
 787     // dominating barrier
 788     Node* load = in_val;
 789 
 790     if (load->is_Load()) {
 791       Node* new_load = load->clone();
 792       Node* addp = new_load->in(MemNode::Address);
 793       assert(addp->is_AddP() || addp->is_Phi() || addp->is_Load(), "bad address");
 794       Node* cast = new CastPPNode(addp, igvn.type(addp), true);
 795       Node* ctrl = NULL;
 796       Node* similar = barrier->in(LoadBarrierNode::Similar);
 797       if (similar->is_Phi()) {
 798         // already expanded
 799         ctrl = similar->in(0);
 800       } else {
 801         assert(similar->is_Proj() && similar->in(0)->is_LoadBarrier(), "unexpected graph shape");
 802         ctrl = similar->in(0)->as_LoadBarrier()->proj_out(LoadBarrierNode::Control);
 803       }
 804       assert(ctrl != NULL, "bad control");
 805       cast->set_req(0, ctrl);
 806       igvn.transform(cast);
 807       new_load->set_req(MemNode::Address, cast);
 808       igvn.transform(new_load);
 809 
 810       igvn.replace_node(out_res, new_load);
 811       igvn.replace_node(out_ctrl, in_ctrl);
 812       return;
 813     }
 814     // cannot eliminate
 815   }
 816 
 817   // There are two cases that require the basic load barrier
 818   // 1) When the writeback of a healed oop must be avoided (swap)
 819   // 2) When we must guarantee that no reload is done (swap, cas, cmpx)
 820   if (!barrier->is_writeback()) {
 821     assert(!barrier->oop_reload_allowed(), "writeback barriers should be marked as requires oop");
 822   }
 823 
 824   if (!barrier->oop_reload_allowed()) {
 825     expand_loadbarrier_basic(phase, barrier);
 826   } else {
 827     expand_loadbarrier_optimized(phase, barrier);
 828   }
 829 }
 830 
 831 // Basic load barrier using conventional argument passing
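     // The loaded oop is tested against the thread-local bad mask; if any bad bits
     // are set, a leaf call to the ZBarrierSetRuntime slow path heals the oop (and,
     // for writeback barriers, stores the healed oop back to the field). In
     // pseudo-code:
     //
     //   if (((uintptr_t)oop & bad_mask) != 0) {
     //     oop = ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded(oop, addr);
     //   }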
 832 void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
 833   PhaseIterGVN &igvn = phase->igvn();
 834 
 835   Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
 836   Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
 837   Node* in_val  = barrier->in(LoadBarrierNode::Oop);
 838   Node* in_adr  = barrier->in(LoadBarrierNode::Address);
 839 
 840   Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
 841   Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);
 842 
 843   float unlikely  = PROB_UNLIKELY(0.999);
 844   const Type* in_val_maybe_null_t = igvn.type(in_val);
 845 
 846   Node* jthread = igvn.transform(new ThreadLocalNode());
 847   Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
 848   Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
 849   Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
 850   Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
 851   Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
 852   Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
 853   IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
 854   Node* then = igvn.transform(new IfTrueNode(iff));
 855   Node* elsen = igvn.transform(new IfFalseNode(iff));
 856 
 857   Node* result_region;
 858   Node* result_val;
 859 
 860   result_region = new RegionNode(3);
 861   result_val = new PhiNode(result_region, TypeInstPtr::BOTTOM);
 862 
 863   result_region->set_req(1, elsen);
 864   Node* res = igvn.transform(new CastPPNode(in_val, in_val_maybe_null_t));
 865   res->init_req(0, elsen);
 866   result_val->set_req(1, res);
 867 
 868   const TypeFunc *tf = load_barrier_Type();
 869   Node* call;
 870   if (barrier->is_weak()) {
 871     call = new CallLeafNode(tf,
 872                             ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr(),
 873                             "ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded",
 874                             TypeRawPtr::BOTTOM);
 875   } else {
 876     call = new CallLeafNode(tf,
 877                             ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(),
 878                             "ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded",
 879                             TypeRawPtr::BOTTOM);
 880   }
 881 
 882   call->init_req(TypeFunc::Control, then);
 883   call->init_req(TypeFunc::I_O    , phase->top());
 884   call->init_req(TypeFunc::Memory , in_mem);
 885   call->init_req(TypeFunc::FramePtr, phase->top());
 886   call->init_req(TypeFunc::ReturnAdr, phase->top());
 887   call->init_req(TypeFunc::Parms+0, in_val);
 888   if (barrier->is_writeback()) {
 889     call->init_req(TypeFunc::Parms+1, in_adr);
 890   } else {
 891     // When slow path is called with a null address, the healed oop will not be written back
 892     call->init_req(TypeFunc::Parms+1, igvn.zerocon(T_OBJECT));
 893   }
 894   call = igvn.transform(call);
 895 
 896   Node* ctrl = igvn.transform(new ProjNode(call, TypeFunc::Control));
 897   res = igvn.transform(new ProjNode(call, TypeFunc::Parms));
 898   res = igvn.transform(new CheckCastPPNode(ctrl, res, in_val_maybe_null_t));
 899 
 900   result_region->set_req(2, ctrl);
 901   result_val->set_req(2, res);
 902 
 903   result_region = igvn.transform(result_region);
 904   result_val = igvn.transform(result_val);
 905 
 906   if (out_ctrl != NULL) { // The control projection may already have been optimized away
 907     igvn.replace_node(out_ctrl, result_region);
 908   }
 909   igvn.replace_node(out_res, result_val);
 910 }
 911 
 912 // Optimized, low-spill load barrier variant using a stub specialized on the register used
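     // Same bad-mask test as the basic expansion, but the slow path is represented
     // by a LoadBarrier(Weak)SlowReg node that is later turned into a call to the
     // register-specialized stub, avoiding the register spilling of a full runtime
     // call.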
 913 void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
 914   PhaseIterGVN &igvn = phase->igvn();
 915 #ifdef PRINT_NODE_TRAVERSALS
 916   Node* preceding_barrier_node = barrier->in(LoadBarrierNode::Oop);
 917 #endif
 918 
 919   Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
 920   Node* in_mem = barrier->in(LoadBarrierNode::Memory);
 921   Node* in_val = barrier->in(LoadBarrierNode::Oop);
 922   Node* in_adr = barrier->in(LoadBarrierNode::Address);
 923 
 924   Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
 925   Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);
 926 
 927   assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop to loadbarrier node cannot be null");
 928 
 929 #ifdef PRINT_NODE_TRAVERSALS
 930   tty->print("\n\n\nBefore barrier optimization:\n");
 931   traverse(barrier, out_ctrl, out_res, -1);
 932 
 933   tty->print("\nBefore barrier optimization:  preceding_barrier_node\n");
 934   traverse(preceding_barrier_node, out_ctrl, out_res, -1);
 935 #endif
 936 
 937   float unlikely  = PROB_UNLIKELY(0.999);
 938 
 939   Node* jthread = igvn.transform(new ThreadLocalNode());
 940   Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
 941   Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr,
 942                                                  TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(),
 943                                                  MemNode::unordered));
 944   Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
 945   Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
 946   Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
 947   Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
 948   IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
 949   Node* then = igvn.transform(new IfTrueNode(iff));
 950   Node* elsen = igvn.transform(new IfFalseNode(iff));
 951 
 952   Node* slow_path_surrogate;
 953   if (!barrier->is_weak()) {
 954     slow_path_surrogate = igvn.transform(new LoadBarrierSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
 955                                                                     (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
 956   } else {
 957     slow_path_surrogate = igvn.transform(new LoadBarrierWeakSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
 958                                                                         (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
 959   }
 960 
 961   // The slow-path surrogate also acts as the healed oop value on the slow path
 962   Node* new_loadp = slow_path_surrogate;
 963   // Create the final region/phi pair to converge control/data paths to downstream code
 964   Node* result_region = igvn.transform(new RegionNode(3));
 965   result_region->set_req(1, then);
 966   result_region->set_req(2, elsen);
 967 
 968   Node* result_phi = igvn.transform(new PhiNode(result_region, TypeInstPtr::BOTTOM));
 969   result_phi->set_req(1, new_loadp);
 970   result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));
 971 
 972   // Finally, connect the original outputs to the barrier region and phi to complete the expansion/substitution
 973   // The control projection may already have been optimized away
 974   if (out_ctrl != NULL) {
 975     igvn.replace_node(out_ctrl, result_region);
 976   }
 977   igvn.replace_node(out_res, result_phi);
 978 
 979   assert(barrier->outcnt() == 0, "LoadBarrier macro node has non-null outputs after expansion!");
 980 
 981 #ifdef PRINT_NODE_TRAVERSALS
 982   tty->print("\nAfter barrier optimization:  old out_ctrl\n");
 983   traverse(out_ctrl, out_ctrl, out_res, -1);
 984   tty->print("\nAfter barrier optimization:  old out_res\n");
 985   traverse(out_res, out_ctrl, out_res, -1);
 986   tty->print("\nAfter barrier optimization:  old barrier\n");
 987   traverse(barrier, out_ctrl, out_res, -1);
 988   tty->print("\nAfter barrier optimization:  preceding_barrier_node\n");
 989   traverse(preceding_barrier_node, result_region, result_phi, -1);
 990 #endif
 991 
 992   assert(is_gc_barrier_node(result_phi), "sanity");
 993   assert(step_over_gc_barrier(result_phi) == in_val, "sanity");
 994 
 995   return;
 996 }
 997 
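     // Expand all remaining load barriers. Barriers that can be eliminated are
     // expanded first, while the dominating barriers they rely on are still in
     // place; the remaining barriers are then expanded unconditionally.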
 998 bool ZBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
 999   Compile* C = Compile::current();
1000   PhaseIterGVN &igvn = macro->igvn();
1001   ZBarrierSetC2State* s = state();
1002   if (s->load_barrier_count() > 0) {
1003 #ifdef ASSERT
1004     verify_gc_barriers(false);
1005 #endif
1006     igvn.set_delay_transform(true);
1007     int skipped = 0;
1008     while (s->load_barrier_count() > skipped) {
1009       int load_barrier_count = s->load_barrier_count();
1010       LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
1011       if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
1012         // Node is unreachable, so don't try to expand it
1013         s->remove_load_barrier_node(n);
1014         continue;
1015       }
1016       if (!n->can_be_eliminated()) {
1017         skipped++;
1018         continue;
1019       }
1020       expand_loadbarrier_node(macro, n);
1021       assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
1022       if (C->failing())  return true;
1023     }
1024     while (s->load_barrier_count() > 0) {
1025       int load_barrier_count = s->load_barrier_count();
1026       LoadBarrierNode* n = s->load_barrier_node(load_barrier_count - 1);
1027       assert(!(igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())), "should have been processed already");
1028       assert(!n->can_be_eliminated(), "should have been processed already");
1029       expand_loadbarrier_node(macro, n);
1030       assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
1031       if (C->failing())  return true;
1032     }
1033     igvn.set_delay_transform(false);
1034     igvn.optimize();
1035     if (C->failing())  return true;
1036   }
1037   return false;
1038 }
1039 
1040 // == Loop optimization ==
1041 
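     // Replace lb with an equivalent dominating barrier if one exists. If the
     // dominating barrier guards a different oop loaded from the same address,
     // only record the relationship through the Similar edge.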
1042 static bool replace_with_dominating_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
1043   PhaseIterGVN &igvn = phase->igvn();
1044   Compile* C = Compile::current();
1045 
1046   LoadBarrierNode* lb2 = lb->has_dominating_barrier(phase, false, last_round);
1047   if (lb2 != NULL) {
1048     if (lb->in(LoadBarrierNode::Oop) != lb2->in(LoadBarrierNode::Oop)) {
1049       assert(lb->in(LoadBarrierNode::Address) == lb2->in(LoadBarrierNode::Address), "");
1050       igvn.replace_input_of(lb, LoadBarrierNode::Similar, lb2->proj_out(LoadBarrierNode::Oop));
1051       C->set_major_progress();
1052     } else  {
1053       // That transformation may cause the Similar edge on dominated load barriers to be invalid
1054       lb->fix_similar_in_uses(&igvn);
1055 
1056       Node* val = lb->proj_out(LoadBarrierNode::Oop);
1057       assert(lb2->has_true_uses(), "");
1058       assert(lb2->in(LoadBarrierNode::Oop) == lb->in(LoadBarrierNode::Oop), "");
1059 
1060       phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
1061       phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
1062       igvn.replace_node(val, lb2->proj_out(LoadBarrierNode::Oop));
1063 
1064       return true;
1065     }
1066   }
1067   return false;
1068 }
1069 
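     // Walk up the raw memory graph from mem to find the memory state that is
     // valid on entry to dom. For a phi on region dom, input i selects which
     // path to follow.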
1070 static Node* find_dominating_memory(PhaseIdealLoop* phase, Node* mem, Node* dom, int i) {
1071   assert(dom->is_Region() || i == -1, "");
1072   Node* m = mem;
1073   while(phase->is_dominator(dom, phase->has_ctrl(m) ? phase->get_ctrl(m) : m->in(0))) {
1074     if (m->is_Mem()) {
1075       assert(m->as_Mem()->adr_type() == TypeRawPtr::BOTTOM, "");
1076       m = m->in(MemNode::Memory);
1077     } else if (m->is_MergeMem()) {
1078       m = m->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1079     } else if (m->is_Phi()) {
1080       if (m->in(0) == dom && i != -1) {
1081         m = m->in(i);
1082         break;
1083       } else {
1084         m = m->in(LoopNode::EntryControl);
1085       }
1086     } else if (m->is_Proj()) {
1087       m = m->in(0);
1088     } else if (m->is_SafePoint() || m->is_MemBar()) {
1089       m = m->in(TypeFunc::Memory);
1090     } else {
1091 #ifdef ASSERT
1092       m->dump();
1093 #endif
1094       ShouldNotReachHere();
1095     }
1096   }
1097   return m;
1098 }
1099 
1100 static LoadBarrierNode* clone_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* ctl, Node* mem, Node* oop_in) {
1101   PhaseIterGVN &igvn = phase->igvn();
1102   Compile* C = Compile::current();
1103   Node* the_clone = lb->clone();
1104   the_clone->set_req(LoadBarrierNode::Control, ctl);
1105   the_clone->set_req(LoadBarrierNode::Memory, mem);
1106   if (oop_in != NULL) {
1107     the_clone->set_req(LoadBarrierNode::Oop, oop_in);
1108   }
1109 
1110   LoadBarrierNode* new_lb = the_clone->as_LoadBarrier();
1111   igvn.register_new_node_with_optimizer(new_lb);
1112   IdealLoopTree *loop = phase->get_loop(new_lb->in(0));
1113   phase->set_ctrl(new_lb, new_lb->in(0));
1114   phase->set_loop(new_lb, loop);
1115   phase->set_idom(new_lb, new_lb->in(0), phase->dom_depth(new_lb->in(0))+1);
1116   if (!loop->_child) {
1117     loop->_body.push(new_lb);
1118   }
1119 
1120   Node* proj_ctl = new ProjNode(new_lb, LoadBarrierNode::Control);
1121   igvn.register_new_node_with_optimizer(proj_ctl);
1122   phase->set_ctrl(proj_ctl, proj_ctl->in(0));
1123   phase->set_loop(proj_ctl, loop);
1124   phase->set_idom(proj_ctl, new_lb, phase->dom_depth(new_lb)+1);
1125   if (!loop->_child) {
1126     loop->_body.push(proj_ctl);
1127   }
1128 
1129   Node* proj_oop = new ProjNode(new_lb, LoadBarrierNode::Oop);
1130   phase->register_new_node(proj_oop, new_lb);
1131 
1132   if (!new_lb->in(LoadBarrierNode::Similar)->is_top()) {
1133     LoadBarrierNode* similar = new_lb->in(LoadBarrierNode::Similar)->in(0)->as_LoadBarrier();
1134     if (!phase->is_dominator(similar, ctl)) {
1135       igvn.replace_input_of(new_lb, LoadBarrierNode::Similar, C->top());
1136     }
1137   }
1138 
1139   return new_lb;
1140 }
1141 
1142 static void replace_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* new_val) {
1143   PhaseIterGVN &igvn = phase->igvn();
1144   Node* val = lb->proj_out(LoadBarrierNode::Oop);
1145   igvn.replace_node(val, new_val);
1146   phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
1147   phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
1148 }
1149 
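     // If the barrier's oop input is a phi, split the barrier through the phi:
     // clone one barrier per phi input and merge the healed oops with a new phi.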
1150 static bool split_barrier_thru_phi(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
1151   PhaseIterGVN &igvn = phase->igvn();
1152   Compile* C = Compile::current();
1153 
1154   if (lb->in(LoadBarrierNode::Oop)->is_Phi()) {
1155     Node* oop_phi = lb->in(LoadBarrierNode::Oop);
1156 
1157     if (oop_phi->in(2) == oop_phi) {
1158       // Ignore phis with only one input
1159       return false;
1160     }
1161 
1162     if (phase->is_dominator(phase->get_ctrl(lb->in(LoadBarrierNode::Address)),
1163                             oop_phi->in(0)) && phase->get_ctrl(lb->in(LoadBarrierNode::Address)) != oop_phi->in(0)) {
1164       // That transformation may cause the Similar edge on dominated load barriers to be invalid
1165       lb->fix_similar_in_uses(&igvn);
1166 
1167       RegionNode* region = oop_phi->in(0)->as_Region();
1168 
1169       int backedge = LoopNode::LoopBackControl;
1170       if (region->is_Loop() && region->in(backedge)->is_Proj() && region->in(backedge)->in(0)->is_If()) {
1171         Node* c = region->in(backedge)->in(0)->in(0);
1172         assert(c->unique_ctrl_out() == region->in(backedge)->in(0), "");
1173         Node* oop = lb->in(LoadBarrierNode::Oop)->in(backedge);
1174         Node* oop_c = phase->has_ctrl(oop) ? phase->get_ctrl(oop) : oop;
1175         if (!phase->is_dominator(oop_c, c)) {
1176           return false;
1177         }
1178       }
1179 
1180       // If the phi's backedge input is this barrier's own result, we have a self-loop.
1181       // Don't clone - this will be folded later.
1182       if (oop_phi->in(LoopNode::LoopBackControl) == lb->proj_out(LoadBarrierNode::Oop)) {
1183         return false;
1184       }
1185 
1186       bool is_strip_mined = region->is_CountedLoop() && region->as_CountedLoop()->is_strip_mined();
1187       Node *phi = oop_phi->clone();
1188 
1189       for (uint i = 1; i < region->req(); i++) {
1190         Node* ctrl = region->in(i);
1191         if (ctrl != C->top()) {
1192           assert(!phase->is_dominator(ctrl, region) || region->is_Loop(), "");
1193 
1194           Node* mem = lb->in(LoadBarrierNode::Memory);
1195           Node* m = find_dominating_memory(phase, mem, region, i);
1196 
1197           if (region->is_Loop() && i == LoopNode::LoopBackControl && ctrl->is_Proj() && ctrl->in(0)->is_If()) {
1198             ctrl = ctrl->in(0)->in(0);
1199           } else if (region->is_Loop() && is_strip_mined) {
1200             // If this is a strip mined loop, control must move above OuterStripMinedLoop
1201             assert(i == LoopNode::EntryControl, "check");
1202             assert(ctrl->is_OuterStripMinedLoop(), "sanity");
1203             ctrl = ctrl->as_OuterStripMinedLoop()->in(LoopNode::EntryControl);
1204           }
1205 
1206           LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, ctrl, m, lb->in(LoadBarrierNode::Oop)->in(i));
1207           Node* out_ctrl = new_lb->proj_out(LoadBarrierNode::Control);
1208 
1209           if (is_strip_mined && (i == LoopNode::EntryControl)) {
1210             assert(region->in(i)->is_OuterStripMinedLoop(), "");
1211             igvn.replace_input_of(region->in(i), i, out_ctrl);
1212             phase->set_idom(region->in(i), out_ctrl, phase->dom_depth(out_ctrl));
1213           } else if (ctrl == region->in(i)) {
1214             igvn.replace_input_of(region, i, out_ctrl);
1215             // Only update the idom if it is the loop entry we are updating
1216             // - A loop backedge doesn't change the idom
1217             if (region->is_Loop() && i == LoopNode::EntryControl) {
1218               phase->set_idom(region, out_ctrl, phase->dom_depth(out_ctrl));
1219             }
1220           } else {
1221             Node* iff = region->in(i)->in(0);
1222             igvn.replace_input_of(iff, 0, out_ctrl);
1223             phase->set_idom(iff, out_ctrl, phase->dom_depth(out_ctrl)+1);
1224           }
1225           phi->set_req(i, new_lb->proj_out(LoadBarrierNode::Oop));
1226         }
1227       }
1228       phase->register_new_node(phi, region);
1229       replace_barrier(phase, lb, phi);
1230 
1231       if (region->is_Loop()) {
1232         // A load barrier moved to the back edge of the Loop may now
1233         // have a safepoint on the path to the barrier on its Similar
1234         // edge
1235         igvn.replace_input_of(phi->in(LoopNode::LoopBackControl)->in(0), LoadBarrierNode::Similar, C->top());
1236         Node* head = region->in(LoopNode::EntryControl);
1237         phase->set_idom(region, head, phase->dom_depth(head)+1);
1238         phase->recompute_dom_depth();
1239         if (head->is_CountedLoop() && head->as_CountedLoop()->is_main_loop()) {
1240           head->as_CountedLoop()->set_normal_loop();
1241         }
1242       }
1243 
1244       return true;
1245     }
1246   }
1247 
1248   return false;
1249 }
1250 
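     // Hoist a loop-invariant barrier out of its loop by cloning it just before
     // the loop entry (moving above the outer strip-mined loop when needed).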
1251 static bool move_out_of_loop(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
1252   PhaseIterGVN &igvn = phase->igvn();
1253   IdealLoopTree *lb_loop = phase->get_loop(lb->in(0));
1254   if (lb_loop != phase->ltree_root() && !lb_loop->_irreducible) {
1255     Node* oop_ctrl = phase->get_ctrl(lb->in(LoadBarrierNode::Oop));
1256     IdealLoopTree *oop_loop = phase->get_loop(oop_ctrl);
1257     IdealLoopTree* adr_loop = phase->get_loop(phase->get_ctrl(lb->in(LoadBarrierNode::Address)));
1258     if (!lb_loop->is_member(oop_loop) && !lb_loop->is_member(adr_loop)) {
1259       // That transformation may cause the Similar edge on dominated load barriers to be invalid
1260       lb->fix_similar_in_uses(&igvn);
1261 
1262       Node* head = lb_loop->_head;
1263       assert(head->is_Loop(), "");
1264 
1265       if (phase->is_dominator(head, oop_ctrl)) {
1266         assert(oop_ctrl->Opcode() == Op_CProj && oop_ctrl->in(0)->Opcode() == Op_NeverBranch, "");
1267         assert(lb_loop->is_member(phase->get_loop(oop_ctrl->in(0)->in(0))), "");
1268         return false;
1269       }
1270 
1271       if (head->is_CountedLoop()) {
1272         CountedLoopNode* cloop = head->as_CountedLoop();
1273         if (cloop->is_main_loop()) {
1274           cloop->set_normal_loop();
1275         }
1276         // When we are moving a barrier out of a counted loop,
1277         // make sure we move it all the way out of the strip mined outer loop.
1278         if (cloop->is_strip_mined()) {
1279           head = cloop->outer_loop();
1280         }
1281       }
1282 
1283       Node* mem = lb->in(LoadBarrierNode::Memory);
1284       Node* m = find_dominating_memory(phase, mem, head, -1);
1285 
1286       LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, head->in(LoopNode::EntryControl), m, NULL);
1287 
1288       assert(phase->idom(head) == head->in(LoopNode::EntryControl), "");
1289       Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
1290       igvn.replace_input_of(head, LoopNode::EntryControl, proj_ctl);
1291       phase->set_idom(head, proj_ctl, phase->dom_depth(proj_ctl) + 1);
1292 
1293       replace_barrier(phase, lb, new_lb->proj_out(LoadBarrierNode::Oop));
1294 
1295       phase->recompute_dom_depth();
1296 
1297       return true;
1298     }
1299   }
1300 
1301   return false;
1302 }
1303 
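     // If this barrier and another barrier on the same oop sit on the two arms of
     // the same If, hoist a single barrier above the split point and let both
     // original barriers use its result.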
1304 static bool common_barriers(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
1305   PhaseIterGVN &igvn = phase->igvn();
1306   Node* in_val = lb->in(LoadBarrierNode::Oop);
1307   for (DUIterator_Fast imax, i = in_val->fast_outs(imax); i < imax; i++) {
1308     Node* u = in_val->fast_out(i);
1309     if (u != lb && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
1310       Node* this_ctrl = lb->in(LoadBarrierNode::Control);
1311       Node* other_ctrl = u->in(LoadBarrierNode::Control);
1312 
1313       Node* lca = phase->dom_lca(this_ctrl, other_ctrl);
1314       bool ok = true;
1315 
1316       Node* proj1 = NULL;
1317       Node* proj2 = NULL;
1318 
1319       while (this_ctrl != lca && ok) {
1320         if (this_ctrl->in(0) != NULL &&
1321             this_ctrl->in(0)->is_MultiBranch()) {
1322           if (this_ctrl->in(0)->in(0) == lca) {
1323             assert(proj1 == NULL, "");
1324             assert(this_ctrl->is_Proj(), "");
1325             proj1 = this_ctrl;
1326           } else if (!(this_ctrl->in(0)->is_If() && this_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
1327             ok = false;
1328           }
1329         }
1330         this_ctrl = phase->idom(this_ctrl);
1331       }
1332       while (other_ctrl != lca && ok) {
1333         if (other_ctrl->in(0) != NULL &&
1334             other_ctrl->in(0)->is_MultiBranch()) {
1335           if (other_ctrl->in(0)->in(0) == lca) {
1336             assert(other_ctrl->is_Proj(), "");
1337             assert(proj2 == NULL, "");
1338             proj2 = other_ctrl;
1339           } else if (!(other_ctrl->in(0)->is_If() && other_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
1340             ok = false;
1341           }
1342         }
1343         other_ctrl = phase->idom(other_ctrl);
1344       }
1345       assert(proj1 == NULL || proj2 == NULL || proj1->in(0) == proj2->in(0), "");
1346       if (ok && proj1 && proj2 && proj1 != proj2 && proj1->in(0)->is_If()) {
1347         // This transformation may cause the Similar edge on dominated load barriers to become invalid
1348         lb->fix_similar_in_uses(&igvn);
1349         u->as_LoadBarrier()->fix_similar_in_uses(&igvn);
1350 
1351         Node* split = lca->unique_ctrl_out();
1352         assert(split->in(0) == lca, "");
1353 
1354         Node* mem = lb->in(LoadBarrierNode::Memory);
1355         Node* m = find_dominating_memory(phase, mem, split, -1);
1356         LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, lca, m, NULL);
1357 
1358         Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
1359         igvn.replace_input_of(split, 0, proj_ctl);
1360         phase->set_idom(split, proj_ctl, phase->dom_depth(proj_ctl)+1);
1361 
1362         Node* proj_oop = new_lb->proj_out(LoadBarrierNode::Oop);
1363         replace_barrier(phase, lb, proj_oop);
1364         replace_barrier(phase, u->as_LoadBarrier(), proj_oop);
1365 
1366         phase->recompute_dom_depth();
1367 
1368         return true;
1369       }
1370     }
1371   }
1372 
1373   return false;
1374 }
1375 
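     // optimize_load_barrier() applies the optimizations above in order and stops at
     // the first one that rewrites the graph: reuse a dominating barrier on the same
     // address, split the barrier through a Phi, hoist it out of a loop, or common it
     // with a sibling barrier on the same oop. It does nothing unless load barrier
     // optimization is enabled by the compiler directive, and barriers without true
     // uses are skipped here.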
1376 static void optimize_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
1377   Compile* C = Compile::current();
1378 
1379   if (!C->directive()->ZOptimizeLoadBarriersOption) {
1380     return;
1381   }
1382 
1383   if (lb->has_true_uses()) {
1384     if (replace_with_dominating_barrier(phase, lb, last_round)) {
1385       return;
1386     }
1387 
1388     if (split_barrier_thru_phi(phase, lb)) {
1389       return;
1390     }
1391 
1392     if (move_out_of_loop(phase, lb)) {
1393       return;
1394     }
1395 
1396     if (common_barriers(phase, lb)) {
1397       return;
1398     }
1399   }
1400 }
1401 
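     // Entry point used by loop optimizations; anything that is not a LoadBarrier
     // node is ignored.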
1402 void ZBarrierSetC2::loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round) {
1403   if (node->is_LoadBarrier()) {
1404     optimize_load_barrier(phase, node->as_LoadBarrier(), last_round);
1405   }
1406 }
1407 
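     // step_over_gc_barrier() strips a load barrier from an oop value: given a node
     // produced by a barrier (either an unexpanded LoadBarrier node, one of its
     // projections, or the Phi left behind by an optimized barrier expansion), it
     // returns the oop that went into the barrier; any other node is returned
     // unchanged.
     //
     // Sketch (names are illustrative only):
     //   p = LoadP(adr)
     //   q = oop projection of LoadBarrier(p)
     //   step_over_gc_barrier(q) == p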
1408 Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const {
1409   Node* node = c;
1410 
1411   // 1. This step follows potential oop projections of a load barrier before expansion
1412   if (node->is_Proj()) {
1413     node = node->in(0);
1414   }
1415 
1416   // 2. This step checks for unexpanded load barriers
1417   if (node->is_LoadBarrier()) {
1418     return node->in(LoadBarrierNode::Oop);
1419   }
1420 
1421   // 3. This step checks for the phi corresponding to an optimized load barrier expansion
1422   if (node->is_Phi()) {
1423     PhiNode* phi = node->as_Phi();
1424     Node* n = phi->in(1);
1425     if (n != NULL && (n->is_LoadBarrierSlowReg() ||  n->is_LoadBarrierWeakSlowReg())) {
1426       assert(c == node, "projections from step 1 should only be seen before macro expansion");
1427       return phi->in(2);
1428     }
1429   }
1430 
1431   return c;
1432 }
1433 
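     // With ZGC, copying oops (T_OBJECT or T_ARRAY elements) always requires GC
     // barriers, regardless of how tightly coupled the allocation is, whether the
     // copy is a clone, or which compile phase asks; primitive copies never do.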
1434 bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const {
1435   return type == T_OBJECT || type == T_ARRAY;
1436 }
1437 
1438 // == Verification ==
1439 
1440 #ifdef ASSERT
1441 
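     // look_for_barrier() checks that every use of an oop-producing load is either a
     // load barrier, reaches one through an EncodeP/DecodeN, or (except right after
     // parsing) reaches one through a Phi or CMove; an SCMemProj use is also
     // tolerated. Anything else is dumped as a bad use and fails the check.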
1442 static bool look_for_barrier(Node* n, bool post_parse, VectorSet& visited) {
1443   if (visited.test_set(n->_idx)) {
1444     return true;
1445   }
1446 
1447   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1448     Node* u = n->fast_out(i);
1449     if (u->is_LoadBarrier()) {
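           // A use that is itself a load barrier is what we are looking for - nothing
           // more to check on this path.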
1450     } else if ((u->is_Phi() || u->is_CMove()) && !post_parse) {
1451       if (!look_for_barrier(u, post_parse, visited)) {
1452         return false;
1453       }
1454     } else if (u->Opcode() == Op_EncodeP || u->Opcode() == Op_DecodeN) {
1455       if (!look_for_barrier(u, post_parse, visited)) {
1456         return false;
1457       }
1458     } else if (u->Opcode() != Op_SCMemProj) {
1459       tty->print("bad use"); u->dump();
1460       return false;
1461     }
1462   }
1463 
1464   return true;
1465 }
1466 
1467 void ZBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
1468   if (phase == BarrierSetC2::BeforeCodeGen) return;
1469   bool post_parse = phase == BarrierSetC2::BeforeOptimize;
1470   verify_gc_barriers(post_parse);
1471 }
1472 
1473 void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
1474   ZBarrierSetC2State* s = state();
1475   Compile* C = Compile::current();
1476   ResourceMark rm;
1477   VectorSet visited(Thread::current()->resource_area());
1478   for (int i = 0; i < s->load_barrier_count(); i++) {
1479     LoadBarrierNode* n = s->load_barrier_node(i);
1480 
1481     // The dominating barrier on the same address, if it exists, and
1482     // this barrier must not both be applied to the value from the same
1483     // load; otherwise the value is not reloaded before it is used a
1484     // second time.
1485     assert(n->in(LoadBarrierNode::Similar)->is_top() ||
1486            (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
1487             n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Address) == n->in(LoadBarrierNode::Address) &&
1488             n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Oop) != n->in(LoadBarrierNode::Oop)),
1489            "broken similar edge");
1490 
1491     assert(post_parse || n->as_LoadBarrier()->has_true_uses(),
1492            "found unneeded load barrier");
1493 
1494     // Several load barrier nodes chained through their Similar edges
1495     // break the code that removes the barriers in final graph reshape.
1496     assert(n->in(LoadBarrierNode::Similar)->is_top() ||
1497            (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
1498             n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Similar)->is_top()),
1499            "chain of Similar load barriers");
1500 
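         // Walk the control flow upwards from this barrier, stopping at the barrier
         // the Similar edge points to; every node visited must be a CFG node that is
         // not a safepoint, so no safepoint can be reached between the two barriers.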
1501     if (!n->in(LoadBarrierNode::Similar)->is_top()) {
1502       ResourceMark rm;
1503       Unique_Node_List wq;
1504       Node* other = n->in(LoadBarrierNode::Similar)->in(0);
1505       wq.push(n);
1506       bool ok = true;
1507       bool dom_found = false;
1508       for (uint next = 0; next < wq.size(); ++next) {
1509         Node *n = wq.at(next);
1510         assert(n->is_CFG(), "");
1511         assert(!n->is_SafePoint(), "");
1512 
1513         if (n == other) {
1514           continue;
1515         }
1516 
1517         if (n->is_Region()) {
1518           for (uint i = 1; i < n->req(); i++) {
1519             Node* m = n->in(i);
1520             if (m != NULL) {
1521               wq.push(m);
1522             }
1523           }
1524         } else {
1525           Node* m = n->in(0);
1526           if (m != NULL) {
1527             wq.push(m);
1528           }
1529         }
1530       }
1531     }
1532 
1533     if (ZVerifyLoadBarriers) {
1534       if ((n->is_Load() || n->is_LoadStore()) && n->bottom_type()->make_oopptr() != NULL) {
1535         visited.Clear();
1536         bool found = look_for_barrier(n, post_parse, visited);
1537         if (!found) {
1538           n->dump(1);
1539           n->dump(-3);
1540           stringStream ss;
1541           C->method()->print_short_name(&ss);
1542           tty->print_cr("-%s-", ss.as_string());
1543           assert(found, "");
1544         }
1545       }
1546     }
1547   }
1548 }
1549 
1550 #endif