1 /*
   2  * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "opto/compile.hpp"
  26 #include "opto/castnode.hpp"
  27 #include "opto/graphKit.hpp"
  28 #include "opto/idealKit.hpp"
  29 #include "opto/loopnode.hpp"
  30 #include "opto/macro.hpp"
  31 #include "opto/node.hpp"
  32 #include "opto/type.hpp"
  33 #include "utilities/macros.hpp"
  34 #include "gc/z/zBarrierSet.hpp"
  35 #include "gc/z/c2/zBarrierSetC2.hpp"
  36 #include "gc/z/zThreadLocalData.hpp"
  37 #include "gc/z/zBarrierSetRuntime.hpp"
  38 
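// Per-compilation bookkeeping of all LoadBarrierNodes, so that they can be
// found again and expanded during macro expansion.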
  39 ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena)
  40   : _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8,  0, NULL)) {}
  41 
  42 int ZBarrierSetC2State::load_barrier_count() const {
  43   return _load_barrier_nodes->length();
  44 }
  45 
  46 void ZBarrierSetC2State::add_load_barrier_node(LoadBarrierNode * n) {
  assert(!_load_barrier_nodes->contains(n), "duplicate entry in expand list");
  48   _load_barrier_nodes->append(n);
  49 }
  50 
  51 void ZBarrierSetC2State::remove_load_barrier_node(LoadBarrierNode * n) {
  // This function may be called twice for a node, so check
  // that the node is in the array before attempting to remove it
  54   if (_load_barrier_nodes->contains(n)) {
  55     _load_barrier_nodes->remove(n);
  56   }
  57 }
  58 
  59 LoadBarrierNode* ZBarrierSetC2State::load_barrier_node(int idx) const {
  60   return _load_barrier_nodes->at(idx);
  61 }
  62 
  63 void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  64   return new(comp_arena) ZBarrierSetC2State(comp_arena);
  65 }
  66 
  67 ZBarrierSetC2State* ZBarrierSetC2::state() const {
  68   return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
  69 }
  70 
  71 bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
  72   // 1. This step follows potential oop projections of a load barrier before expansion
  73   if (node->is_Proj()) {
  74     node = node->in(0);
  75   }
  76 
  77   // 2. This step checks for unexpanded load barriers
  78   if (node->is_LoadBarrier()) {
  79     return true;
  80   }
  81 
  82   // 3. This step checks for the phi corresponding to an optimized load barrier expansion
  83   if (node->is_Phi()) {
  84     PhiNode* phi = node->as_Phi();
  85     Node* n = phi->in(1);
  86     if (n != NULL && (n->is_LoadBarrierSlowReg() ||  n->is_LoadBarrierWeakSlowReg())) {
  87       return true;
  88     }
  89   }
  90 
  91   return false;
  92 }
  93 
  94 void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
  95   if (node->is_LoadBarrier()) {
  96     state()->add_load_barrier_node(node->as_LoadBarrier());
  97   }
  98 }
  99 
 100 void ZBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
 101   if (node->is_LoadBarrier()) {
 102     state()->remove_load_barrier_node(node->as_LoadBarrier());
 103   }
 104 }
 105 
 106 void ZBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful) const {
 107   // Remove useless LoadBarrier nodes
 108   ZBarrierSetC2State* s = state();
 109   for (int i = s->load_barrier_count()-1; i >= 0; i--) {
 110     LoadBarrierNode* n = s->load_barrier_node(i);
 111     if (!useful.member(n)) {
 112       unregister_potential_barrier_node(n);
 113     }
 114   }
 115 }
 116 
 117 void ZBarrierSetC2::enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const {
 118   if (node->is_LoadBarrier() && !node->as_LoadBarrier()->has_true_uses()) {
 119     worklist.push(node);
 120   }
 121 }
 122 
 123 void ZBarrierSetC2::find_dominating_barriers(PhaseIterGVN& igvn) {
 124   // Look for dominating barriers on the same address only once all
 125   // other loop opts are over: loop opts may cause a safepoint to be
 126   // inserted between a barrier and its dominating barrier.
 127   Compile* C = Compile::current();
 128   ZBarrierSetC2* bs = (ZBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2();
 129   ZBarrierSetC2State* s = bs->state();
 130   if (s->load_barrier_count() >= 2) {
 131     Compile::TracePhase tp("idealLoop", &C->timers[Phase::_t_idealLoop]);
 132     PhaseIdealLoop ideal_loop(igvn, LoopOptsZgcLastRound);
 133     if (C->major_progress()) C->print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
 134   }
 135 }
 136 
 137 void ZBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {
  // Permanent temporary workaround
  // Load barriers may have non-obvious dead uses that keep them alive during parsing. The use is
  // removed by RemoveUseless (after parsing, before optimize), but the barriers won't be added to
  // the worklist. Unless we add them explicitly, they are not guaranteed to end up there.
 142   ZBarrierSetC2State* s = state();
 143 
 144   for (int i = 0; i < s->load_barrier_count(); i++) {
 145     LoadBarrierNode* n = s->load_barrier_node(i);
 146     worklist->push(n);
 147   }
 148 }
 149 
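// Type of the load barrier slow-path runtime call: it takes the pre-loaded oop
// and the field address, and returns the healed oop (see the CallLeafNode
// created in expand_loadbarrier_basic()).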
 150 const TypeFunc* ZBarrierSetC2::load_barrier_Type() const {
 151   const Type** fields;
 152 
 153   // Create input types (domain)
 154   fields = TypeTuple::fields(2);
 155   fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
 156   fields[TypeFunc::Parms+1] = TypeOopPtr::BOTTOM;
 157   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
 158 
 159   // Create result type (range)
 160   fields = TypeTuple::fields(1);
 161   fields[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM;
 162   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
 163 
 164   return TypeFunc::make(domain, range);
 165 }
 166 
 167 // == LoadBarrierNode ==
 168 
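// A LoadBarrierNode captures an unexpanded ZGC load barrier. Its inputs are
// Control, Memory (raw memory), Oop (the loaded value to be healed), Address
// (the field the oop was loaded from) and Similar, an optional edge to the Oop
// projection of a dominating barrier on the same address that can make this
// barrier redundant. It declares Control, Memory and Oop outputs and is
// expanded late, in ZBarrierSetC2::expand_loadbarrier_node().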
 169 LoadBarrierNode::LoadBarrierNode(Compile* C,
 170                                  Node* c,
 171                                  Node* mem,
 172                                  Node* val,
 173                                  Node* adr,
 174                                  bool weak,
 175                                  bool writeback,
 176                                  bool oop_reload_allowed) :
 177     MultiNode(Number_of_Inputs),
 178     _weak(weak),
 179     _writeback(writeback),
 180     _oop_reload_allowed(oop_reload_allowed) {
 181   init_req(Control, c);
 182   init_req(Memory, mem);
 183   init_req(Oop, val);
 184   init_req(Address, adr);
 185   init_req(Similar, C->top());
 186 
 187   init_class_id(Class_LoadBarrier);
 188   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 189   bs->register_potential_barrier_node(this);
 190 }
 191 
 192 uint LoadBarrierNode::size_of() const {
 193   return sizeof(*this);
 194 }
 195 
 196 uint LoadBarrierNode::cmp(const Node& n) const {
 197   ShouldNotReachHere();
 198   return 0;
 199 }
 200 
 201 const Type *LoadBarrierNode::bottom_type() const {
 202   const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
 203   Node* in_oop = in(Oop);
 204   floadbarrier[Control] = Type::CONTROL;
 205   floadbarrier[Memory] = Type::MEMORY;
 206   floadbarrier[Oop] = in_oop == NULL ? Type::TOP : in_oop->bottom_type();
 207   return TypeTuple::make(Number_of_Outputs, floadbarrier);
 208 }
 209 
 210 const TypePtr* LoadBarrierNode::adr_type() const {
 211   ShouldNotReachHere();
 212   return NULL;
 213 }
 214 
 215 const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
 216   const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
 217   const Type* val_t = phase->type(in(Oop));
 218   floadbarrier[Control] = Type::CONTROL;
 219   floadbarrier[Memory] = Type::MEMORY;
 220   floadbarrier[Oop] = val_t;
 221   return TypeTuple::make(Number_of_Outputs, floadbarrier);
 222 }
 223 
 224 bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n) {
 225   if (phase != NULL) {
 226     return phase->is_dominator(d, n);
 227   }
 228 
 229   for (int i = 0; i < 10 && n != NULL; i++) {
 230     n = IfNode::up_one_dom(n, linear_only);
 231     if (n == d) {
 232       return true;
 233     }
 234   }
 235 
 236   return false;
 237 }
 238 
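// Look for another load barrier that dominates this one and heals the same
// oop. The search first follows the Similar edge if one has been recorded,
// then scans other barriers on the same Oop input, and finally, when
// look_for_similar is set, scans barriers on the same Address input, giving up
// if a safepoint may lie between the dominating barrier and this one.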
 239 LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) {
 240   Node* val = in(LoadBarrierNode::Oop);
 241   if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) {
 242     LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier();
 243     assert(lb->in(Address) == in(Address), "");
    // The load barrier on the Similar edge dominates this one, so if it now has the same Oop input it can replace this barrier.
 245     if (lb->in(Oop) == in(Oop)) {
 246       return lb;
 247     }
 248     // Follow chain of load barrier through Similar edges
 249     while (!lb->in(Similar)->is_top()) {
 250       lb = lb->in(Similar)->in(0)->as_LoadBarrier();
 251       assert(lb->in(Address) == in(Address), "");
 252     }
 253     if (lb != in(Similar)->in(0)) {
 254       return lb;
 255     }
 256   }
 257   for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
 258     Node* u = val->fast_out(i);
 259     if (u != this && u->is_LoadBarrier() && u->in(Oop) == val && u->as_LoadBarrier()->has_true_uses()) {
 260       Node* this_ctrl = in(LoadBarrierNode::Control);
 261       Node* other_ctrl = u->in(LoadBarrierNode::Control);
 262       if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
 263         return u->as_LoadBarrier();
 264       }
 265     }
 266   }
 267 
 268   if (ZVerifyLoadBarriers || can_be_eliminated()) {
 269     return NULL;
 270   }
 271 
 272   if (!look_for_similar) {
 273     return NULL;
 274   }
 275 
 276   Node* addr = in(LoadBarrierNode::Address);
 277   for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
 278     Node* u = addr->fast_out(i);
 279     if (u != this && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
 280       Node* this_ctrl = in(LoadBarrierNode::Control);
 281       Node* other_ctrl = u->in(LoadBarrierNode::Control);
 282       if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
 283         ResourceMark rm;
 284         Unique_Node_List wq;
 285         wq.push(in(LoadBarrierNode::Control));
 286         bool ok = true;
 287         bool dom_found = false;
 288         for (uint next = 0; next < wq.size(); ++next) {
 289           Node *n = wq.at(next);
 290           if (n->is_top()) {
 291             return NULL;
 292           }
 293           assert(n->is_CFG(), "");
 294           if (n->is_SafePoint()) {
 295             ok = false;
 296             break;
 297           }
 298           if (n == u) {
 299             dom_found = true;
 300             continue;
 301           }
 302           if (n->is_Region()) {
 303             for (uint i = 1; i < n->req(); i++) {
 304               Node* m = n->in(i);
 305               if (m != NULL) {
 306                 wq.push(m);
 307               }
 308             }
 309           } else {
 310             Node* m = n->in(0);
 311             if (m != NULL) {
 312               wq.push(m);
 313             }
 314           }
 315         }
 316         if (ok) {
 317           assert(dom_found, "");
          return u->as_LoadBarrier();
 319         }
 320         break;
 321       }
 322     }
 323   }
 324 
 325   return NULL;
 326 }
 327 
 328 void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
  // A change to this barrier may affect a dominated barrier, so re-push those barriers
 330   Node* val = in(LoadBarrierNode::Oop);
 331 
 332   for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
 333     Node* u = val->fast_out(i);
 334     if (u != this && u->is_LoadBarrier() && u->in(Oop) == val) {
 335       Node* this_ctrl = in(Control);
 336       Node* other_ctrl = u->in(Control);
 337       if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
 338         igvn->_worklist.push(u);
 339       }
 340     }
 341 
 342     Node* addr = in(LoadBarrierNode::Address);
 343     for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
 344       Node* u = addr->fast_out(i);
 345       if (u != this && u->is_LoadBarrier() && u->in(Similar)->is_top()) {
 346         Node* this_ctrl = in(Control);
 347         Node* other_ctrl = u->in(Control);
 348         if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
 349           igvn->_worklist.push(u);
 350         }
 351       }
 352     }
 353   }
 354 }
 355 
 356 Node *LoadBarrierNode::Identity(PhaseGVN *phase) {
 357   if (!phase->C->directive()->ZOptimizeLoadBarriersOption) {
 358     return this;
 359   }
 360 
 362   LoadBarrierNode* dominating_barrier = has_dominating_barrier(NULL, true, false);
 363   if (dominating_barrier != NULL) {
 364     assert(dominating_barrier->in(Oop) == in(Oop), "");
 365     return dominating_barrier;
 366   }
 367 
 368   return this;
 369 }
 370 
 371 Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 372   if (remove_dead_region(phase, can_reshape)) {
 373     return this;
 374   }
 375 
 376   Node* val = in(Oop);
 377   Node* mem = in(Memory);
 378   Node* ctrl = in(Control);
 379   Node* adr = in(Address);
 380   assert(val->Opcode() != Op_LoadN, "");
 381 
 382   if (mem->is_MergeMem()) {
 383     Node* new_mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
 384     set_req(Memory, new_mem);
 385     if (mem->outcnt() == 0 && can_reshape) {
 386       phase->is_IterGVN()->_worklist.push(mem);
 387     }
 388 
 389     return this;
 390   }
 391 
 392   bool optimizeLoadBarriers = phase->C->directive()->ZOptimizeLoadBarriersOption;
 393   LoadBarrierNode* dominating_barrier = optimizeLoadBarriers ? has_dominating_barrier(NULL, !can_reshape, !phase->C->major_progress()) : NULL;
 394   if (dominating_barrier != NULL && dominating_barrier->in(Oop) != in(Oop)) {
 395     assert(in(Address) == dominating_barrier->in(Address), "");
 396     set_req(Similar, dominating_barrier->proj_out(Oop));
 397     return this;
 398   }
 399 
 400   bool eliminate = (optimizeLoadBarriers && !(val->is_Phi() || val->Opcode() == Op_LoadP || val->Opcode() == Op_GetAndSetP || val->is_DecodeN())) ||
 401                    (can_reshape && (dominating_barrier != NULL || !has_true_uses()));
 402 
 403   if (eliminate) {
 404     if (can_reshape) {
 405       PhaseIterGVN* igvn = phase->is_IterGVN();
 406       Node* out_ctrl = proj_out_or_null(Control);
 407       Node* out_res = proj_out_or_null(Oop);
 408 
 409       if (out_ctrl != NULL) {
 410         igvn->replace_node(out_ctrl, ctrl);
 411       }
 412 
 413       // That transformation may cause the Similar edge on the load barrier to be invalid
 414       fix_similar_in_uses(igvn);
 415       if (out_res != NULL) {
 416         if (dominating_barrier != NULL) {
 417           igvn->replace_node(out_res, dominating_barrier->proj_out(Oop));
 418         } else {
 419           igvn->replace_node(out_res, val);
 420         }
 421       }
 422     }
 423 
 424     return new ConINode(TypeInt::ZERO);
 425   }
 426 
 427   // If the Similar edge is no longer a load barrier, clear it
 428   Node* similar = in(Similar);
 429   if (!similar->is_top() && !(similar->is_Proj() && similar->in(0)->is_LoadBarrier())) {
 430     set_req(Similar, phase->C->top());
 431     return this;
 432   }
 433 
 434   if (can_reshape) {
 435     // If this barrier is linked through the Similar edge by a
 436     // dominated barrier and both barriers have the same Oop field,
 437     // the dominated barrier can go away, so push it for reprocessing.
    // We also want to avoid having a barrier depend, through its Similar
    // edge, on another dominating barrier that itself depends on a third
    // barrier through its own Similar edge; instead, make the first barrier
    // depend directly on the third.
 442     PhaseIterGVN* igvn = phase->is_IterGVN();
 443     Node* out_res = proj_out(Oop);
 444     for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
 445       Node* u = out_res->fast_out(i);
 446       if (u->is_LoadBarrier() && u->in(Similar) == out_res &&
 447           (u->in(Oop) == val || !u->in(Similar)->is_top())) {
 448         igvn->_worklist.push(u);
 449       }
 450     }
 451 
 452     push_dominated_barriers(igvn);
 453   }
 454 
 455   return NULL;
 456 }
 457 
 458 uint LoadBarrierNode::match_edge(uint idx) const {
 459   ShouldNotReachHere();
 460   return 0;
 461 }
 462 
 463 void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
 464   Node* out_res = proj_out_or_null(Oop);
 465   if (out_res == NULL) {
 466     return;
 467   }
 468 
 469   for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
 470     Node* u = out_res->fast_out(i);
 471     if (u->is_LoadBarrier() && u->in(Similar) == out_res) {
 472       igvn->replace_input_of(u, Similar, igvn->C->top());
 473       --i;
 474       --imax;
 475     }
 476   }
 477 }
 478 
 479 bool LoadBarrierNode::has_true_uses() const {
 480   Node* out_res = proj_out_or_null(Oop);
 481   if (out_res == NULL) {
 482     return false;
 483   }
 484 
 485   for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
 486     Node* u = out_res->fast_out(i);
 487     if (!u->is_LoadBarrier() || u->in(Similar) != out_res) {
 488       return true;
 489     }
 490   }
 491 
 492   return false;
 493 }
 494 
 495 // == Accesses ==
 496 
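// Expand a CompareAndSwapP into a CAS with a guarded retry. The emitted graph
// corresponds roughly to:
//
//   if (cas(adr, expected, new_val)) {
//     result = true;                        // fast path, CAS succeeded
//   } else {
//     healed = load_barrier(reload(adr));   // CAS failed, heal the oop
//     result = (healed == expected) ? cas(adr, expected, new_val) : false;
//   }
//
// so a CAS that failed only because the field held a stale (unhealed) pointer
// is retried after the load barrier has healed it.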
 497 Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicAccess& access) const {
 498   assert(!UseCompressedOops, "Not allowed");
 499   CompareAndSwapNode* cas = (CompareAndSwapNode*)access.raw_access();
 500   PhaseGVN& gvn = access.kit()->gvn();
 501   Compile* C = Compile::current();
 502   GraphKit* kit = access.kit();
 503 
 504   Node* in_ctrl     = cas->in(MemNode::Control);
 505   Node* in_mem      = cas->in(MemNode::Memory);
 506   Node* in_adr      = cas->in(MemNode::Address);
 507   Node* in_val      = cas->in(MemNode::ValueIn);
 508   Node* in_expected = cas->in(LoadStoreConditionalNode::ExpectedIn);
 509 
 510   float likely                   = PROB_LIKELY(0.999);
 511 
 512   const TypePtr *adr_type        = gvn.type(in_adr)->isa_ptr();
 513   Compile::AliasType* alias_type = C->alias_type(adr_type);
 514   int alias_idx                  = C->get_alias_index(adr_type);
 515 
 516   // Outer check - true: continue, false: load and check
 517   Node* region   = new RegionNode(3);
 518   Node* phi      = new PhiNode(region, TypeInt::BOOL);
 519   Node* phi_mem  = new PhiNode(region, Type::MEMORY, adr_type);
 520 
  // Inner check - is the healed ref equal to the expected value
 522   Node* region2  = new RegionNode(3);
 523   Node* phi2     = new PhiNode(region2, TypeInt::BOOL);
 524   Node* phi_mem2 = new PhiNode(region2, Type::MEMORY, adr_type);
 525 
 526   // CAS node returns 0 or 1
 527   Node* cmp     = gvn.transform(new CmpINode(cas, kit->intcon(0)));
 528   Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
 529   IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
 530   Node* then    = gvn.transform(new IfTrueNode(iff));
 531   Node* elsen   = gvn.transform(new IfFalseNode(iff));
 532 
 533   Node* scmemproj1   = gvn.transform(new SCMemProjNode(cas));
 534 
 535   kit->set_memory(scmemproj1, alias_idx);
 536   phi_mem->init_req(1, scmemproj1);
 537   phi_mem2->init_req(2, scmemproj1);
 538 
 539   // CAS fail - reload and heal oop
 540   Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
 541   Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
 542   Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
 543   Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
 544 
 545   // Check load
 546   Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
 547   Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
 548   Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
 549   Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
 550   IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
 551   Node* then2   = gvn.transform(new IfTrueNode(iff2));
 552   Node* elsen2  = gvn.transform(new IfFalseNode(iff2));
 553 
 554   // redo CAS
 555   Node* cas2       = gvn.transform(new CompareAndSwapPNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, cas->order()));
 556   Node* scmemproj2 = gvn.transform(new SCMemProjNode(cas2));
 557   kit->set_control(elsen2);
 558   kit->set_memory(scmemproj2, alias_idx);
 559 
  // Merge inner flow - check if the healed oop was equal to the expected value.
 561   region2->set_req(1, kit->control());
 562   region2->set_req(2, then2);
 563   phi2->set_req(1, cas2);
 564   phi2->set_req(2, kit->intcon(0));
 565   phi_mem2->init_req(1, scmemproj2);
 566   kit->set_memory(phi_mem2, alias_idx);
 567 
 568   // Merge outer flow - then check if first CAS succeeded
 569   region->set_req(1, then);
 570   region->set_req(2, region2);
 571   phi->set_req(1, kit->intcon(1));
 572   phi->set_req(2, phi2);
 573   phi_mem->init_req(2, phi_mem2);
 574   kit->set_memory(phi_mem, alias_idx);
 575 
 576   gvn.transform(region2);
 577   gvn.transform(phi2);
 578   gvn.transform(phi_mem2);
 579   gvn.transform(region);
 580   gvn.transform(phi);
 581   gvn.transform(phi_mem);
 582 
 583   kit->set_control(region);
 584   kit->insert_mem_bar(Op_MemBarCPUOrder);
 585 
 586   return phi;
 587 }
 588 
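// Expand a CompareAndExchangeP using the same shape as make_cas_loadbarrier()
// above, except that the merged result is an oop rather than a boolean: on
// failure either the healed oop or the result of the retried exchange is
// returned.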
 589 Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicAccess& access) const {
 590   CompareAndExchangePNode* cmpx = (CompareAndExchangePNode*)access.raw_access();
 591   GraphKit* kit = access.kit();
 592   PhaseGVN& gvn = kit->gvn();
 593   Compile* C = Compile::current();
 594 
 595   Node* in_ctrl     = cmpx->in(MemNode::Control);
 596   Node* in_mem      = cmpx->in(MemNode::Memory);
 597   Node* in_adr      = cmpx->in(MemNode::Address);
 598   Node* in_val      = cmpx->in(MemNode::ValueIn);
 599   Node* in_expected = cmpx->in(LoadStoreConditionalNode::ExpectedIn);
 600 
 601   float likely                   = PROB_LIKELY(0.999);
 602 
 603   const TypePtr *adr_type        = cmpx->get_ptr_type();
 604   Compile::AliasType* alias_type = C->alias_type(adr_type);
 605   int alias_idx                  = C->get_alias_index(adr_type);
 606 
 607   // Outer check - true: continue, false: load and check
 608   Node* region  = new RegionNode(3);
 609   Node* phi     = new PhiNode(region, adr_type);
 610 
  // Inner check - is the healed ref equal to the expected value
 612   Node* region2 = new RegionNode(3);
 613   Node* phi2    = new PhiNode(region2, adr_type);
 614 
 615   // Check if cmpx succeeded
 616   Node* cmp     = gvn.transform(new CmpPNode(cmpx, in_expected));
 617   Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::eq))->as_Bool();
 618   IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
 619   Node* then    = gvn.transform(new IfTrueNode(iff));
 620   Node* elsen   = gvn.transform(new IfFalseNode(iff));
 621 
 622   Node* scmemproj1  = gvn.transform(new SCMemProjNode(cmpx));
 623   kit->set_memory(scmemproj1, alias_idx);
 624 
 625   // CAS fail - reload and heal oop
 626   Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
 627   Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
 628   Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
 629   Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
 630 
 631   // Check load
 632   Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
 633   Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
 634   Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
 635   Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
 636   IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
 637   Node* then2   = gvn.transform(new IfTrueNode(iff2));
 638   Node* elsen2  = gvn.transform(new IfFalseNode(iff2));
 639 
 640   // Redo CAS
 641   Node* cmpx2      = gvn.transform(new CompareAndExchangePNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, adr_type, cmpx->get_ptr_type(), cmpx->order()));
 642   Node* scmemproj2 = gvn.transform(new SCMemProjNode(cmpx2));
 643   kit->set_control(elsen2);
 644   kit->set_memory(scmemproj2, alias_idx);
 645 
  // Merge inner flow - check if the healed oop was equal to the expected value.
 647   region2->set_req(1, kit->control());
 648   region2->set_req(2, then2);
 649   phi2->set_req(1, cmpx2);
 650   phi2->set_req(2, barrierdata);
 651 
  // Merge outer flow - then check if the first CAS succeeded
 653   region->set_req(1, then);
 654   region->set_req(2, region2);
 655   phi->set_req(1, cmpx);
 656   phi->set_req(2, phi2);
 657 
 658   gvn.transform(region2);
 659   gvn.transform(phi2);
 660   gvn.transform(region);
 661   gvn.transform(phi);
 662 
 663   kit->set_control(region);
 664   kit->set_memory(in_mem, alias_idx);
 665   kit->insert_mem_bar(Op_MemBarCPUOrder);
 666 
 667   return phi;
 668 }
 669 
 670 Node* ZBarrierSetC2::load_barrier(GraphKit* kit, Node* val, Node* adr, bool weak, bool writeback, bool oop_reload_allowed) const {
 671   PhaseGVN& gvn = kit->gvn();
 672   Node* barrier = new LoadBarrierNode(Compile::current(), kit->control(), kit->memory(TypeRawPtr::BOTTOM), val, adr, weak, writeback, oop_reload_allowed);
 673   Node* transformed_barrier = gvn.transform(barrier);
 674 
 675   if (transformed_barrier->is_LoadBarrier()) {
 676     if (barrier == transformed_barrier) {
 677       kit->set_control(gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control)));
 678     }
 679     Node* result = gvn.transform(new ProjNode(transformed_barrier, LoadBarrierNode::Oop));
 680     assert(is_gc_barrier_node(result), "sanity");
 681     assert(step_over_gc_barrier(result) == val, "sanity");
 682     return result;
 683   } else {
 684     return val;
 685   }
 686 }
 687 
 688 static bool barrier_needed(C2Access access) {
 689   return ZBarrierSet::barrier_needed(access.decorators(), access.type());
 690 }
 691 
 692 Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
 693   Node* p = BarrierSetC2::load_at_resolved(access, val_type);
 694   if (!barrier_needed(access)) {
 695     return p;
 696   }
 697 
 698   bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;
 699 
 700   GraphKit* kit = access.kit();
 701   PhaseGVN& gvn = kit->gvn();
 702   Node* adr = access.addr().node();
 703   Node* heap_base_oop = access.base();
 704   bool unsafe = (access.decorators() & C2_UNSAFE_ACCESS) != 0;
 705   if (unsafe) {
 706     if (!ZVerifyLoadBarriers) {
 707       p = load_barrier(kit, p, adr);
 708     } else {
 709       if (!TypePtr::NULL_PTR->higher_equal(gvn.type(heap_base_oop))) {
 710         p = load_barrier(kit, p, adr);
 711       } else {
 712         IdealKit ideal(kit);
 713         IdealVariable res(ideal);
 714 #define __ ideal.
 715         __ declarations_done();
 716         __ set(res, p);
 717         __ if_then(heap_base_oop, BoolTest::ne, kit->null(), PROB_UNLIKELY(0.999)); {
 718           kit->sync_kit(ideal);
 719           p = load_barrier(kit, p, adr);
 720           __ set(res, p);
 721           __ sync_kit(kit);
 722         } __ end_if();
 723         kit->final_sync(ideal);
 724         p = __ value(res);
 725 #undef __
 726       }
 727     }
 728     return p;
 729   } else {
 730     return load_barrier(access.kit(), p, access.addr().node(), weak, true, true);
 731   }
 732 }
 733 
 734 Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
 735                                                     Node* new_val, const Type* val_type) const {
 736   Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
 737   if (!barrier_needed(access)) {
 738     return result;
 739   }
 740 
 741   access.set_needs_pinning(false);
 742   return make_cmpx_loadbarrier(access);
 743 }
 744 
 745 Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
 746                                                      Node* new_val, const Type* value_type) const {
 747   Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
 748   if (!barrier_needed(access)) {
 749     return result;
 750   }
 751 
  Node* load_store = access.raw_access();
  bool expected_is_null = (expected_val->get_ptr_type() == TypePtr::NULL_PTR);

  if (!expected_is_null) {
    // Weak and strong CAS currently expand to the same barrier, so the
    // C2_WEAK_CMPXCHG decorator does not need to be checked here
    access.set_needs_pinning(false);
    load_store = make_cas_loadbarrier(access);
  }
 765 
 766   return load_store;
 767 }
 768 
 769 Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const {
 770   Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
 771   if (!barrier_needed(access)) {
 772     return result;
 773   }
 774 
 775   Node* load_store = access.raw_access();
 776   Node* adr = access.addr().node();
 777 
 778   return load_barrier(access.kit(), load_store, adr, false, false, false);
 779 }
 780 
 781 // == Macro Expansion ==
 782 
 783 void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const {
 784   Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
 785   Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
 786   Node* in_val  = barrier->in(LoadBarrierNode::Oop);
 787   Node* in_adr  = barrier->in(LoadBarrierNode::Address);
 788 
 789   Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
 790   Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);
 791 
 792   PhaseIterGVN &igvn = phase->igvn();
 793 
 794   if (ZVerifyLoadBarriers) {
 795     igvn.replace_node(out_res, in_val);
 796     igvn.replace_node(out_ctrl, in_ctrl);
 797     return;
 798   }
 799 
 800   if (barrier->can_be_eliminated()) {
 801     // Clone and pin the load for this barrier below the dominating
 802     // barrier: the load cannot be allowed to float above the
 803     // dominating barrier
 804     Node* load = in_val;
 805 
 806     if (load->is_Load()) {
 807       Node* new_load = load->clone();
 808       Node* addp = new_load->in(MemNode::Address);
 809       assert(addp->is_AddP() || addp->is_Phi() || addp->is_Load(), "bad address");
 810       Node* cast = new CastPPNode(addp, igvn.type(addp), true);
 811       Node* ctrl = NULL;
 812       Node* similar = barrier->in(LoadBarrierNode::Similar);
 813       if (similar->is_Phi()) {
 814         // already expanded
 815         ctrl = similar->in(0);
 816       } else {
 817         assert(similar->is_Proj() && similar->in(0)->is_LoadBarrier(), "unexpected graph shape");
 818         ctrl = similar->in(0)->as_LoadBarrier()->proj_out(LoadBarrierNode::Control);
 819       }
 820       assert(ctrl != NULL, "bad control");
 821       cast->set_req(0, ctrl);
 822       igvn.transform(cast);
 823       new_load->set_req(MemNode::Address, cast);
 824       igvn.transform(new_load);
 825 
 826       igvn.replace_node(out_res, new_load);
 827       igvn.replace_node(out_ctrl, in_ctrl);
 828       return;
 829     }
 830     // cannot eliminate
 831   }
 832 
  // There are two cases that require the basic load barrier
  // 1) When the writeback of a healed oop must be avoided (swap)
  // 2) When we must guarantee that no reload is done (swap, cas, cmpx)
 836   if (!barrier->is_writeback()) {
    assert(!barrier->oop_reload_allowed(), "barriers without writeback should not allow oop reload");
 838   }
 839 
 840   if (!barrier->oop_reload_allowed()) {
 841     expand_loadbarrier_basic(phase, barrier);
 842   } else {
 843     expand_loadbarrier_optimized(phase, barrier);
 844   }
 845 }
 846 
// Basic load barrier expansion using conventional argument passing
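//
// The expansion is roughly equivalent to:
//
//   if (((uintptr_t)oop & bad_mask) != 0) {
//     oop = ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded(oop, writeback ? adr : NULL);
//   }
//
// where bad_mask is loaded from the thread-local ZThreadLocalData, the slow
// path is an ordinary leaf call, and the weak variant of the runtime call is
// used for weak barriers.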
 848 void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
 849   PhaseIterGVN &igvn = phase->igvn();
 850 
 851   Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
 852   Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
 853   Node* in_val  = barrier->in(LoadBarrierNode::Oop);
 854   Node* in_adr  = barrier->in(LoadBarrierNode::Address);
 855 
 856   Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
 857   Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);
 858 
 859   float unlikely  = PROB_UNLIKELY(0.999);
 860   const Type* in_val_maybe_null_t = igvn.type(in_val);
 861 
 862   Node* jthread = igvn.transform(new ThreadLocalNode());
 863   Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
 864   Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
 865   Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
 866   Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
 867   Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
 868   Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
 869   IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
 870   Node* then = igvn.transform(new IfTrueNode(iff));
 871   Node* elsen = igvn.transform(new IfFalseNode(iff));
 872 
 873   Node* result_region;
 874   Node* result_val;
 875 
 876   result_region = new RegionNode(3);
 877   result_val = new PhiNode(result_region, TypeInstPtr::BOTTOM);
 878 
 879   result_region->set_req(1, elsen);
 880   Node* res = igvn.transform(new CastPPNode(in_val, in_val_maybe_null_t));
 881   res->init_req(0, elsen);
 882   result_val->set_req(1, res);
 883 
 884   const TypeFunc *tf = load_barrier_Type();
 885   Node* call;
 886   if (barrier->is_weak()) {
 887     call = new CallLeafNode(tf,
 888                             ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr(),
 889                             "ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded",
 890                             TypeRawPtr::BOTTOM);
 891   } else {
 892     call = new CallLeafNode(tf,
 893                             ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(),
 894                             "ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded",
 895                             TypeRawPtr::BOTTOM);
 896   }
 897 
 898   call->init_req(TypeFunc::Control, then);
 899   call->init_req(TypeFunc::I_O    , phase->top());
 900   call->init_req(TypeFunc::Memory , in_mem);
 901   call->init_req(TypeFunc::FramePtr, phase->top());
 902   call->init_req(TypeFunc::ReturnAdr, phase->top());
 903   call->init_req(TypeFunc::Parms+0, in_val);
 904   if (barrier->is_writeback()) {
 905     call->init_req(TypeFunc::Parms+1, in_adr);
 906   } else {
 907     // When slow path is called with a null address, the healed oop will not be written back
 908     call->init_req(TypeFunc::Parms+1, igvn.zerocon(T_OBJECT));
 909   }
 910   call = igvn.transform(call);
 911 
 912   Node* ctrl = igvn.transform(new ProjNode(call, TypeFunc::Control));
 913   res = igvn.transform(new ProjNode(call, TypeFunc::Parms));
 914   res = igvn.transform(new CheckCastPPNode(ctrl, res, in_val_maybe_null_t));
 915 
 916   result_region->set_req(2, ctrl);
 917   result_val->set_req(2, res);
 918 
 919   result_region = igvn.transform(result_region);
 920   result_val = igvn.transform(result_val);
 921 
  if (out_ctrl != NULL) {
 923     igvn.replace_node(out_ctrl, result_region);
 924   }
 925   igvn.replace_node(out_res, result_val);
 926 }
 927 
// Optimized, low-spill load barrier variant using a stub specialized on the register used
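//
// The fast-path test is the same (oop & bad_mask) check as in the basic
// expansion, but the slow path is represented by a LoadBarrierSlowRegNode (or
// LoadBarrierWeakSlowRegNode for weak barriers), a surrogate that is matched
// to the register-specialized stub at code generation time instead of a
// conventional leaf call. Fast and slow results are merged with a Region/Phi
// pair, which is also the shape that step_over_gc_barrier() recognizes after
// expansion.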
 929 void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
 930   PhaseIterGVN &igvn = phase->igvn();
 931 #ifdef PRINT_NODE_TRAVERSALS
 932   Node* preceding_barrier_node = barrier->in(LoadBarrierNode::Oop);
 933 #endif
 934 
 935   Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
 936   Node* in_mem = barrier->in(LoadBarrierNode::Memory);
 937   Node* in_val = barrier->in(LoadBarrierNode::Oop);
 938   Node* in_adr = barrier->in(LoadBarrierNode::Address);
 939 
 940   Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
 941   Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);
 942 
  assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop input to load barrier node cannot be null");
 944 
 945 #ifdef PRINT_NODE_TRAVERSALS
 946   tty->print("\n\n\nBefore barrier optimization:\n");
 947   traverse(barrier, out_ctrl, out_res, -1);
 948 
 949   tty->print("\nBefore barrier optimization:  preceding_barrier_node\n");
 950   traverse(preceding_barrier_node, out_ctrl, out_res, -1);
 951 #endif
 952 
 953   float unlikely  = PROB_UNLIKELY(0.999);
 954 
 955   Node* jthread = igvn.transform(new ThreadLocalNode());
 956   Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
 957   Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr,
 958                                                  TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(),
 959                                                  MemNode::unordered));
 960   Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
 961   Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
 962   Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
 963   Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
 964   IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
 965   Node* then = igvn.transform(new IfTrueNode(iff));
 966   Node* elsen = igvn.transform(new IfFalseNode(iff));
 967 
 968   Node* slow_path_surrogate;
 969   if (!barrier->is_weak()) {
 970     slow_path_surrogate = igvn.transform(new LoadBarrierSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
 971                                                                     (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
 972   } else {
 973     slow_path_surrogate = igvn.transform(new LoadBarrierWeakSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
 974                                                                         (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
 975   }
 976 
  Node* new_loadp = slow_path_surrogate;
  // Create the final region/phi pair to converge the control/data paths to downstream code
 980   Node* result_region = igvn.transform(new RegionNode(3));
 981   result_region->set_req(1, then);
 982   result_region->set_req(2, elsen);
 983 
 984   Node* result_phi = igvn.transform(new PhiNode(result_region, TypeInstPtr::BOTTOM));
 985   result_phi->set_req(1, new_loadp);
 986   result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));
 987 
  // Finally, connect the original outputs to the barrier region and phi to complete the expansion/substitution
  if (out_ctrl != NULL) {
    igvn.replace_node(out_ctrl, result_region);
  }
 993   igvn.replace_node(out_res, result_phi);
 994 
  assert(barrier->outcnt() == 0, "LoadBarrier macro node has remaining outputs after expansion!");
 996 
 997 #ifdef PRINT_NODE_TRAVERSALS
 998   tty->print("\nAfter barrier optimization:  old out_ctrl\n");
 999   traverse(out_ctrl, out_ctrl, out_res, -1);
1000   tty->print("\nAfter barrier optimization:  old out_res\n");
1001   traverse(out_res, out_ctrl, out_res, -1);
1002   tty->print("\nAfter barrier optimization:  old barrier\n");
1003   traverse(barrier, out_ctrl, out_res, -1);
1004   tty->print("\nAfter barrier optimization:  preceding_barrier_node\n");
1005   traverse(preceding_barrier_node, result_region, result_phi, -1);
1006 #endif
1007 
1008   assert(is_gc_barrier_node(result_phi), "sanity");
1009   assert(step_over_gc_barrier(result_phi) == in_val, "sanity");
1010 
1011   return;
1012 }
1013 
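// Expand all registered load barriers. Expansion runs in two passes: first the
// barriers that are unreachable or can be eliminated are pruned or expanded,
// then the remaining barriers are expanded. IGVN transforms are delayed while
// expanding and a full round of IGVN is run afterwards.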
1014 bool ZBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
1015   Compile* C = Compile::current();
1016   PhaseIterGVN &igvn = macro->igvn();
1017   ZBarrierSetC2State* s = state();
1018   if (s->load_barrier_count() > 0) {
1019 #ifdef ASSERT
1020     verify_gc_barriers(false);
1021 #endif
1022     igvn.set_delay_transform(true);
1023     int skipped = 0;
1024     while (s->load_barrier_count() > skipped) {
1025       int load_barrier_count = s->load_barrier_count();
1026       LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
1027       if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
1028         // Node is unreachable, so don't try to expand it
1029         s->remove_load_barrier_node(n);
1030         continue;
1031       }
1032       if (!n->can_be_eliminated()) {
1033         skipped++;
1034         continue;
1035       }
1036       expand_loadbarrier_node(macro, n);
1037       assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
1038       if (C->failing())  return true;
1039     }
1040     while (s->load_barrier_count() > 0) {
1041       int load_barrier_count = s->load_barrier_count();
1042       LoadBarrierNode* n = s->load_barrier_node(load_barrier_count - 1);
1043       assert(!(igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())), "should have been processed already");
1044       assert(!n->can_be_eliminated(), "should have been processed already");
1045       expand_loadbarrier_node(macro, n);
1046       assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
1047       if (C->failing())  return true;
1048     }
1049     igvn.set_delay_transform(false);
1050     igvn.optimize();
1051     if (C->failing())  return true;
1052   }
1053   return false;
1054 }
1055 
1056 // == Loop optimization ==
1057 
1058 static bool replace_with_dominating_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
1059   PhaseIterGVN &igvn = phase->igvn();
1060   Compile* C = Compile::current();
1061 
1062   LoadBarrierNode* lb2 = lb->has_dominating_barrier(phase, false, last_round);
1063   if (lb2 != NULL) {
1064     if (lb->in(LoadBarrierNode::Oop) != lb2->in(LoadBarrierNode::Oop)) {
1065       assert(lb->in(LoadBarrierNode::Address) == lb2->in(LoadBarrierNode::Address), "");
1066       igvn.replace_input_of(lb, LoadBarrierNode::Similar, lb2->proj_out(LoadBarrierNode::Oop));
1067       C->set_major_progress();
1068     } else  {
1069       // That transformation may cause the Similar edge on dominated load barriers to be invalid
1070       lb->fix_similar_in_uses(&igvn);
1071 
1072       Node* val = lb->proj_out(LoadBarrierNode::Oop);
1073       assert(lb2->has_true_uses(), "");
1074       assert(lb2->in(LoadBarrierNode::Oop) == lb->in(LoadBarrierNode::Oop), "");
1075 
1076       phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
1077       phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
1078       igvn.replace_node(val, lb2->proj_out(LoadBarrierNode::Oop));
1079 
1080       return true;
1081     }
1082   }
1083   return false;
1084 }
1085 
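// Walk the raw memory graph upwards from mem until a memory state that
// dominates dom is found. If dom is a Region and i != -1, the memory state on
// dom's i'th input path is returned instead.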
1086 static Node* find_dominating_memory(PhaseIdealLoop* phase, Node* mem, Node* dom, int i) {
1087   assert(dom->is_Region() || i == -1, "");
1088   Node* m = mem;
1089   while(phase->is_dominator(dom, phase->has_ctrl(m) ? phase->get_ctrl(m) : m->in(0))) {
1090     if (m->is_Mem()) {
1091       assert(m->as_Mem()->adr_type() == TypeRawPtr::BOTTOM, "");
1092       m = m->in(MemNode::Memory);
1093     } else if (m->is_MergeMem()) {
1094       m = m->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1095     } else if (m->is_Phi()) {
1096       if (m->in(0) == dom && i != -1) {
1097         m = m->in(i);
1098         break;
1099       } else {
1100         m = m->in(LoopNode::EntryControl);
1101       }
1102     } else if (m->is_Proj()) {
1103       m = m->in(0);
1104     } else if (m->is_SafePoint() || m->is_MemBar()) {
1105       m = m->in(TypeFunc::Memory);
1106     } else {
1107 #ifdef ASSERT
1108       m->dump();
1109 #endif
1110       ShouldNotReachHere();
1111     }
1112   }
1113   return m;
1114 }
1115 
1116 static LoadBarrierNode* clone_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* ctl, Node* mem, Node* oop_in) {
1117   PhaseIterGVN &igvn = phase->igvn();
1118   Compile* C = Compile::current();
1119   Node* the_clone = lb->clone();
1120   the_clone->set_req(LoadBarrierNode::Control, ctl);
1121   the_clone->set_req(LoadBarrierNode::Memory, mem);
1122   if (oop_in != NULL) {
1123     the_clone->set_req(LoadBarrierNode::Oop, oop_in);
1124   }
1125 
1126   LoadBarrierNode* new_lb = the_clone->as_LoadBarrier();
1127   igvn.register_new_node_with_optimizer(new_lb);
1128   IdealLoopTree *loop = phase->get_loop(new_lb->in(0));
1129   phase->set_ctrl(new_lb, new_lb->in(0));
1130   phase->set_loop(new_lb, loop);
1131   phase->set_idom(new_lb, new_lb->in(0), phase->dom_depth(new_lb->in(0))+1);
1132   if (!loop->_child) {
1133     loop->_body.push(new_lb);
1134   }
1135 
1136   Node* proj_ctl = new ProjNode(new_lb, LoadBarrierNode::Control);
1137   igvn.register_new_node_with_optimizer(proj_ctl);
1138   phase->set_ctrl(proj_ctl, proj_ctl->in(0));
1139   phase->set_loop(proj_ctl, loop);
1140   phase->set_idom(proj_ctl, new_lb, phase->dom_depth(new_lb)+1);
1141   if (!loop->_child) {
1142     loop->_body.push(proj_ctl);
1143   }
1144 
1145   Node* proj_oop = new ProjNode(new_lb, LoadBarrierNode::Oop);
1146   phase->register_new_node(proj_oop, new_lb);
1147 
1148   if (!new_lb->in(LoadBarrierNode::Similar)->is_top()) {
1149     LoadBarrierNode* similar = new_lb->in(LoadBarrierNode::Similar)->in(0)->as_LoadBarrier();
1150     if (!phase->is_dominator(similar, ctl)) {
1151       igvn.replace_input_of(new_lb, LoadBarrierNode::Similar, C->top());
1152     }
1153   }
1154 
1155   return new_lb;
1156 }
1157 
1158 static void replace_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* new_val) {
1159   PhaseIterGVN &igvn = phase->igvn();
1160   Node* val = lb->proj_out(LoadBarrierNode::Oop);
1161   igvn.replace_node(val, new_val);
1162   phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
1163   phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
1164 }
1165 
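// If the barrier's Oop input is a two-input Phi and the Address is available
// above the merge point, clone the barrier into each of the Region's input
// paths and replace the original barrier with a Phi over the healed oops.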
1166 static bool split_barrier_thru_phi(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
1167   PhaseIterGVN &igvn = phase->igvn();
1168   Compile* C = Compile::current();
1169 
1170   if (lb->in(LoadBarrierNode::Oop)->is_Phi()) {
1171     Node* oop_phi = lb->in(LoadBarrierNode::Oop);
1172 
1173     if ((oop_phi->req() != 3) || (oop_phi->in(2) == oop_phi)) {
1174       // Ignore phis with only one input
1175       return false;
1176     }
1177 
1178     if (phase->is_dominator(phase->get_ctrl(lb->in(LoadBarrierNode::Address)),
1179                             oop_phi->in(0)) && phase->get_ctrl(lb->in(LoadBarrierNode::Address)) != oop_phi->in(0)) {
1180       // That transformation may cause the Similar edge on dominated load barriers to be invalid
1181       lb->fix_similar_in_uses(&igvn);
1182 
1183       RegionNode* region = oop_phi->in(0)->as_Region();
1184 
1185       int backedge = LoopNode::LoopBackControl;
1186       if (region->is_Loop() && region->in(backedge)->is_Proj() && region->in(backedge)->in(0)->is_If()) {
1187         Node* c = region->in(backedge)->in(0)->in(0);
1188         assert(c->unique_ctrl_out() == region->in(backedge)->in(0), "");
1189         Node* oop = lb->in(LoadBarrierNode::Oop)->in(backedge);
1190         Node* oop_c = phase->has_ctrl(oop) ? phase->get_ctrl(oop) : oop;
1191         if (!phase->is_dominator(oop_c, c)) {
1192           return false;
1193         }
1194       }
1195 
1196       // If the node on the backedge above the phi is the node itself - we have a self loop.
1197       // Don't clone - this will be folded later.
1198       if (oop_phi->in(LoopNode::LoopBackControl) == lb->proj_out(LoadBarrierNode::Oop)) {
1199         return false;
1200       }
1201 
1202       bool is_strip_mined = region->is_CountedLoop() && region->as_CountedLoop()->is_strip_mined();
1203       Node *phi = oop_phi->clone();
1204 
1205       for (uint i = 1; i < region->req(); i++) {
1206         Node* ctrl = region->in(i);
1207         if (ctrl != C->top()) {
1208           assert(!phase->is_dominator(ctrl, region) || region->is_Loop(), "");
1209 
1210           Node* mem = lb->in(LoadBarrierNode::Memory);
1211           Node* m = find_dominating_memory(phase, mem, region, i);
1212 
1213           if (region->is_Loop() && i == LoopNode::LoopBackControl && ctrl->is_Proj() && ctrl->in(0)->is_If()) {
1214             ctrl = ctrl->in(0)->in(0);
1215           } else if (region->is_Loop() && is_strip_mined) {
1216             // If this is a strip mined loop, control must move above OuterStripMinedLoop
1217             assert(i == LoopNode::EntryControl, "check");
1218             assert(ctrl->is_OuterStripMinedLoop(), "sanity");
1219             ctrl = ctrl->as_OuterStripMinedLoop()->in(LoopNode::EntryControl);
1220           }
1221 
1222           LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, ctrl, m, lb->in(LoadBarrierNode::Oop)->in(i));
1223           Node* out_ctrl = new_lb->proj_out(LoadBarrierNode::Control);
1224 
1225           if (is_strip_mined && (i == LoopNode::EntryControl)) {
1226             assert(region->in(i)->is_OuterStripMinedLoop(), "");
1227             igvn.replace_input_of(region->in(i), i, out_ctrl);
1228             phase->set_idom(region->in(i), out_ctrl, phase->dom_depth(out_ctrl));
1229           } else if (ctrl == region->in(i)) {
1230             igvn.replace_input_of(region, i, out_ctrl);
            // Only update the idom if it is the loop entry we are updating
            // - a loop backedge doesn't change the idom
1233             if (region->is_Loop() && i == LoopNode::EntryControl) {
1234               phase->set_idom(region, out_ctrl, phase->dom_depth(out_ctrl));
1235             }
1236           } else {
1237             Node* iff = region->in(i)->in(0);
1238             igvn.replace_input_of(iff, 0, out_ctrl);
1239             phase->set_idom(iff, out_ctrl, phase->dom_depth(out_ctrl)+1);
1240           }
1241           phi->set_req(i, new_lb->proj_out(LoadBarrierNode::Oop));
1242         }
1243       }
1244       phase->register_new_node(phi, region);
1245       replace_barrier(phase, lb, phi);
1246 
1247       if (region->is_Loop()) {
        // A load barrier moved to the backedge of the loop may now
        // have a safepoint on the path to the barrier on its Similar
        // edge
1251         igvn.replace_input_of(phi->in(LoopNode::LoopBackControl)->in(0), LoadBarrierNode::Similar, C->top());
1252         Node* head = region->in(LoopNode::EntryControl);
1253         phase->set_idom(region, head, phase->dom_depth(head)+1);
1254         phase->recompute_dom_depth();
1255         if (head->is_CountedLoop() && head->as_CountedLoop()->is_main_loop()) {
1256           head->as_CountedLoop()->set_normal_loop();
1257         }
1258       }
1259 
1260       return true;
1261     }
1262   }
1263 
1264   return false;
1265 }
1266 
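// If both the Oop and the Address inputs are loop invariant, hoist the barrier
// out of the loop (all the way out of the outer strip mined loop, if there is
// one) and attach it to the loop entry control.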
1267 static bool move_out_of_loop(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
1268   PhaseIterGVN &igvn = phase->igvn();
1269   IdealLoopTree *lb_loop = phase->get_loop(lb->in(0));
1270   if (lb_loop != phase->ltree_root() && !lb_loop->_irreducible) {
1271     Node* oop_ctrl = phase->get_ctrl(lb->in(LoadBarrierNode::Oop));
1272     IdealLoopTree *oop_loop = phase->get_loop(oop_ctrl);
1273     IdealLoopTree* adr_loop = phase->get_loop(phase->get_ctrl(lb->in(LoadBarrierNode::Address)));
1274     if (!lb_loop->is_member(oop_loop) && !lb_loop->is_member(adr_loop)) {
1275       // That transformation may cause the Similar edge on dominated load barriers to be invalid
1276       lb->fix_similar_in_uses(&igvn);
1277 
1278       Node* head = lb_loop->_head;
1279       assert(head->is_Loop(), "");
1280 
1281       if (phase->is_dominator(head, oop_ctrl)) {
1282         assert(oop_ctrl->Opcode() == Op_CProj && oop_ctrl->in(0)->Opcode() == Op_NeverBranch, "");
1283         assert(lb_loop->is_member(phase->get_loop(oop_ctrl->in(0)->in(0))), "");
1284         return false;
1285       }
1286 
1287       if (head->is_CountedLoop()) {
1288         CountedLoopNode* cloop = head->as_CountedLoop();
1289         if (cloop->is_main_loop()) {
1290           cloop->set_normal_loop();
1291         }
        // When we are moving a barrier out of a counted loop,
        // make sure we move it all the way out of the strip mined outer loop.
1294         if (cloop->is_strip_mined()) {
1295           head = cloop->outer_loop();
1296         }
1297       }
1298 
1299       Node* mem = lb->in(LoadBarrierNode::Memory);
1300       Node* m = find_dominating_memory(phase, mem, head, -1);
1301 
1302       LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, head->in(LoopNode::EntryControl), m, NULL);
1303 
1304       assert(phase->idom(head) == head->in(LoopNode::EntryControl), "");
1305       Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
1306       igvn.replace_input_of(head, LoopNode::EntryControl, proj_ctl);
1307       phase->set_idom(head, proj_ctl, phase->dom_depth(proj_ctl) + 1);
1308 
1309       replace_barrier(phase, lb, new_lb->proj_out(LoadBarrierNode::Oop));
1310 
1311       phase->recompute_dom_depth();
1312 
1313       return true;
1314     }
1315   }
1316 
1317   return false;
1318 }
1319 
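// If another barrier heals the same oop from the same address on a sibling
// control flow path, hoist a single clone of the barrier above the controlling
// If (the dominator LCA of the two barriers) and let both original barriers
// use its result.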
1320 static bool common_barriers(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
1321   PhaseIterGVN &igvn = phase->igvn();
1322   Node* in_val = lb->in(LoadBarrierNode::Oop);
1323   for (DUIterator_Fast imax, i = in_val->fast_outs(imax); i < imax; i++) {
1324     Node* u = in_val->fast_out(i);
1325     if (u != lb && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
1326       Node* this_ctrl = lb->in(LoadBarrierNode::Control);
1327       Node* other_ctrl = u->in(LoadBarrierNode::Control);
1328 
1329       Node* lca = phase->dom_lca(this_ctrl, other_ctrl);
1330       Node* proj1 = NULL;
1331       Node* proj2 = NULL;
1332       bool ok = (lb->in(LoadBarrierNode::Address) == u->in(LoadBarrierNode::Address));
1333 
1334       while (this_ctrl != lca && ok) {
1335         if (this_ctrl->in(0) != NULL &&
1336             this_ctrl->in(0)->is_MultiBranch()) {
1337           if (this_ctrl->in(0)->in(0) == lca) {
1338             assert(proj1 == NULL, "");
1339             assert(this_ctrl->is_Proj(), "");
1340             proj1 = this_ctrl;
1341           } else if (!(this_ctrl->in(0)->is_If() && this_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
1342             ok = false;
1343           }
1344         }
1345         this_ctrl = phase->idom(this_ctrl);
1346       }
1347       while (other_ctrl != lca && ok) {
1348         if (other_ctrl->in(0) != NULL &&
1349             other_ctrl->in(0)->is_MultiBranch()) {
1350           if (other_ctrl->in(0)->in(0) == lca) {
1351             assert(other_ctrl->is_Proj(), "");
1352             assert(proj2 == NULL, "");
1353             proj2 = other_ctrl;
1354           } else if (!(other_ctrl->in(0)->is_If() && other_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
1355             ok = false;
1356           }
1357         }
1358         other_ctrl = phase->idom(other_ctrl);
1359       }
1360       assert(proj1 == NULL || proj2 == NULL || proj1->in(0) == proj2->in(0), "");
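           // If both barriers are reached through distinct projections of the same
           // If, hoist a single clone above that If and let both barriers share it.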
1361       if (ok && proj1 && proj2 && proj1 != proj2 && proj1->in(0)->is_If()) {
1362         // This transformation may cause the Similar edge on dominated load barriers to become invalid
1363         lb->fix_similar_in_uses(&igvn);
1364         u->as_LoadBarrier()->fix_similar_in_uses(&igvn);
1365 
1366         Node* split = lca->unique_ctrl_out();
1367         assert(split->in(0) == lca, "");
1368 
1369         Node* mem = lb->in(LoadBarrierNode::Memory);
1370         Node* m = find_dominating_memory(phase, mem, split, -1);
1371         LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, lca, m, NULL);
1372 
1373         Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
1374         igvn.replace_input_of(split, 0, new_lb->proj_out(LoadBarrierNode::Control));
1375         phase->set_idom(split, proj_ctl, phase->dom_depth(proj_ctl)+1);
1376 
1377         Node* proj_oop = new_lb->proj_out(LoadBarrierNode::Oop);
1378         replace_barrier(phase, lb, proj_oop);
1379         replace_barrier(phase, u->as_LoadBarrier(), proj_oop);
1380 
1381         phase->recompute_dom_depth();
1382 
1383         return true;
1384       }
1385     }
1386   }
1387 
1388   return false;
1389 }
1390 
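     // Apply the load barrier optimizations in order, stopping after the first
     // transformation that succeeds.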
1391 static void optimize_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
1392   Compile* C = Compile::current();
1393 
1394   if (!C->directive()->ZOptimizeLoadBarriersOption) {
1395     return;
1396   }
1397 
1398   if (lb->has_true_uses()) {
1399     if (replace_with_dominating_barrier(phase, lb, last_round)) {
1400       return;
1401     }
1402 
1403     if (split_barrier_thru_phi(phase, lb)) {
1404       return;
1405     }
1406 
1407     if (move_out_of_loop(phase, lb)) {
1408       return;
1409     }
1410 
1411     if (common_barriers(phase, lb)) {
1412       return;
1413     }
1414   }
1415 }
1416 
1417 void ZBarrierSetC2::loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round) {
1418   if (node->is_LoadBarrier()) {
1419     optimize_load_barrier(phase, node->as_LoadBarrier(), last_round);
1420   }
1421 }
1422 
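     // Step over a load barrier, whether still unexpanded or in its optimized
     // expanded form, and return the oop it guards; any other node is returned
     // unchanged.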
1423 Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const {
1424   Node* node = c;
1425 
1426   // 1. This step follows potential oop projections of a load barrier before expansion
1427   if (node->is_Proj()) {
1428     node = node->in(0);
1429   }
1430 
1431   // 2. This step checks for unexpanded load barriers
1432   if (node->is_LoadBarrier()) {
1433     return node->in(LoadBarrierNode::Oop);
1434   }
1435 
1436   // 3. This step checks for the phi corresponding to an optimized load barrier expansion
1437   if (node->is_Phi()) {
1438     PhiNode* phi = node->as_Phi();
1439     Node* n = phi->in(1);
1440     if (n != NULL && (n->is_LoadBarrierSlowReg() || n->is_LoadBarrierWeakSlowReg())) {
1441       assert(c == node, "projections from step 1 should only be seen before macro expansion");
1442       return phi->in(2);
1443     }
1444   }
1445 
1446   return c;
1447 }
1448 
1449 // == Verification ==
1450 
1451 #ifdef ASSERT
1452 
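     // Verify that every use of an oop-producing node is covered by a load barrier,
     // looking through EncodeP/DecodeN nodes (and, except right after parsing,
     // Phi/CMove nodes); SCMemProj uses are ignored and any other use is reported
     // as bad.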
1453 static bool look_for_barrier(Node* n, bool post_parse, VectorSet& visited) {
1454   if (visited.test_set(n->_idx)) {
1455     return true;
1456   }
1457 
1458   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1459     Node* u = n->fast_out(i);
1460     if (u->is_LoadBarrier()) {
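           // This use is covered directly by a load barrier; nothing more to check.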
1461     } else if ((u->is_Phi() || u->is_CMove()) && !post_parse) {
1462       if (!look_for_barrier(u, post_parse, visited)) {
1463         return false;
1464       }
1465     } else if (u->Opcode() == Op_EncodeP || u->Opcode() == Op_DecodeN) {
1466       if (!look_for_barrier(u, post_parse, visited)) {
1467         return false;
1468       }
1469     } else if (u->Opcode() != Op_SCMemProj) {
1470       tty->print("bad use"); u->dump();
1471       return false;
1472     }
1473   }
1474 
1475   return true;
1476 }
1477 
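     // Sanity check the load barriers recorded for expansion: a Similar edge must
     // point to a barrier on the same address guarding a different oop, Similar
     // edges must not be chained, and no safepoint may lie between a barrier and
     // the barrier its Similar edge points to.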
1478 void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
1479   ZBarrierSetC2State* s = state();
1480   Compile* C = Compile::current();
1481   ResourceMark rm;
1482   VectorSet visited(Thread::current()->resource_area());
1483   for (int i = 0; i < s->load_barrier_count(); i++) {
1484     LoadBarrierNode* n = s->load_barrier_node(i);
1485 
1486     // The Similar edge, if set, must point to a dominating barrier on
1487     // the same address, and that barrier must guard a different oop
1488     // value; otherwise the value would not be reloaded before it is
1489     // used a second time.
1490     assert(n->in(LoadBarrierNode::Similar)->is_top() ||
1491            (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
1492             n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Address) == n->in(LoadBarrierNode::Address) &&
1493             n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Oop) != n->in(LoadBarrierNode::Oop)),
1494            "broken similar edge");
1495 
1496     assert(post_parse || n->as_LoadBarrier()->has_true_uses(),
1497            "found unneeded load barrier");
1498 
1499     // Several load barrier nodes chained through their Similar edges
1500     // would break the code that removes the barriers in final graph reshape.
1501     assert(n->in(LoadBarrierNode::Similar)->is_top() ||
1502            (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
1503             n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Similar)->is_top()),
1504            "chain of Similar load barriers");
1505 
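         // Walk the control flow upwards from this barrier towards the barrier the
         // Similar edge points to, asserting that no safepoint is encountered on
         // the way (presumably a safepoint in between would invalidate the
         // dominating barrier's result).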
1506     if (!n->in(LoadBarrierNode::Similar)->is_top()) {
1507       ResourceMark rm;
1508       Unique_Node_List wq;
1509       Node* other = n->in(LoadBarrierNode::Similar)->in(0);
1510       wq.push(n);
1511       bool ok = true;
1512       bool dom_found = false;
1513       for (uint next = 0; next < wq.size(); ++next) {
1514         Node *n = wq.at(next);
1515         assert(n->is_CFG(), "");
1516         assert(!n->is_SafePoint(), "");
1517 
1518         if (n == other) {
1519           continue;
1520         }
1521 
1522         if (n->is_Region()) {
1523           for (uint i = 1; i < n->req(); i++) {
1524             Node* m = n->in(i);
1525             if (m != NULL) {
1526               wq.push(m);
1527             }
1528           }
1529         } else {
1530           Node* m = n->in(0);
1531           if (m != NULL) {
1532             wq.push(m);
1533           }
1534         }
1535       }
1536     }
1537 
1538     if (ZVerifyLoadBarriers) {
1539       if ((n->is_Load() || n->is_LoadStore()) && n->bottom_type()->make_oopptr() != NULL) {
1540         visited.Clear();
1541         bool found = look_for_barrier(n, post_parse, visited);
1542         if (!found) {
1543           n->dump(1);
1544           n->dump(-3);
1545           stringStream ss;
1546           C->method()->print_short_name(&ss);
1547           tty->print_cr("-%s-", ss.as_string());
1548           assert(found, "");
1549         }
1550       }
1551     }
1552   }
1553 }
1554 
1555 #endif