/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/c2/zBarrierSetC2.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "opto/castnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/node.hpp"
#include "opto/phase.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/copy.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"

ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena) :
    _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8,  0, NULL)) {}

int ZBarrierSetC2State::load_barrier_count() const {
  return _load_barrier_nodes->length();
}

void ZBarrierSetC2State::add_load_barrier_node(LoadBarrierNode * n) {
  assert(!_load_barrier_nodes->contains(n), "duplicate entry in expand list");
  _load_barrier_nodes->append(n);
}

void ZBarrierSetC2State::remove_load_barrier_node(LoadBarrierNode * n) {
  // This function may be called twice for the same node, so check that the
  // node is still in the array before attempting to remove it.
  if (_load_barrier_nodes->contains(n)) {
    _load_barrier_nodes->remove(n);
  }
}

LoadBarrierNode* ZBarrierSetC2State::load_barrier_node(int idx) const {
  return _load_barrier_nodes->at(idx);
}

void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new(comp_arena) ZBarrierSetC2State(comp_arena);
}

ZBarrierSetC2State* ZBarrierSetC2::state() const {
  return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
  // 1. This step follows potential oop projections of a load barrier before expansion
  if (node->is_Proj()) {
    node = node->in(0);
  }

  // 2. This step checks for unexpanded load barriers
  if (node->is_LoadBarrier()) {
    return true;
  }

  // 3. This step checks for the phi corresponding to an optimized load barrier expansion
  if (node->is_Phi()) {
    PhiNode* phi = node->as_Phi();
    Node* n = phi->in(1);
    if (n != NULL && n->is_LoadBarrierSlowReg()) {
      return true;
    }
  }

  return false;
}
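
// The shapes recognized above, before and after barrier expansion
// (a sketch for orientation; the Phi's Region input is implicit):
//
//   Before expansion:  LoadP --> LoadBarrier --> Proj(Oop) --> use
//   After expansion:   LoadP --> Phi(Region, LoadBarrierSlowReg, LoadP) --> use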

void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
  if (node->is_LoadBarrier()) {
    state()->add_load_barrier_node(node->as_LoadBarrier());
  }
}

void ZBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
  if (node->is_LoadBarrier()) {
    state()->remove_load_barrier_node(node->as_LoadBarrier());
  }
}

void ZBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {
  // Remove useless LoadBarrier nodes
  ZBarrierSetC2State* s = state();
  for (int i = s->load_barrier_count()-1; i >= 0; i--) {
    LoadBarrierNode* n = s->load_barrier_node(i);
    if (!useful.member(n)) {
      unregister_potential_barrier_node(n);
    }
  }
}

void ZBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  if (node->is_LoadBarrier() && !node->as_LoadBarrier()->has_true_uses()) {
    igvn->_worklist.push(node);
  }
}

const uint NoBarrier       = 0;
const uint RequireBarrier  = 1;
const uint WeakBarrier     = 2;
const uint ExpandedBarrier = 4;

static bool load_require_barrier(LoadNode* load)      { return (load->barrier_data() & RequireBarrier)  == RequireBarrier; }
static bool load_has_weak_barrier(LoadNode* load)     { return (load->barrier_data() & WeakBarrier)     == WeakBarrier; }
static bool load_has_expanded_barrier(LoadNode* load) { return (load->barrier_data() & ExpandedBarrier) == ExpandedBarrier; }
static void load_set_expanded_barrier(LoadNode* load) { return load->set_barrier_data(ExpandedBarrier); }

static void load_set_barrier(LoadNode* load, bool weak) {
  if (weak) {
    load->set_barrier_data(RequireBarrier | WeakBarrier);
  } else {
    load->set_barrier_data(RequireBarrier);
  }
}
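
// A note on the barrier_data encoding used above (a summary derived from
// the flag values; not new states):
//
//   0 (NoBarrier)                    - no barrier needed
//   1 (RequireBarrier)               - strong barrier required
//   3 (RequireBarrier | WeakBarrier) - weak barrier required
//   4 (ExpandedBarrier)              - barrier has already been expanded
//
// Note that load_set_expanded_barrier() overwrites the data, so the
// Require/Weak bits are dropped once a load's barrier has been expanded.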

// == LoadBarrierNode ==

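// A LoadBarrierNode guards an oop that has just been loaded. Its fixed
// inputs, as wired up by the constructor below, are:
//
//   Control - control input at the point of the load
//   Memory  - raw memory state
//   Oop     - the loaded oop value to be healed
//   Address - the address the oop was loaded from
//   Similar - optional link to a dominating barrier on the same Address,
//             used to elide this barrier (see has_dominating_barrier())
//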
LoadBarrierNode::LoadBarrierNode(Compile* C,
                                 Node* c,
                                 Node* mem,
                                 Node* val,
                                 Node* adr,
                                 bool weak) :
    MultiNode(Number_of_Inputs),
    _weak(weak) {
  init_req(Control, c);
  init_req(Memory, mem);
  init_req(Oop, val);
  init_req(Address, adr);
  init_req(Similar, C->top());

  init_class_id(Class_LoadBarrier);
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->register_potential_barrier_node(this);
}

uint LoadBarrierNode::size_of() const {
  return sizeof(*this);
}

bool LoadBarrierNode::cmp(const Node& n) const {
  ShouldNotReachHere();
  return false;
}

const Type *LoadBarrierNode::bottom_type() const {
  const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
  Node* in_oop = in(Oop);
  floadbarrier[Control] = Type::CONTROL;
  floadbarrier[Memory] = Type::MEMORY;
  floadbarrier[Oop] = in_oop == NULL ? Type::TOP : in_oop->bottom_type();
  return TypeTuple::make(Number_of_Outputs, floadbarrier);
}

const TypePtr* LoadBarrierNode::adr_type() const {
  ShouldNotReachHere();
  return NULL;
}

const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
  const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
  const Type* val_t = phase->type(in(Oop));
  floadbarrier[Control] = Type::CONTROL;
  floadbarrier[Memory]  = Type::MEMORY;
  floadbarrier[Oop]     = val_t;
  return TypeTuple::make(Number_of_Outputs, floadbarrier);
}

bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n) {
  if (phase != NULL) {
    return phase->is_dominator(d, n);
  }

  for (int i = 0; i < 10 && n != NULL; i++) {
    n = IfNode::up_one_dom(n, linear_only);
    if (n == d) {
      return true;
    }
  }

  return false;
}

LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) {
  if (is_weak()) {
    // Weak barriers can't be eliminated
    return NULL;
  }

  Node* val = in(LoadBarrierNode::Oop);
  if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) {
    LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier();
    assert(lb->in(Address) == in(Address), "");
    // The load barrier on the Similar edge dominates, so if it has the same
    // Oop input it can replace this barrier.
    if (lb->in(Oop) == in(Oop)) {
      return lb;
    }
    // Follow the chain of load barriers through their Similar edges
    while (!lb->in(Similar)->is_top()) {
      lb = lb->in(Similar)->in(0)->as_LoadBarrier();
      assert(lb->in(Address) == in(Address), "");
    }
    if (lb != in(Similar)->in(0)) {
      return lb;
    }
  }
  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
    Node* u = val->fast_out(i);
    if (u != this && u->is_LoadBarrier() && u->in(Oop) == val && u->as_LoadBarrier()->has_true_uses()) {
      Node* this_ctrl = in(LoadBarrierNode::Control);
      Node* other_ctrl = u->in(LoadBarrierNode::Control);
      if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
        return u->as_LoadBarrier();
      }
    }
  }

  if (can_be_eliminated()) {
    return NULL;
  }

  if (!look_for_similar) {
    return NULL;
  }

  Node* addr = in(LoadBarrierNode::Address);
  for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
    Node* u = addr->fast_out(i);
    if (u != this && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
      Node* this_ctrl = in(LoadBarrierNode::Control);
      Node* other_ctrl = u->in(LoadBarrierNode::Control);
      if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
        ResourceMark rm;
        Unique_Node_List wq;
        wq.push(in(LoadBarrierNode::Control));
        bool ok = true;
        bool dom_found = false;
        for (uint next = 0; next < wq.size(); ++next) {
          Node *n = wq.at(next);
          if (n->is_top()) {
            return NULL;
          }
          assert(n->is_CFG(), "");
          if (n->is_SafePoint()) {
            ok = false;
            break;
          }
          if (n == u) {
            dom_found = true;
            continue;
          }
          if (n->is_Region()) {
            for (uint i = 1; i < n->req(); i++) {
              Node* m = n->in(i);
              if (m != NULL) {
                wq.push(m);
              }
            }
          } else {
            Node* m = n->in(0);
            if (m != NULL) {
              wq.push(m);
            }
          }
        }
        if (ok) {
          assert(dom_found, "");
          return u->as_LoadBarrier();
        }
        break;
      }
    }
  }

  return NULL;
}

void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
  // A change to this barrier may affect dominated barriers, so re-push those
  assert(!is_weak(), "sanity");
  Node* val = in(LoadBarrierNode::Oop);

  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
    Node* u = val->fast_out(i);
    if (u != this && u->is_LoadBarrier() && u->in(Oop) == val) {
      Node* this_ctrl = in(Control);
      Node* other_ctrl = u->in(Control);
      if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
        igvn->_worklist.push(u);
      }
    }

    Node* addr = in(LoadBarrierNode::Address);
    for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
      Node* u = addr->fast_out(i);
      if (u != this && u->is_LoadBarrier() && u->in(Similar)->is_top()) {
        Node* this_ctrl = in(Control);
        Node* other_ctrl = u->in(Control);
        if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
          igvn->_worklist.push(u);
        }
      }
    }
  }
}

Node *LoadBarrierNode::Identity(PhaseGVN *phase) {
  LoadBarrierNode* dominating_barrier = has_dominating_barrier(NULL, true, false);
  if (dominating_barrier != NULL) {
    assert(!is_weak(), "Weak barriers can't be eliminated");
    assert(dominating_barrier->in(Oop) == in(Oop), "");
    return dominating_barrier;
  }

  return this;
}

Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) {
    return this;
  }

  Node *val = in(Oop);
  Node *mem = in(Memory);
  Node *ctrl = in(Control);

  assert(val->Opcode() != Op_LoadN, "");
  assert(val->Opcode() != Op_DecodeN, "");

  if (mem->is_MergeMem()) {
    Node *new_mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
    set_req(Memory, new_mem);
    if (mem->outcnt() == 0 && can_reshape) {
      phase->is_IterGVN()->_worklist.push(mem);
    }
    return this;
  }

  LoadBarrierNode *dominating_barrier = NULL;
  if (!is_weak()) {
    dominating_barrier = has_dominating_barrier(NULL, !can_reshape, !phase->C->major_progress());
    if (dominating_barrier != NULL && dominating_barrier->in(Oop) != in(Oop)) {
      assert(in(Address) == dominating_barrier->in(Address), "");
      set_req(Similar, dominating_barrier->proj_out(Oop));
      return this;
    }
  }

  bool eliminate = can_reshape && (dominating_barrier != NULL || !has_true_uses());
  if (eliminate) {
    if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      Node* out_ctrl = proj_out_or_null(Control);
      Node* out_res = proj_out_or_null(Oop);

      if (out_ctrl != NULL) {
        igvn->replace_node(out_ctrl, ctrl);
      }

      // This transformation may cause the Similar edge on dependent load barriers to become invalid
      fix_similar_in_uses(igvn);
      if (out_res != NULL) {
        if (dominating_barrier != NULL) {
          assert(!is_weak(), "Sanity");
          igvn->replace_node(out_res, dominating_barrier->proj_out(Oop));
        } else {
          igvn->replace_node(out_res, val);
        }
      }
    }
    return new ConINode(TypeInt::ZERO);
  }

  // If the Similar edge is no longer a load barrier, clear it
  Node* similar = in(Similar);
  if (!similar->is_top() && !(similar->is_Proj() && similar->in(0)->is_LoadBarrier())) {
    set_req(Similar, phase->C->top());
    return this;
  }

  if (can_reshape && !is_weak()) {
    // If this barrier is linked through the Similar edge by a
    // dominated barrier and both barriers have the same Oop field,
    // the dominated barrier can go away, so push it for reprocessing.
    // We also want to avoid having a barrier depend, through its Similar
    // edge, on a dominating barrier that itself depends on yet another
    // barrier through its own Similar edge; the first should rather
    // depend directly on the third.
    PhaseIterGVN* igvn = phase->is_IterGVN();
    Node* out_res = proj_out(Oop);
    for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
      Node* u = out_res->fast_out(i);
      if (u->is_LoadBarrier() && u->in(Similar) == out_res &&
          (u->in(Oop) == val || !u->in(Similar)->is_top())) {
        assert(!u->as_LoadBarrier()->is_weak(), "Sanity");
        igvn->_worklist.push(u);
      }
    }
    push_dominated_barriers(igvn);
  }

  return NULL;
}

uint LoadBarrierNode::match_edge(uint idx) const {
  ShouldNotReachHere();
  return 0;
}

void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
  Node* out_res = proj_out_or_null(Oop);
  if (out_res == NULL) {
    return;
  }

  for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
    Node* u = out_res->fast_out(i);
    if (u->is_LoadBarrier() && u->in(Similar) == out_res) {
      igvn->replace_input_of(u, Similar, igvn->C->top());
      --i;
      --imax;
    }
  }
}

bool LoadBarrierNode::has_true_uses() const {
  Node* out_res = proj_out_or_null(Oop);
  if (out_res != NULL) {
    for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
      Node *u = out_res->fast_out(i);
      if (!u->is_LoadBarrier() || u->in(Similar) != out_res) {
        return true;
      }
    }
  }
  return false;
}

static bool barrier_needed(C2Access& access) {
  return ZBarrierSet::barrier_needed(access.decorators(), access.type());
}

Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  Node* p = BarrierSetC2::load_at_resolved(access, val_type);
  if (!barrier_needed(access)) {
    return p;
  }

  bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;
  if (p->isa_Load()) {
    load_set_barrier(p->as_Load(), weak);
  }
  return p;
}

Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* val_type) const {
  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
  LoadStoreNode* lsn = result->as_LoadStore();
  if (barrier_needed(access)) {
    lsn->set_has_barrier();
  }
  return lsn;
}

Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
  Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  LoadStoreNode* lsn = result->as_LoadStore();
  if (barrier_needed(access)) {
    lsn->set_has_barrier();
  }
  return lsn;
}

Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
  LoadStoreNode* lsn = result->as_LoadStore();
  if (barrier_needed(access)) {
    lsn->set_has_barrier();
  }
  return lsn;
}

// == Macro Expansion ==

// Optimized, low spill, loadbarrier variant using stub specialized on register used
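//
// The expansion below turns each LoadBarrierNode into an explicit bad-mask
// test with a slow path. In pseudocode (a sketch for orientation; names are
// illustrative, the real nodes are created just below):
//
//   bad_mask = *(thread + address_bad_mask_offset)
//   if ((oop & bad_mask) != 0) {         // unlikely
//     oop = slow_path(oop, address)      // LoadBarrierSlowRegNode
//   }
//   // a Region/Phi pair merges the fast and slow paths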
void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const {
  PhaseIterGVN &igvn = phase->igvn();
  float unlikely  = PROB_UNLIKELY(0.999);

  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
  Node* in_mem = barrier->in(LoadBarrierNode::Memory);
  Node* in_val = barrier->in(LoadBarrierNode::Oop);
  Node* in_adr = barrier->in(LoadBarrierNode::Address);

  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
  Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);

  assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop to loadbarrier node cannot be null");

  Node* jthread = igvn.transform(new ThreadLocalNode());
  Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
  Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr,
                                                 TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(),
                                                 MemNode::unordered));
  Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
  Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
  Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
  Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
  IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
  Node* then = igvn.transform(new IfTrueNode(iff));
  Node* elsen = igvn.transform(new IfFalseNode(iff));

  Node* new_loadp = igvn.transform(new LoadBarrierSlowRegNode(then, in_adr, in_val,
                                                              (const TypePtr*) in_val->bottom_type(), barrier->is_weak()));

  // Create the final region/phi pair to converge control/data paths to downstream code
  Node* result_region = igvn.transform(new RegionNode(3));
  result_region->set_req(1, then);
  result_region->set_req(2, elsen);

  Node* result_phi = igvn.transform(new PhiNode(result_region, TypeInstPtr::BOTTOM));
  result_phi->set_req(1, new_loadp);
  result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));

  igvn.replace_node(out_ctrl, result_region);
  igvn.replace_node(out_res, result_phi);

  assert(barrier->outcnt() == 0, "LoadBarrier macro node has non-null outputs after expansion!");

  igvn.remove_dead_node(barrier);
  igvn.remove_dead_node(out_ctrl);
  igvn.remove_dead_node(out_res);

  assert(is_gc_barrier_node(result_phi), "sanity");
  assert(step_over_gc_barrier(result_phi) == in_val, "sanity");

  phase->C->print_method(PHASE_BARRIER_EXPANSION, 4, barrier->_idx);
}

bool ZBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  ZBarrierSetC2State* s = state();
  if (s->load_barrier_count() > 0) {
    PhaseMacroExpand macro(igvn);

    int skipped = 0;
    while (s->load_barrier_count() > skipped) {
      int load_barrier_count = s->load_barrier_count();
      LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
      if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
        // Node is unreachable, so don't try to expand it
        s->remove_load_barrier_node(n);
        continue;
      }
      if (!n->can_be_eliminated()) {
        skipped++;
        continue;
      }
      expand_loadbarrier_node(&macro, n);
      assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
      if (C->failing()) {
        return true;
      }
    }
    while (s->load_barrier_count() > 0) {
      int load_barrier_count = s->load_barrier_count();
      LoadBarrierNode* n = s->load_barrier_node(load_barrier_count - 1);
      assert(!(igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())), "should have been processed already");
      assert(!n->can_be_eliminated(), "should have been processed already");
      expand_loadbarrier_node(&macro, n);
      assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
      if (C->failing()) {
        return true;
      }
    }
    igvn.set_delay_transform(false);
    igvn.optimize();
    if (C->failing()) {
      return true;
    }
  }

  return false;
}

Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const {
  Node* node = c;

  // 1. This step follows potential oop projections of a load barrier before expansion
  if (node->is_Proj()) {
    node = node->in(0);
  }

  // 2. This step checks for unexpanded load barriers
  if (node->is_LoadBarrier()) {
    return node->in(LoadBarrierNode::Oop);
  }

  // 3. This step checks for the phi corresponding to an optimized load barrier expansion
  if (node->is_Phi()) {
    PhiNode* phi = node->as_Phi();
    Node* n = phi->in(1);
    if (n != NULL && n->is_LoadBarrierSlowReg()) {
      assert(c == node, "projections from step 1 should only be seen before macro expansion");
      return phi->in(2);
    }
  }

  return c;
}

Node* ZBarrierSetC2::step_over_gc_barrier_ctrl(Node* c) const {
  Node* node = c;

  // 1. This step follows potential ctrl projections of a load barrier before expansion
  if (node->is_Proj()) {
    node = node->in(0);
  }

  // 2. This step checks for unexpanded load barriers
  if (node->is_LoadBarrier()) {
    return node->in(LoadBarrierNode::Control);
  }

  return c;
}

bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const {
  return type == T_OBJECT || type == T_ARRAY;
}

bool ZBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_LoadBarrier:
      assert(0, "There should be no load barriers left");
    case Op_ZGetAndSetP:
    case Op_ZCompareAndExchangeP:
    case Op_ZCompareAndSwapP:
    case Op_ZWeakCompareAndSwapP:
#ifdef ASSERT
      if (VerifyOptoOopOffsets) {
        MemNode *mem = n->as_Mem();
        // Check to see if address types have grounded out somehow.
        const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
        ciInstanceKlass *k = tp->klass()->as_instance_klass();
        bool oop_offset_is_sane = k->contains_field_offset(tp->offset());
        assert(!tp || oop_offset_is_sane, "");
      }
#endif
      return true;
    default:
      return false;
  }
}

bool ZBarrierSetC2::matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const {
  switch(opcode) {
    case Op_CallLeaf:
      if (n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() ||
          n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr()) {
        mem_op = true;
        mem_addr_idx = TypeFunc::Parms + 1;
        return true;
      }
      return false;
    default:
      return false;
  }
}

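// The Z compound memory ops carry an extra keepalive input; the matcher
// expects two-operand Binary nodes, so the inputs are repacked here. For
// example, a ZCompareAndSwapP is reshaped (a sketch of the transformation
// performed in the code below) from
//
//   (ctrl, mem, adr, newval, oldval, keepalive)
// to
//   (ctrl, mem, Binary(adr, keepalive), Binary(oldval, newval))
//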
bool ZBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
  switch(opcode) {
    case Op_ZCompareAndExchangeP:
    case Op_ZCompareAndSwapP:
    case Op_ZWeakCompareAndSwapP: {
      Node *mem = n->in(MemNode::Address);
      Node *keepalive = n->in(5);
      Node *pair1 = new BinaryNode(mem, keepalive);

      Node *newval = n->in(MemNode::ValueIn);
      Node *oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
      Node *pair2 = new BinaryNode(oldval, newval);

      n->set_req(MemNode::Address, pair1);
      n->set_req(MemNode::ValueIn, pair2);
      n->del_req(5);
      n->del_req(LoadStoreConditionalNode::ExpectedIn);
      return true;
    }
    case Op_ZGetAndSetP: {
      Node *keepalive = n->in(4);
      Node *newval = n->in(MemNode::ValueIn);
      Node *pair = new BinaryNode(newval, keepalive);
      n->set_req(MemNode::ValueIn, pair);
      n->del_req(4);
      return true;
    }

    default:
      return false;
  }
}

// == Verification ==

#ifdef ASSERT

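// "Slippery safepoints": once a load's barrier has been expanded, the healed
// oop must not share a block with a safepoint, since a GC phase change at
// the safepoint can make the value bad again (the rationale as we understand
// it; the structural property checked below is exactly that no block
// contains both a safepoint and a load with an expanded barrier).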
static void verify_slippery_safepoints_internal(Node* ctrl) {
  // Given a CFG node, make sure it does not contain both safepoints and loads
  // that have expanded barriers.
  bool found_safepoint = false;
  bool found_load = false;

  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* node = ctrl->fast_out(i);
    if (node->in(0) != ctrl) {
      // Skip outgoing precedence edges from ctrl.
      continue;
    }
    if (node->is_SafePoint()) {
      found_safepoint = true;
    }
    if (node->is_Load() && load_require_barrier(node->as_Load()) &&
        load_has_expanded_barrier(node->as_Load())) {
      found_load = true;
    }
  }
  assert(!found_safepoint || !found_load, "found load and safepoint in same block");
}

static void verify_slippery_safepoints(Compile* C) {
  ResourceArea *area = Thread::current()->resource_area();
  Unique_Node_List visited(area);
  Unique_Node_List checked(area);

  // Recursively walk the graph.
  visited.push(C->root());
  while (visited.size() > 0) {
    Node* node = visited.pop();

    Node* ctrl = node;
    if (!node->is_CFG()) {
      ctrl = node->in(0);
    }

    if (ctrl != NULL && !checked.member(ctrl)) {
      // For each block found in the graph, verify that it does not
      // contain both a safepoint and a load requiring barriers.
      verify_slippery_safepoints_internal(ctrl);

      checked.push(ctrl);
    }

    checked.push(node);

    for (DUIterator_Fast imax, i = node->fast_outs(imax); i < imax; i++) {
      Node* use = node->fast_out(i);
      if (checked.member(use))  continue;
      if (visited.member(use))  continue;
      visited.push(use);
    }
  }
}

void ZBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  switch(phase) {
    case BarrierSetC2::BeforeOptimize:
    case BarrierSetC2::BeforeLateInsertion:
      assert(state()->load_barrier_count() == 0, "No barriers inserted yet");
      break;
    case BarrierSetC2::BeforeMacroExpand:
      // Barrier placement should be set by now.
      verify_gc_barriers(false /*post_parse*/);
      break;
    case BarrierSetC2::BeforeCodeGen:
      // Barriers have been fully expanded.
      assert(state()->load_barrier_count() == 0, "No more macro barriers");
      verify_slippery_safepoints(compile);
      break;
    default:
      assert(0, "Phase without verification");
  }
}

// post_parse implies that there might be load barriers without uses after parsing.
// That only applies when adding barriers at parse time.
void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
  ZBarrierSetC2State* s = state();
  Compile* C = Compile::current();
  ResourceMark rm;
  VectorSet visited(Thread::current()->resource_area());

  for (int i = 0; i < s->load_barrier_count(); i++) {
    LoadBarrierNode* n = s->load_barrier_node(i);

    // If a dominating barrier on the same address exists, it must not be
    // applied to the value from the same load; otherwise the value is not
    // reloaded before it's used the second time.
    assert(n->in(LoadBarrierNode::Similar)->is_top() ||
           (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Address) == n->in(LoadBarrierNode::Address) &&
            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Oop) != n->in(LoadBarrierNode::Oop)),
           "broken similar edge");

    assert(n->as_LoadBarrier()->has_true_uses(),
           "found unneeded load barrier");

    // Several load barrier nodes chained through their Similar edges
    // break the code that removes the barriers in final graph reshape.
    assert(n->in(LoadBarrierNode::Similar)->is_top() ||
           (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Similar)->is_top()),
           "chain of Similar load barriers");

    if (!n->in(LoadBarrierNode::Similar)->is_top()) {
      ResourceMark rm;
      Unique_Node_List wq;
      Node* other = n->in(LoadBarrierNode::Similar)->in(0);
      wq.push(n);
      for (uint next = 0; next < wq.size(); ++next) {
        Node *nn = wq.at(next);
        assert(nn->is_CFG(), "");
        assert(!nn->is_SafePoint(), "");

        if (nn == other) {
          continue;
        }

        if (nn->is_Region()) {
          for (uint i = 1; i < nn->req(); i++) {
            Node* m = nn->in(i);
            if (m != NULL) {
              wq.push(m);
            }
          }
        } else {
          Node* m = nn->in(0);
          if (m != NULL) {
            wq.push(m);
          }
        }
      }
    }
  }
}

#endif // end verification code

static void call_catch_cleanup_one(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl);

// This code clones all uses of a load that sits between a call and the
// catch blocks, pushing a clone of the load chain down to each use.

static bool fixup_uses_in_catch(PhaseIdealLoop *phase, Node *start_ctrl, Node *node) {

  if (!phase->has_ctrl(node)) {
    // This node is floating - doesn't need to be cloned.
    assert(node != start_ctrl, "check");
    return false;
  }

  Node* ctrl = phase->get_ctrl(node);
  if (ctrl != start_ctrl) {
    // We are in a successor block - the node is ok.
    return false; // Unwind
  }

  // Process successor nodes
  int outcnt = node->outcnt();
  for (int i = 0; i < outcnt; i++) {
    Node* n = node->raw_out(0);
    assert(!n->is_LoadBarrier(), "Sanity");
    // Calling recursively, visiting leaves first
    fixup_uses_in_catch(phase, start_ctrl, n);
  }

  // Now all successors are outside
  // - Clone this node to each successor
  int no_succs = node->outcnt();
  assert(!node->is_Store(), "Stores not expected here");

  // In some very rare cases a load that doesn't need a barrier will end up here.
  // Treat it as a LoadP and the insertion of phis will be done correctly.
  if (node->is_Load()) {
    call_catch_cleanup_one(phase, node->as_Load(), phase->get_ctrl(node));
  } else {
    for (DUIterator_Fast jmax, i = node->fast_outs(jmax); i < jmax; i++) {
      Node* use = node->fast_out(i);
      Node* clone = node->clone();
      assert(clone->outcnt() == 0, "");

      assert(use->find_edge(node) != -1, "check");
      phase->igvn().rehash_node_delayed(use);
      use->replace_edge(node, clone);

      Node* new_ctrl;
      if (use->is_block_start()) {
        new_ctrl = use;
      } else if (use->is_CFG()) {
        new_ctrl = use->in(0);
        assert(new_ctrl != NULL, "");
      } else {
        new_ctrl = phase->get_ctrl(use);
      }

      phase->set_ctrl(clone, new_ctrl);

      if (phase->C->directive()->ZTraceLoadBarriersOption) tty->print_cr("  Clone op %i as %i to control %i", node->_idx, clone->_idx, new_ctrl->_idx);
      phase->igvn().register_new_node_with_optimizer(clone);
      --i, --jmax;
    }
    assert(node->outcnt() == 0, "must be empty now");

    // The node is dead.
    phase->igvn().remove_dead_node(node);
  }
  return true; // unwind - return if a use was processed
}

// Clone a load to a specific catch_proj
static Node* clone_load_to_catchproj(PhaseIdealLoop* phase, Node* load, Node* catch_proj) {
  Node* cloned_load = load->clone();
  cloned_load->set_req(0, catch_proj);      // set explicit control
  phase->set_ctrl(cloned_load, catch_proj); // update
  if (phase->C->directive()->ZTraceLoadBarriersOption) tty->print_cr("  Clone LOAD %i as %i to control %i", load->_idx, cloned_load->_idx, catch_proj->_idx);
  phase->igvn().register_new_node_with_optimizer(cloned_load);
  return cloned_load;
}

static Node* get_dominating_region(PhaseIdealLoop* phase, Node* node, Node* stop) {
  Node* region = node;
  while (!region->isa_Region()) {
    Node *up = phase->idom(region);
    assert(up != region, "Must not loop");
    assert(up != stop,   "Must not find original control");
    region = up;
  }
  return region;
}

// Clone this load to each catch block
static void call_catch_cleanup_one(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl) {
  bool trace = phase->C->directive()->ZTraceLoadBarriersOption;
  phase->igvn().set_delay_transform(true);

  // Verify preconditions
  assert(ctrl->isa_Proj() && ctrl->in(0)->isa_Call(), "Must be a call proj");
  assert(ctrl->raw_out(0)->isa_Catch(), "Must be a catch");

  if (ctrl->raw_out(0)->isa_Catch()->outcnt() == 1) {
    if (trace) tty->print_cr("Cleaning up catch: Skipping load %i, call with single catch", load->_idx);
    return;
  }

  // Process the load's successor nodes - if any is between
  // the call and the catch blocks, they need to be cloned too.
  // This is done recursively.
  int outcnt = load->outcnt();
  uint index = 0;
  for (int i = 0; i < outcnt; i++) {
    if (index < load->outcnt()) {
      Node *n = load->raw_out(index);
      assert(!n->is_LoadBarrier(), "Sanity");
      if (!fixup_uses_in_catch(phase, ctrl, n)) {
        // if no successor was cloned, progress to next out.
        index++;
      }
    }
  }

  // Now all the load's uses have been cloned down.
  // The only thing left is to clone the loads themselves, but they must end up
  // first in the catch blocks.

  // We clone the loads into the catch blocks only when needed.
  // An array is used to map the catch blocks to each lazily cloned load.
  // In that way no extra unnecessary loads are cloned.

  // Any use dominated by the original block must have a phi and a region added

  Node* catch_node = ctrl->raw_out(0);
  int number_of_catch_projs = catch_node->outcnt();
  Node** proj_to_load_mapping = NEW_RESOURCE_ARRAY(Node*, number_of_catch_projs);
  Copy::zero_to_bytes(proj_to_load_mapping, sizeof(Node*) * number_of_catch_projs);

  // The phi_map is used to keep track of where phis have already been inserted
  int phi_map_len = phase->C->unique();
  Node** phi_map = NEW_RESOURCE_ARRAY(Node*, phi_map_len);
  Copy::zero_to_bytes(phi_map, sizeof(Node*) * phi_map_len);

  for (unsigned int i = 0; i < load->outcnt(); i++) {
    Node* load_use_control = NULL;
    Node* load_use = load->raw_out(i);

    if (phase->has_ctrl(load_use)) {
      load_use_control = phase->get_ctrl(load_use);
    } else {
      load_use_control = load_use->in(0);
    }
    assert(load_use_control != NULL, "sanity");
    if (trace) tty->print_cr("  Handling use: %i, with control: %i", load_use->_idx, load_use_control->_idx);

    // Sometimes the load's use is a phi. For those we need to determine from
    // which catch block the use is defined.
    bool load_use_is_phi = false;
    unsigned int load_use_phi_index = 0;
    Node* phi_ctrl = NULL;
    if (load_use->is_Phi()) {
      // Find phi input that matches load
      for (unsigned int u = 1; u < load_use->req(); u++) {
        if (load_use->in(u) == load) {
          load_use_is_phi = true;
          load_use_phi_index = u;
          assert(load_use->in(0)->is_Region(), "Region or broken");
          phi_ctrl = load_use->in(0)->in(u);
          assert(phi_ctrl->is_CFG(), "check");
          assert(phi_ctrl != load,   "check");
          break;
        }
      }
      assert(load_use_is_phi,        "must find");
      assert(load_use_phi_index > 0, "sanity");
    }

    // For each load use, see which catch proj dominates, create load clone lazily and reconnect
    bool found_dominating_catchproj = false;
    for (int c = 0; c < number_of_catch_projs; c++) {
      Node* catchproj = catch_node->raw_out(c);
      assert(catchproj != NULL && catchproj->isa_CatchProj(), "Sanity");

      if (!phase->is_dominator(catchproj, load_use_control)) {
        if (load_use_is_phi && phase->is_dominator(catchproj, phi_ctrl)) {
          // The load's use is local to the catchproj.
          // Fall out and replace the load with a catch-local load clone.
        } else {
          continue;
        }
      }
      assert(!found_dominating_catchproj, "Max one should match");

      // Clone loads to catch projs
      Node* load_clone = proj_to_load_mapping[c];
      if (load_clone == NULL) {
        load_clone = clone_load_to_catchproj(phase, load, catchproj);
        proj_to_load_mapping[c] = load_clone;
      }
      phase->igvn().rehash_node_delayed(load_use);

      if (load_use_is_phi) {
        // phis are special - the load is defined from a specific control flow
        load_use->set_req(load_use_phi_index, load_clone);
      } else {
        // Multiple edges can be replaced at once - on calls for example
        load_use->replace_edge(load, load_clone);
      }
      --i; // more than one edge can have been removed, but the next is in later iterations

      // We could break the for-loop after finding a dominating match.
      // But keep iterating to catch any bad idom early.
      found_dominating_catchproj = true;
    }

    // We found no single catchproj that dominated the use - the use is at a
    // point where control flow from multiple catch projs has merged. We will
    // have to create phi nodes before the use and tie the output from the
    // cloned loads together. It can be a single phi or a number of chained
    // phis, depending on control flow
    if (!found_dominating_catchproj) {

      // Use phi-control if use is a phi
      if (load_use_is_phi) {
        load_use_control = phi_ctrl;
      }
      assert(phase->is_dominator(ctrl, load_use_control), "Common use but no dominator");

      // Clone a load on all paths
      for (int c = 0; c < number_of_catch_projs; c++) {
        Node* catchproj = catch_node->raw_out(c);
        Node* load_clone = proj_to_load_mapping[c];
        if (load_clone == NULL) {
          load_clone = clone_load_to_catchproj(phase, load, catchproj);
          proj_to_load_mapping[c] = load_clone;
        }
      }

      // Move up dominator tree from use until dom front is reached
      Node* next_region = get_dominating_region(phase, load_use_control, ctrl);
      while (phase->idom(next_region) != catch_node) {
        next_region = phase->idom(next_region);
        if (trace) tty->print_cr("Moving up idom to region ctrl %i", next_region->_idx);
      }
      assert(phase->is_dominator(catch_node, next_region), "Sanity");

      // Create or reuse a phi node that collects all cloned loads and feeds it to the use.
      Node* test_phi = phi_map[next_region->_idx];
      if ((test_phi != NULL) && test_phi->is_Phi()) {
        // Reuse an already created phi
        if (trace) tty->print_cr("    Using cached Phi %i on load_use %i", test_phi->_idx, load_use->_idx);
        phase->igvn().rehash_node_delayed(load_use);
        load_use->replace_edge(load, test_phi);
        // Now this use is done
      } else {
        // Otherwise we need to create one or more phis
        PhiNode* next_phi = new PhiNode(next_region, load->type());
        phi_map[next_region->_idx] = next_phi; // cache new phi
        phase->igvn().rehash_node_delayed(load_use);
        load_use->replace_edge(load, next_phi);

        int dominators_of_region = 0;
        do {
          // New phi, connect to region and add all loads as in.
          Node* region = next_region;
          assert(region->isa_Region() && region->req() > 2, "Catch dead region nodes");
          PhiNode* new_phi = next_phi;

          if (trace) tty->print_cr("Created Phi %i on load %i with control %i", new_phi->_idx, load->_idx, region->_idx);

          // Need to add all cloned loads to the phi, taking care that the right path is matched
          dominators_of_region = 0; // reset for new region
          for (unsigned int reg_i = 1; reg_i < region->req(); reg_i++) {
            Node* region_pred = region->in(reg_i);
            assert(region_pred->is_CFG(), "check");
            bool pred_has_dominator = false;
            for (int c = 0; c < number_of_catch_projs; c++) {
              Node* catchproj = catch_node->raw_out(c);
              if (phase->is_dominator(catchproj, region_pred)) {
                new_phi->set_req(reg_i, proj_to_load_mapping[c]);
                if (trace) tty->print_cr(" - Phi in(%i) set to load %i", reg_i, proj_to_load_mapping[c]->_idx);
                pred_has_dominator = true;
                dominators_of_region++;
                break;
              }
            }

            // Sometimes we need to chain several phis.
            if (!pred_has_dominator) {
              assert(dominators_of_region <= 1, "More than one region can't require extra phi");
              if (trace) tty->print_cr(" - Region %i pred %i not dominated by catch proj", region->_idx, region_pred->_idx);
              // Continue the search on this region_pred
              // - walk up to next region
              // - create a new phi and connect to first new_phi
              next_region = get_dominating_region(phase, region_pred, ctrl);

              // Look up whether there already is a phi; create a new one otherwise
              Node* test_phi = phi_map[next_region->_idx];
              if ((test_phi != NULL) && test_phi->is_Phi()) {
                next_phi = test_phi->isa_Phi();
                dominators_of_region++; // record that a match was found and that we are done
                if (trace) tty->print_cr("    Using cached phi Phi %i on control %i", next_phi->_idx, next_region->_idx);
              } else {
                next_phi = new PhiNode(next_region, load->type());
                phi_map[next_region->_idx] = next_phi;
              }
              new_phi->set_req(reg_i, next_phi);
            }
          }

          new_phi->set_req(0, region);
          phase->igvn().register_new_node_with_optimizer(new_phi);
          phase->set_ctrl(new_phi, region);

          assert(dominators_of_region != 0, "Must have found one this iteration");
        } while (dominators_of_region == 1);
      }
      --i;
    }
  } // end of loop over uses

  assert(load->outcnt() == 0, "All uses should be handled");
  phase->igvn().remove_dead_node(load);
  phase->C->print_method(PHASE_CALL_CATCH_CLEANUP, 4, load->_idx);

  // Now we should be home
  phase->igvn().set_delay_transform(false);
}

// Sort out the loads that are between a call and its catch blocks
static void process_catch_cleanup_candidate(PhaseIdealLoop* phase, LoadNode* load) {
  bool trace = phase->C->directive()->ZTraceLoadBarriersOption;

  Node* ctrl = phase->get_ctrl(load);
  if (!ctrl->is_Proj() || (ctrl->in(0) == NULL) || !ctrl->in(0)->isa_Call()) {
    return;
  }

  Node* catch_node = ctrl->isa_Proj()->raw_out(0);
  if (catch_node->is_Catch()) {
    if (catch_node->outcnt() > 1) {
      call_catch_cleanup_one(phase, load, ctrl);
    } else {
      if (trace) tty->print_cr("Call catch cleanup with only one catch: load %i ", load->_idx);
    }
  }
}

void ZBarrierSetC2::barrier_insertion_phase(Compile* C, PhaseIterGVN& igvn) const {
  PhaseIdealLoop::optimize(igvn, LoopOptsZBarrierInsertion);
  if (C->failing())  return;
}

bool ZBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {

  if (mode == LoopOptsZBarrierInsertion) {
    // First make sure all loads between a call and its catch blocks are moved into the catch blocks
    clean_catch_blocks(phase);

    // Then expand barriers on all loads
    insert_load_barriers(phase);

    // Handle all Unsafe that need barriers.
    insert_barriers_on_unsafe(phase);

    phase->C->clear_major_progress();
    return true;
  } else {
    return false;
  }
}

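// A CAS whose expected value is null can skip the barrier machinery:
// presumably a stale (bad-colored) oop is never equal to null, so comparing
// against NULL gives the correct answer without first healing the old value.
// (Hedged rationale; the test below only checks for a NULL_PTR expected input.)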
static bool can_simplify_cas(LoadStoreNode* node) {
  if (node->isa_LoadStoreConditional()) {
    Node *expected_in = node->as_LoadStoreConditional()->in(LoadStoreConditionalNode::ExpectedIn);
    return (expected_in->get_ptr_type() == TypePtr::NULL_PTR);
  } else {
    return false;
  }
}

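// Replace an unsafe oop LoadStore node with its Z-specific variant and splice
// in a pre-load of the old value plus a strong LoadBarrier, whose Oop
// projection is attached to the new node as an extra keepalive input. A
// sketch of the resulting subgraph (names are illustrative):
//
//   load        = LoadP(adr)                       // pre-load of old value
//   barrier     = LoadBarrier(load, adr)
//   barrier_val = Proj(barrier, Oop)
//   zcas        = ZCompareAndSwapP(..., barrier_val /* keepalive */)
//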
static void insert_barrier_before_unsafe(PhaseIdealLoop* phase, LoadStoreNode* old_node) {

  Compile *C = phase->C;
  PhaseIterGVN &igvn = phase->igvn();
  LoadStoreNode* zclone = NULL;

  Node *in_ctrl = old_node->in(MemNode::Control);
  Node *in_mem  = old_node->in(MemNode::Memory);
  Node *in_adr  = old_node->in(MemNode::Address);
  Node *in_val  = old_node->in(MemNode::ValueIn);
  const TypePtr *adr_type = old_node->adr_type();
  const TypePtr* load_type = TypeOopPtr::BOTTOM; // The type for the load we are adding

  switch (old_node->Opcode()) {
    case Op_CompareAndExchangeP: {
      zclone = new ZCompareAndExchangePNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn),
              adr_type, old_node->get_ptr_type(), ((CompareAndExchangeNode*)old_node)->order());
      load_type = old_node->bottom_type()->is_ptr();
      break;
    }
    case Op_WeakCompareAndSwapP: {
      if (can_simplify_cas(old_node)) {
        break;
      }
      zclone = new ZWeakCompareAndSwapPNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn),
              ((CompareAndSwapNode*)old_node)->order());
      adr_type = TypePtr::BOTTOM;
      break;
    }
    case Op_CompareAndSwapP: {
      if (can_simplify_cas(old_node)) {
        break;
      }
      zclone = new ZCompareAndSwapPNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn),
              ((CompareAndSwapNode*)old_node)->order());
      adr_type = TypePtr::BOTTOM;
      break;
    }
    case Op_GetAndSetP: {
      zclone = new ZGetAndSetPNode(in_ctrl, in_mem, in_adr, in_val, old_node->adr_type(), old_node->get_ptr_type());
      load_type = old_node->bottom_type()->is_ptr();
      break;
    }
  }
  if (zclone != NULL) {
    igvn.register_new_node_with_optimizer(zclone, old_node);

    // Make load
    LoadPNode *load = new LoadPNode(NULL, in_mem, in_adr, adr_type, load_type, MemNode::unordered,
                                    LoadNode::DependsOnlyOnTest);
    load_set_expanded_barrier(load);
    igvn.register_new_node_with_optimizer(load);
    igvn.replace_node(old_node, zclone);

    Node *barrier = new LoadBarrierNode(C, NULL, in_mem, load, in_adr, false /* weak */);
    Node *barrier_val = new ProjNode(barrier, LoadBarrierNode::Oop);
    Node *barrier_ctrl = new ProjNode(barrier, LoadBarrierNode::Control);

    igvn.register_new_node_with_optimizer(barrier);
    igvn.register_new_node_with_optimizer(barrier_val);
    igvn.register_new_node_with_optimizer(barrier_ctrl);

    // Loop over all of in_ctrl's usages and move them to barrier_ctrl
    for (DUIterator_Last imin, i = in_ctrl->last_outs(imin); i >= imin; --i) {
      Node *use = in_ctrl->last_out(i);
      uint l;
      for (l = 0; use->in(l) != in_ctrl; l++) {}
      igvn.replace_input_of(use, l, barrier_ctrl);
    }

    load->set_req(MemNode::Control, in_ctrl);
    barrier->set_req(LoadBarrierNode::Control, in_ctrl);
    zclone->add_req(barrier_val); // add req as keep alive.

    C->print_method(PHASE_ADD_UNSAFE_BARRIER, 4, zclone->_idx);
  }
}

void ZBarrierSetC2::insert_barriers_on_unsafe(PhaseIdealLoop* phase) const {
  Compile *C = phase->C;
  PhaseIterGVN &igvn = phase->igvn();
  uint new_ids = C->unique();
  VectorSet visited(Thread::current()->resource_area());
  GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL);
  nodeStack.push(C->root());
  visited.test_set(C->root()->_idx);

  // Traverse all nodes, visit all unsafe ops that require a barrier
  while (nodeStack.length() > 0) {
    Node *n = nodeStack.pop();

    bool is_old_node = (n->_idx < new_ids); // don't process nodes that were created during cleanup
    if (is_old_node) {
      if (n->is_LoadStore()) {
        LoadStoreNode* lsn = n->as_LoadStore();
        if (lsn->has_barrier()) {
          BasicType bt = lsn->in(MemNode::Address)->bottom_type()->basic_type();
          assert((bt == T_OBJECT || bt == T_ARRAY), "Sanity test");
          insert_barrier_before_unsafe(phase, lsn);
        }
      }
    }
    for (uint i = 0; i < n->len(); i++) {
      if (n->in(i)) {
        if (!visited.test_set(n->in(i)->_idx)) {
          nodeStack.push(n->in(i));
        }
      }
    }
  }

  igvn.optimize();
  C->print_method(PHASE_ADD_UNSAFE_BARRIER, 2);
}

// The purpose of ZBarrierSetC2::clean_catch_blocks is to prepare the IR for
// splicing in load barrier nodes.
//
// The problem is that we might have instructions between a call and its catch nodes.
// (This is usually handled in PhaseCFG:call_catch_cleanup, which clones mach nodes in
// already scheduled blocks.) We can't have loads that require barriers there,
// because we need to splice in new control flow, and that would violate the IR.
//
// clean_catch_blocks finds all loads that require a barrier and clones them, and any
// dependent instructions, down to each use. The loads must be placed at the beginning
// of the catch block, before any store.
//
// Sometimes the load's use will be at a place dominated by all catch blocks; then we need
// a load in each catch block, and a Phi at the dominated use.
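//
// Sketched effect on the IR (illustrative):
//
//   Before:  CallProj --> LoadP --> use            (load between call and catch)
//   After:   CatchProj1 --> LoadP'  --\
//            CatchProj2 --> LoadP'' --+--> Phi --> use
//
// i.e. one clone of the load per catch proj, merged by a Phi at any use
// that is dominated by all of them.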
1392 
1393 void ZBarrierSetC2::clean_catch_blocks(PhaseIdealLoop* phase) const {
1394 
1395   Compile *C = phase->C;
1396   uint new_ids = C->unique();
1397   PhaseIterGVN &igvn = phase->igvn();
1398   VectorSet visited(Thread::current()->resource_area());
1399   GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL);
1400   nodeStack.push(C->root());
1401   visited.test_set(C->root()->_idx);
1402 
1403   // Traverse all nodes, visit all loads that require a barrier
1404   while (nodeStack.length() > 0) {
1405     Node *n = nodeStack.pop();
1406 
1407     for (uint i = 0; i < n->len(); i++) {
1408       if (n->in(i)) {
1409         if (!visited.test_set(n->in(i)->_idx)) {
1410           nodeStack.push(n->in(i));
1411         }
1412       }
1413     }
1414 
1415     bool is_old_node = (n->_idx < new_ids); // don't process nodes that were created during cleanup
1416     if (n->is_Load() && is_old_node) {
1417       LoadNode* load = n->as_Load();
1418       // Only care about loads that will have a barrier
1419       if (load_require_barrier(load)) {
1420         process_catch_cleanup_candidate(phase, load);
1421       }
1422     }
1423   }
1424 
1425   C->print_method(PHASE_CALL_CATCH_CLEANUP, 2);
1426 }
1427 
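     // Compares loads by the dominator depth of their control, deepest first.
     // Ties are broken on node index, making the order total.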
1428 class DomDepthCompareClosure : public CompareClosure<LoadNode*> {
1429   PhaseIdealLoop* _phase;
1430 
1431 public:
1432   DomDepthCompareClosure(PhaseIdealLoop* phase) : _phase(phase) { }
1433 
1434   int do_compare(LoadNode* const &n1, LoadNode* const &n2) {
1435     int d1 = _phase->dom_depth(_phase->get_ctrl(n1));
1436     int d2 = _phase->dom_depth(_phase->get_ctrl(n2));
1437     if (d1 == d2) {
1438       // Compare index if the depth is the same; this makes all entries unique and the order total.
1439       return n1->_idx - n2->_idx;
1440     } else {
1441       return d2 - d1;
1442     }
1443   }
1444 };
1445 
1446 // Traverse the graph and add all barrier-requiring LoadP nodes to the list, sorted by dom depth
1447 void gather_loadnodes_sorted(PhaseIdealLoop* phase, GrowableArray<LoadNode*>* loadList) {
1448 
1449   VectorSet visited(Thread::current()->resource_area());
1450   GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL);
1451   DomDepthCompareClosure ddcc(phase);
1452 
1453   nodeStack.push(phase->C->root());
1454   while (nodeStack.length() > 0) {
1455     Node *n = nodeStack.pop();
1456     if (visited.test(n->_idx)) {
1457       continue;
1458     }
1459 
1460     if (n->isa_Load()) {
1461       LoadNode *load = n->as_Load();
1462       if (load_require_barrier(load)) {
1463         assert(phase->get_ctrl(load) != NULL, "sanity");
1464         assert(phase->dom_depth(phase->get_ctrl(load)) != 0, "sanity");
1465         loadList->insert_sorted(&ddcc, load);
1466       }
1467     }
1468 
1469     visited.set(n->_idx);
1470     for (uint i = 0; i < n->req(); i++) {
1471       if (n->in(i)) {
1472         if (!visited.test(n->in(i)->_idx)) {
1473           nodeStack.push(n->in(i));
1474         }
1475       }
1476     }
1477   }
1478 }
1479 
1480 // Add a LoadBarrier to every LoadP that requires one
1481 void ZBarrierSetC2::insert_load_barriers(PhaseIdealLoop* phase) const {
1482 
1484   GrowableArray<LoadNode *> loadList(Thread::current()->resource_area(), 0, 0, NULL);
1485   gather_loadnodes_sorted(phase, &loadList);
1486 
1490   for (GrowableArrayIterator<LoadNode *> loadIter = loadList.begin(); loadIter != loadList.end(); ++loadIter) {
1491     LoadNode *load = *loadIter;
1492 
1493     if (load_has_expanded_barrier(load)) {
1494       continue;
1495     }
1496 
1497     do {
1498       // Insert a barrier on the LoadP. If another load is found that must be
1499       // expanded first, expand that one (and any it depends on) before retrying.
1500       LoadNode* result = insert_one_loadbarrier(phase, load, phase->get_ctrl(load));
1501       while (result != NULL) {
1502         result = insert_one_loadbarrier(phase, result, phase->get_ctrl(result));
1503       }
1504     } while (!load_has_expanded_barrier(load));
1505   }
1506 
1507   phase->C->print_method(PHASE_INSERT_BARRIER, 2);
1508 }
1509 
1510 void push_antidependent_stores(PhaseIdealLoop* phase, Node_Stack& nodestack, LoadNode* start_load) {
1511   // Push all stores on the same memory input that can alias with the load;
1512   // any load reached from them must be expanded first.
1513   PhaseIterGVN &igvn = phase->igvn();
1514   int load_alias_idx = igvn.C->get_alias_index(start_load->adr_type());
1515 
1516   Node *mem = start_load->in(MemNode::Memory);
1517   for (DUIterator_Fast imax, u = mem->fast_outs(imax); u < imax; u++) {
1518     Node *mem_use = mem->fast_out(u);
1519 
1520     if (mem_use == start_load) continue;
1521     if (!mem_use->is_Store()) continue;
1522     if (!phase->has_ctrl(mem_use)) continue;
1523     if (phase->get_ctrl(mem_use) != phase->get_ctrl(start_load)) continue;
1524 
1525     // add any aliasing store in this block
1526     StoreNode *store = mem_use->isa_Store();
1527     const TypePtr *adr_type = store->adr_type();
1528     if (igvn.C->can_alias(adr_type, load_alias_idx)) {
1529       nodestack.push(store, 0);
1530     }
1531   }
1532 }
1533 
1534 LoadNode* ZBarrierSetC2::insert_one_loadbarrier(PhaseIdealLoop* phase, LoadNode* start_load, Node* ctrl) const {
1535   bool trace = phase->C->directive()->ZTraceLoadBarriersOption;
1536   PhaseIterGVN &igvn = phase->igvn();
1537 
1538   // Check for other LoadPs in the same block that are reachable by a DFS
1539   // - if one is found, return it; it needs to be expanded first
1540   // - otherwise proceed and insert the barrier
1541 
1542   VectorSet visited(Thread::current()->resource_area());
1543   Node_Stack nodestack(100);
1544 
1545   nodestack.push(start_load, 0);
1546   push_antidependent_stores(phase, nodestack, start_load);
1547 
1548   while (!nodestack.is_empty()) {
1549     Node* n = nodestack.node(); // peek
1550     nodestack.pop();
1551     if (visited.test(n->_idx)) {
1552       continue;
1553     }
1554 
1555     if (n->is_Load() && n != start_load && load_require_barrier(n->as_Load()) && !load_has_expanded_barrier(n->as_Load())) {
1556       // Found another load that needs a barrier in the same block. Must expand later loads first.
1557       if (trace) tty->print_cr(" * Found LoadP %i on DFS", n->_idx);
1558       return n->as_Load(); // return node that should be expanded first
1559     }
1560 
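         // Stay within the load's block; Phis hang from the region and are
         // never moved.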
1561     if (!phase->has_ctrl(n)) continue;
1562     if (phase->get_ctrl(n) != phase->get_ctrl(start_load)) continue;
1563     if (n->is_Phi()) continue;
1564 
1565     visited.set(n->_idx);
1566     // push all children
1567     for (DUIterator_Fast imax, ii = n->fast_outs(imax); ii < imax; ii++) {
1568       Node* c = n->fast_out(ii);
1569       if (c != NULL) {
1570         nodestack.push(c, 0);
1571       }
1572     }
1573   }
1574 
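       // No other load in the block needs to go first. The visited set now holds
       // every node in the block reachable from the load (and its anti-dependent
       // stores); the inner step uses it to move those nodes below the barrier.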
1575   insert_one_loadbarrier_inner(phase, start_load, ctrl, visited);
1576   return NULL;
1577 }
1578 
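     // Splice a LoadBarrierNode in after the load:
     //  1. Insert the barrier's control between the load's ctrl and its single
     //     CFG successor.
     //  2. Move safepoints and all DFS-visited nodes in the block down to
     //     barrier_ctrl.
     //  3. Rewire all users of the load to the barrier's Oop projection.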
1579 void ZBarrierSetC2::insert_one_loadbarrier_inner(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl, VectorSet visited2) const {
1580   PhaseIterGVN &igvn = phase->igvn();
1581   Compile* C = igvn.C;
1582   bool trace = C->directive()->ZTraceLoadBarriersOption;
1583 
1584   // create barrier
1585   Node* barrier = new LoadBarrierNode(C, NULL, load->in(LoadNode::Memory), NULL, load->in(LoadNode::Address), load_has_weak_barrier(load));
1586   Node* barrier_val = new ProjNode(barrier, LoadBarrierNode::Oop);
1587   Node* barrier_ctrl = new ProjNode(barrier, LoadBarrierNode::Control);
1588 
1589   if (trace) tty->print_cr("Insert load %i with barrier: %i and ctrl: %i", load->_idx, barrier->_idx, ctrl->_idx);
1590 
1591   // Splice control
1592   // - Insert the barrier's control between the load's ctrl and ctrl's successor
1593   //   on the path to the block end. If the control successor is a catch, step over to the next.
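       //
       // Schematically, the splice turns
       //
       //   ctrl --> ctrl_succ
       //
       // into
       //
       //   ctrl --> LoadBarrier --> barrier_ctrl --> ctrl_succ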
1594   Node* ctrl_succ = NULL;
1595   for (DUIterator_Fast imax, j = ctrl->fast_outs(imax); j < imax; j++) {
1596     Node* tmp = ctrl->fast_out(j);
1597 
1598     // - The CFG node is the one we are going to splice (there is exactly one)
1599     // - Phi nodes will continue to hang from the region node
1600     // - Self loops should be skipped
1601     if (tmp->is_Phi() || tmp == ctrl) {
1602       continue;
1603     }
1604 
1605     if (tmp->is_CFG()) {
1606       assert(ctrl_succ == NULL, "There can be only one");
1607       ctrl_succ = tmp;
1608       continue;
1609     }
1610   }
1611 
1612   // Now splice control
1613   assert(ctrl_succ != load, "sanity");
1614   assert(ctrl_succ != NULL, "Broken IR");
1615   bool found = false;
1616   for (uint k = 0; k < ctrl_succ->req(); k++) {
1617     if (ctrl_succ->in(k) == ctrl) {
1618       assert(!found, "sanity");
1619       if (trace) tty->print_cr(" Move CFG ctrl_succ %i to barrier_ctrl", ctrl_succ->_idx);
1620       igvn.replace_input_of(ctrl_succ, k, barrier_ctrl);
1621       found = true;
1622       k--;
1623     }
1624   }
1625 
1626   // Move all safepoints and all visited users of ctrl to barrier_ctrl instead
1627   for (DUIterator_Fast imax, r = ctrl->fast_outs(imax); r < imax; r++) {
1628     Node* tmp = ctrl->fast_out(r);
1629     if (tmp->is_SafePoint() || (visited2.test(tmp->_idx) && (tmp != load))) {
1630       if (trace) tty->print_cr(" Move ctrl_succ %i to barrier_ctrl", tmp->_idx);
1631       igvn.replace_input_of(tmp, 0, barrier_ctrl);
1632       --r; --imax;
1633     }
1634   }
1635 
1636   // Move the load's users over to barrier_val
1637   for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) {
1638     Node* u = load->fast_out(i);
1639     if (u->isa_LoadBarrier()) {
1640       continue;
1641     }
1642 
1643     // Find the index of the load input on this use (could use an iterator)
1644     for (uint j = 0; j < u->req(); j++) {
1645       if (u->in(j) == load) {
1646         igvn.replace_input_of(u, j, barrier_val);
1647         --i; --imax; // Adjust the iterator of the *outer* loop
1648         break; // some nodes (calls) might have several uses from the same node
1649       }
1650     }
1651   }
1652 
1653   // Connect barrier to load and control
1654   barrier->set_req(LoadBarrierNode::Oop, load);
1655   barrier->set_req(LoadBarrierNode::Control, ctrl);
1656 
1657   igvn.replace_input_of(load, MemNode::Control, ctrl);
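       // Pin the load so it cannot float away from the barrier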
1658   load->pin();
1659 
1660   igvn.rehash_node_delayed(load);
1661   igvn.register_new_node_with_optimizer(barrier);
1662   igvn.register_new_node_with_optimizer(barrier_val);
1663   igvn.register_new_node_with_optimizer(barrier_ctrl);
1664   load_set_expanded_barrier(load);
1665 
1666   C->print_method(PHASE_INSERT_BARRIER, 3, load->_idx);
1667 }
1668 
1669 // Loads of the bad_mask in ThreadLocalData shouldn't get an anti-dependence check.
1670 // The bad_mask address is of type TypeRawPtr, but that will alias with
1671 // InitializeNodes until the type system is expanded.
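     // The exempted access is (schematically) a Mach load of
     //   [ThreadLocal + ZThreadLocalData::address_bad_mask_offset()]
     // i.e. the thread-local bad mask read on the barrier fast path.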
1672 bool ZBarrierSetC2::needs_anti_dependence_check(const Node* node) const {
1673   MachNode* mnode = node->isa_Mach();
1674   if (mnode != NULL) {
1675     intptr_t offset = 0;
1676     const TypePtr *adr_type2 = NULL;
1677     const Node* base = mnode->get_base_and_disp(offset, adr_type2);
1678     if ((base != NULL) &&
1679         (base->is_Mach() && base->as_Mach()->ideal_Opcode() == Op_ThreadLocal) &&
1680         (offset == in_bytes(ZThreadLocalData::address_bad_mask_offset()))) {
1681       return false;
1682     }
1683   }
1684   return true;
1685 }