/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/c2/zBarrierSetC2.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "opto/castnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/node.hpp"
#include "opto/phase.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/copy.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"

ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena) :
    _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8, 0, NULL)) {}

int ZBarrierSetC2State::load_barrier_count() const {
  return _load_barrier_nodes->length();
}

void ZBarrierSetC2State::add_load_barrier_node(LoadBarrierNode* n) {
  assert(!_load_barrier_nodes->contains(n), "duplicate entry in expand list");
  _load_barrier_nodes->append(n);
}

void ZBarrierSetC2State::remove_load_barrier_node(LoadBarrierNode* n) {
  // This function may be called twice for the same node, so check
  // that the node is in the array before attempting to remove it.
  if (_load_barrier_nodes->contains(n)) {
    _load_barrier_nodes->remove(n);
  }
}
  66 
  67 LoadBarrierNode* ZBarrierSetC2State::load_barrier_node(int idx) const {
  68   return _load_barrier_nodes->at(idx);
  69 }
  70 
  71 void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  72   return new(comp_arena) ZBarrierSetC2State(comp_arena);
  73 }
  74 
  75 ZBarrierSetC2State* ZBarrierSetC2::state() const {
  76   return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
  77 }
  78 
  79 bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
  80   // 1. This step follows potential oop projections of a load barrier before expansion
  81   if (node->is_Proj()) {
  82     node = node->in(0);
  83   }
  84 
  85   // 2. This step checks for unexpanded load barriers
  86   if (node->is_LoadBarrier()) {
  87     return true;
  88   }
  89 
  90   // 3. This step checks for the phi corresponding to an optimized load barrier expansion
  91   if (node->is_Phi()) {
  92     PhiNode* phi = node->as_Phi();
  93     Node* n = phi->in(1);
  94     if (n != NULL && n->is_LoadBarrierSlowReg()) {
  95       return true;
  96     }
  97   }
  98 
  99   return false;
 100 }
 101 
 102 void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
 103   if (node->is_LoadBarrier()) {
 104     state()->add_load_barrier_node(node->as_LoadBarrier());
 105   }
 106 }
 107 
 108 void ZBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
 109   if (node->is_LoadBarrier()) {
 110     state()->remove_load_barrier_node(node->as_LoadBarrier());
 111   }
 112 }
 113 
 114 void ZBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {
 115   // Remove useless LoadBarrier nodes
 116   ZBarrierSetC2State* s = state();
 117   for (int i = s->load_barrier_count()-1; i >= 0; i--) {
 118     LoadBarrierNode* n = s->load_barrier_node(i);
 119     if (!useful.member(n)) {
 120       unregister_potential_barrier_node(n);
 121     }
 122   }
 123 }
 124 
 125 void ZBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
 126   if (node->is_LoadBarrier() && !node->as_LoadBarrier()->has_true_uses()) {
 127     igvn->_worklist.push(node);
 128   }
 129 }
 130 
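// Load barrier state is tracked per load in LoadNode::barrier_data(), as a
// bitmask of the flags below. For example, a load from a weak reference that
// still needs a barrier carries (RequireBarrier | WeakBarrier); once its
// barrier has been spliced in, the load is re-tagged with ExpandedBarrier.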
const uint NoBarrier       = 0;
const uint RequireBarrier  = 1;
const uint WeakBarrier     = 2;
const uint ExpandedBarrier = 4;

static bool load_require_barrier(LoadNode* load)      { return (load->barrier_data() & RequireBarrier)  == RequireBarrier; }
static bool load_has_weak_barrier(LoadNode* load)     { return (load->barrier_data() & WeakBarrier)     == WeakBarrier; }
static bool load_has_expanded_barrier(LoadNode* load) { return (load->barrier_data() & ExpandedBarrier) == ExpandedBarrier; }
static void load_set_expanded_barrier(LoadNode* load) { load->set_barrier_data(ExpandedBarrier); }

static void load_set_barrier(LoadNode* load, bool weak) {
  if (weak) {
    load->set_barrier_data(RequireBarrier | WeakBarrier);
  } else {
    load->set_barrier_data(RequireBarrier);
  }
}

// == LoadBarrierNode ==

LoadBarrierNode::LoadBarrierNode(Compile* C,
                                 Node* c,
                                 Node* mem,
                                 Node* val,
                                 Node* adr,
                                 bool weak) :
    MultiNode(Number_of_Inputs),
    _weak(weak) {
  init_req(Control, c);
  init_req(Memory, mem);
  init_req(Oop, val);
  init_req(Address, adr);
  init_req(Similar, C->top());

  init_class_id(Class_LoadBarrier);
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->register_potential_barrier_node(this);
}

uint LoadBarrierNode::size_of() const {
  return sizeof(*this);
}

bool LoadBarrierNode::cmp(const Node& n) const {
  ShouldNotReachHere();
  return false;
}

const Type* LoadBarrierNode::bottom_type() const {
  const Type** floadbarrier = (const Type**)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs) * sizeof(Type*)));
  Node* in_oop = in(Oop);
  floadbarrier[Control] = Type::CONTROL;
  floadbarrier[Memory] = Type::MEMORY;
  floadbarrier[Oop] = in_oop == NULL ? Type::TOP : in_oop->bottom_type();
  return TypeTuple::make(Number_of_Outputs, floadbarrier);
}

const TypePtr* LoadBarrierNode::adr_type() const {
  ShouldNotReachHere();
  return NULL;
}

const Type* LoadBarrierNode::Value(PhaseGVN* phase) const {
  const Type** floadbarrier = (const Type**)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs) * sizeof(Type*)));
  const Type* val_t = phase->type(in(Oop));
  floadbarrier[Control] = Type::CONTROL;
  floadbarrier[Memory]  = Type::MEMORY;
  floadbarrier[Oop]     = val_t;
  return TypeTuple::make(Number_of_Outputs, floadbarrier);
}

bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node* d, Node* n) {
  if (phase != NULL) {
    return phase->is_dominator(d, n);
  }

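  // No dominator information is available: do a bounded, best-effort walk
  // up the chain of dominating tests instead.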
  for (int i = 0; i < 10 && n != NULL; i++) {
    n = IfNode::up_one_dom(n, linear_only);
    if (n == d) {
      return true;
    }
  }

  return false;
}

LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) {
  if (is_weak()) {
    // Weak barriers can't be eliminated
    return NULL;
  }

  Node* val = in(LoadBarrierNode::Oop);
  if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) {
    LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier();
    assert(lb->in(Address) == in(Address), "");
    // The load barrier on the Similar edge dominates, so if it now has the
    // same Oop input it can replace this barrier.
    if (lb->in(Oop) == in(Oop)) {
      return lb;
    }
    // Follow the chain of load barriers through their Similar edges
    while (!lb->in(Similar)->is_top()) {
      lb = lb->in(Similar)->in(0)->as_LoadBarrier();
      assert(lb->in(Address) == in(Address), "");
    }
    if (lb != in(Similar)->in(0)) {
      return lb;
    }
  }
  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
    Node* u = val->fast_out(i);
    if (u != this && u->is_LoadBarrier() && u->in(Oop) == val && u->as_LoadBarrier()->has_true_uses()) {
      Node* this_ctrl = in(LoadBarrierNode::Control);
      Node* other_ctrl = u->in(LoadBarrierNode::Control);
      if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
        return u->as_LoadBarrier();
      }
    }
  }

  if (can_be_eliminated()) {
    return NULL;
  }

  if (!look_for_similar) {
    return NULL;
  }

  Node* addr = in(LoadBarrierNode::Address);
  for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
    Node* u = addr->fast_out(i);
    if (u != this && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
      Node* this_ctrl = in(LoadBarrierNode::Control);
      Node* other_ctrl = u->in(LoadBarrierNode::Control);
      if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
        ResourceMark rm;
        Unique_Node_List wq;
        wq.push(in(LoadBarrierNode::Control));
        bool ok = true;
        bool dom_found = false;
        for (uint next = 0; next < wq.size(); ++next) {
          Node* n = wq.at(next);
          if (n->is_top()) {
            return NULL;
          }
          assert(n->is_CFG(), "");
          if (n->is_SafePoint()) {
            ok = false;
            break;
          }
          if (n == u) {
            dom_found = true;
            continue;
          }
          if (n->is_Region()) {
            for (uint i = 1; i < n->req(); i++) {
              Node* m = n->in(i);
              if (m != NULL) {
                wq.push(m);
              }
            }
          } else {
            Node* m = n->in(0);
            if (m != NULL) {
              wq.push(m);
            }
          }
        }
        if (ok) {
          assert(dom_found, "");
          return u->as_LoadBarrier();
        }
        break;
      }
    }
  }

  return NULL;
}

 310 
 311 void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
 312   // Change to that barrier may affect a dominated barrier so re-push those
 313   assert(!is_weak(), "sanity");
 314   Node* val = in(LoadBarrierNode::Oop);
 315 
 316   for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
 317     Node* u = val->fast_out(i);
 318     if (u != this && u->is_LoadBarrier() && u->in(Oop) == val) {
 319       Node* this_ctrl = in(Control);
 320       Node* other_ctrl = u->in(Control);
 321       if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
 322         igvn->_worklist.push(u);
 323       }
 324     }
 325 
 326     Node* addr = in(LoadBarrierNode::Address);
 327     for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
 328       Node* u = addr->fast_out(i);
 329       if (u != this && u->is_LoadBarrier() && u->in(Similar)->is_top()) {
 330         Node* this_ctrl = in(Control);
 331         Node* other_ctrl = u->in(Control);
 332         if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
 333           igvn->_worklist.push(u);
 334         }
 335       }
 336     }
 337   }
 338 }

Node* LoadBarrierNode::Identity(PhaseGVN* phase) {
  LoadBarrierNode* dominating_barrier = has_dominating_barrier(NULL, true, false);
  if (dominating_barrier != NULL) {
    assert(!is_weak(), "Weak barriers can't be eliminated");
    assert(dominating_barrier->in(Oop) == in(Oop), "");
    return dominating_barrier;
  }

  return this;
}

Node* LoadBarrierNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) {
    return this;
  }

  Node* val = in(Oop);
  Node* mem = in(Memory);
  Node* ctrl = in(Control);

  assert(val->Opcode() != Op_LoadN, "");
  assert(val->Opcode() != Op_DecodeN, "");

  if (mem->is_MergeMem()) {
    Node* new_mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
    set_req(Memory, new_mem);
    if (mem->outcnt() == 0 && can_reshape) {
      phase->is_IterGVN()->_worklist.push(mem);
    }
    return this;
  }

  LoadBarrierNode* dominating_barrier = NULL;
  if (!is_weak()) {
    dominating_barrier = has_dominating_barrier(NULL, !can_reshape, !phase->C->major_progress());
    if (dominating_barrier != NULL && dominating_barrier->in(Oop) != in(Oop)) {
      assert(in(Address) == dominating_barrier->in(Address), "");
      set_req(Similar, dominating_barrier->proj_out(Oop));
      return this;
    }
  }

  bool eliminate = can_reshape && (dominating_barrier != NULL || !has_true_uses());
  if (eliminate) {
    if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      Node* out_ctrl = proj_out_or_null(Control);
      Node* out_res = proj_out_or_null(Oop);

      if (out_ctrl != NULL) {
        igvn->replace_node(out_ctrl, ctrl);
      }

      // This transformation may invalidate the Similar edge of other load barriers
      fix_similar_in_uses(igvn);
      if (out_res != NULL) {
        if (dominating_barrier != NULL) {
          assert(!is_weak(), "Sanity");
          igvn->replace_node(out_res, dominating_barrier->proj_out(Oop));
        } else {
          igvn->replace_node(out_res, val);
        }
      }
    }
    // The barrier is dead; return a fresh constant so IGVN replaces this node
    return new ConINode(TypeInt::ZERO);
  }

  // If the Similar edge is no longer a load barrier, clear it
  Node* similar = in(Similar);
  if (!similar->is_top() && !(similar->is_Proj() && similar->in(0)->is_LoadBarrier())) {
    set_req(Similar, phase->C->top());
    return this;
  }

  if (can_reshape && !is_weak()) {
    // If this barrier is linked through the Similar edge by a dominated
    // barrier and both barriers have the same Oop input, the dominated
    // barrier can go away, so push it for reprocessing. We also want to
    // avoid having a barrier depend, through its Similar edge, on another
    // dominating barrier that itself depends on a third barrier through
    // its own Similar edge; the first barrier should rather depend
    // directly on the third.
    PhaseIterGVN* igvn = phase->is_IterGVN();
    Node* out_res = proj_out(Oop);
    for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
      Node* u = out_res->fast_out(i);
      if (u->is_LoadBarrier() && u->in(Similar) == out_res &&
          (u->in(Oop) == val || !u->in(Similar)->is_top())) {
        assert(!u->as_LoadBarrier()->is_weak(), "Sanity");
        igvn->_worklist.push(u);
      }
    }
    push_dominated_barriers(igvn);
  }

  return NULL;
}

uint LoadBarrierNode::match_edge(uint idx) const {
  ShouldNotReachHere();
  return 0;
}

void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
  Node* out_res = proj_out_or_null(Oop);
  if (out_res == NULL) {
    return;
  }

  for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
    Node* u = out_res->fast_out(i);
    if (u->is_LoadBarrier() && u->in(Similar) == out_res) {
      igvn->replace_input_of(u, Similar, igvn->C->top());
      --i;
      --imax;
    }
  }
}

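// A "true" use is any use of the Oop projection other than the Similar edge
// of another load barrier. A barrier whose result only feeds Similar edges
// produces no value that is actually consumed and can be removed.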
bool LoadBarrierNode::has_true_uses() const {
  Node* out_res = proj_out_or_null(Oop);
  if (out_res != NULL) {
    for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
      Node* u = out_res->fast_out(i);
      if (!u->is_LoadBarrier() || u->in(Similar) != out_res) {
        return true;
      }
    }
  }
  return false;
}

static bool barrier_needed(C2Access& access) {
  return ZBarrierSet::barrier_needed(access.decorators(), access.type());
}

Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  Node* p = BarrierSetC2::load_at_resolved(access, val_type);
  if (!barrier_needed(access)) {
    return p;
  }

  bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;
  if (p->isa_Load()) {
    load_set_barrier(p->as_Load(), weak);
  }
  return p;
}

Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* val_type) const {
  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
  LoadStoreNode* lsn = result->as_LoadStore();
  if (barrier_needed(access)) {
    lsn->set_has_barrier();
  }
  return lsn;
}

Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
  Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  LoadStoreNode* lsn = result->as_LoadStore();
  if (barrier_needed(access)) {
    lsn->set_has_barrier();
  }
  return lsn;
}

Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
  LoadStoreNode* lsn = result->as_LoadStore();
  if (barrier_needed(access)) {
    lsn->set_has_barrier();
  }
  return lsn;
}

// == Macro Expansion ==

// Optimized, low-spill load barrier variant, using a stub specialized on the register used
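//
// Schematically, the expansion below turns
//
//   oop = LoadBarrier(oop, addr)
//
// into the following pseudocode:
//
//   if ((cast_to_uintptr(oop) & thread->address_bad_mask) != 0) {  // unlikely
//     oop = load_barrier_slow_path(oop, addr);                     // stub call
//   }
//
// with the two paths merged by a region/phi pair.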
void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const {
  PhaseIterGVN& igvn = phase->igvn();
  float unlikely = PROB_UNLIKELY(0.999);

  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
  Node* in_mem = barrier->in(LoadBarrierNode::Memory);
  Node* in_val = barrier->in(LoadBarrierNode::Oop);
  Node* in_adr = barrier->in(LoadBarrierNode::Address);

  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
  Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);

  assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop to loadbarrier node cannot be null");

  Node* jthread = igvn.transform(new ThreadLocalNode());
  Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
  Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr,
                                                 TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(),
                                                 MemNode::unordered));
  Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
  Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
  Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
  Node* bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
  IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
  Node* then = igvn.transform(new IfTrueNode(iff));
  Node* elsen = igvn.transform(new IfFalseNode(iff));

  Node* new_loadp = igvn.transform(new LoadBarrierSlowRegNode(then, in_adr, in_val,
                                                              (const TypePtr*) in_val->bottom_type(), barrier->is_weak()));

  // Create the final region/phi pair to converge control/data paths to downstream code
  Node* result_region = igvn.transform(new RegionNode(3));
  result_region->set_req(1, then);
  result_region->set_req(2, elsen);

  Node* result_phi = igvn.transform(new PhiNode(result_region, TypeInstPtr::BOTTOM));
  result_phi->set_req(1, new_loadp);
  result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));

  igvn.replace_node(out_ctrl, result_region);
  igvn.replace_node(out_res, result_phi);

  assert(barrier->outcnt() == 0, "LoadBarrier macro node has non-null outputs after expansion!");

  igvn.remove_dead_node(barrier);
  igvn.remove_dead_node(out_ctrl);
  igvn.remove_dead_node(out_res);

  assert(is_gc_barrier_node(result_phi), "sanity");
  assert(step_over_gc_barrier(result_phi) == in_val, "sanity");

  phase->C->print_method(PHASE_BARRIER_EXPANSION, 4, barrier->_idx);
}

 574 
 575 bool ZBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
 576   ZBarrierSetC2State* s = state();
 577   if (s->load_barrier_count() > 0) {
 578     PhaseMacroExpand macro(igvn);
 579 
 580     int skipped = 0;
 581     while (s->load_barrier_count() > skipped) {
 582       int load_barrier_count = s->load_barrier_count();
 583       LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
 584       if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
 585         // Node is unreachable, so don't try to expand it
 586         s->remove_load_barrier_node(n);
 587         continue;
 588       }
 589       if (!n->can_be_eliminated()) {
 590         skipped++;
 591         continue;
 592       }
 593       expand_loadbarrier_node(&macro, n);
 594       assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
 595       if (C->failing()) {
 596         return true;
 597       }
 598     }
 599     while (s->load_barrier_count() > 0) {
 600       int load_barrier_count = s->load_barrier_count();
 601       LoadBarrierNode* n = s->load_barrier_node(load_barrier_count - 1);
 602       assert(!(igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())), "should have been processed already");
 603       assert(!n->can_be_eliminated(), "should have been processed already");
 604       expand_loadbarrier_node(&macro, n);
 605       assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
 606       if (C->failing()) {
 607         return true;
 608       }
 609     }
 610     igvn.set_delay_transform(false);
 611     igvn.optimize();
 612     if (C->failing()) {
 613       return true;
 614     }
 615   }
 616 
 617   return false;
 618 }
 619 
 620 Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const {
 621   Node* node = c;
 622 
 623   // 1. This step follows potential oop projections of a load barrier before expansion
 624   if (node->is_Proj()) {
 625     node = node->in(0);
 626   }
 627 
 628   // 2. This step checks for unexpanded load barriers
 629   if (node->is_LoadBarrier()) {
 630     return node->in(LoadBarrierNode::Oop);
 631   }
 632 
 633   // 3. This step checks for the phi corresponding to an optimized load barrier expansion
 634   if (node->is_Phi()) {
 635     PhiNode* phi = node->as_Phi();
 636     Node* n = phi->in(1);
 637     if (n != NULL && n->is_LoadBarrierSlowReg()) {
 638       assert(c == node, "projections from step 1 should only be seen before macro expansion");
 639       return phi->in(2);
 640     }
 641   }
 642 
 643   return c;
 644 }
 645 
 646 Node* ZBarrierSetC2::step_over_gc_barrier_ctrl(Node* c) const {
 647   Node* node = c;
 648 
 649   // 1. This step follows potential ctrl projections of a load barrier before expansion
 650   if (node->is_Proj()) {
 651     node = node->in(0);
 652   }
 653 
 654   // 2. This step checks for unexpanded load barriers
 655   if (node->is_LoadBarrier()) {
 656     return node->in(LoadBarrierNode::Control);
 657   }
 658 
 659   return c;
 660 }

bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const {
  return type == T_OBJECT || type == T_ARRAY;
}

bool ZBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_LoadBarrier:
      assert(0, "There should be no load barriers left");
      // fall through
    case Op_ZGetAndSetP:
    case Op_ZCompareAndExchangeP:
    case Op_ZCompareAndSwapP:
    case Op_ZWeakCompareAndSwapP:
#ifdef ASSERT
      if (VerifyOptoOopOffsets) {
        MemNode* mem = n->as_Mem();
        // Check to see if address types have grounded out somehow.
        const TypeInstPtr* tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
        if (tp != NULL) {
          ciInstanceKlass* k = tp->klass()->as_instance_klass();
          assert(k->contains_field_offset(tp->offset()), "oop offset is not sane");
        }
      }
#endif
      return true;
    default:
      return false;
  }
}

bool ZBarrierSetC2::matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const {
  switch (opcode) {
    case Op_CallLeaf:
      if (n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() ||
          n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr()) {
        mem_op = true;
        mem_addr_idx = TypeFunc::Parms + 1;
        return true;
      }
      return false;
    default:
      return false;
  }
}

bool ZBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ZCompareAndExchangeP:
    case Op_ZCompareAndSwapP:
    case Op_ZWeakCompareAndSwapP: {
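      // Match rules are expressed as binary expression trees, so the extra
      // inputs (including the keep-alive edge added during barrier insertion)
      // are packed pairwise into BinaryNodes before matching.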
      Node* mem = n->in(MemNode::Address);
      Node* keepalive = n->in(5);
      Node* pair1 = new BinaryNode(mem, keepalive);

      Node* newval = n->in(MemNode::ValueIn);
      Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
      Node* pair2 = new BinaryNode(oldval, newval);

      n->set_req(MemNode::Address, pair1);
      n->set_req(MemNode::ValueIn, pair2);
      n->del_req(5);
      n->del_req(LoadStoreConditionalNode::ExpectedIn);
      return true;
    }
    case Op_ZGetAndSetP: {
      Node* keepalive = n->in(4);
      Node* newval = n->in(MemNode::ValueIn);
      Node* pair = new BinaryNode(newval, keepalive);
      n->set_req(MemNode::ValueIn, pair);
      n->del_req(4);
      return true;
    }

    default:
      return false;
  }
}

// == Verification ==

#ifdef ASSERT

static void verify_slippery_safepoints_internal(Node* ctrl) {
  // Given a CFG node, make sure it does not contain both safepoints and loads
  // that have expanded barriers.
  bool found_safepoint = false;
  bool found_load = false;

  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* node = ctrl->fast_out(i);
    if (node->in(0) != ctrl) {
      // Skip outgoing precedence edges from ctrl.
      continue;
    }
    if (node->is_SafePoint()) {
      found_safepoint = true;
    }
    if (node->is_Load() && load_require_barrier(node->as_Load()) &&
        load_has_expanded_barrier(node->as_Load())) {
      found_load = true;
    }
  }
  assert(!found_safepoint || !found_load, "found load and safepoint in same block");
}

static void verify_slippery_safepoints(Compile* C) {
  ResourceArea* area = Thread::current()->resource_area();
  Unique_Node_List visited(area);
  Unique_Node_List checked(area);

  // Recursively walk the graph.
  visited.push(C->root());
  while (visited.size() > 0) {
    Node* node = visited.pop();

    Node* ctrl = node;
    if (!node->is_CFG()) {
      ctrl = node->in(0);
    }

    if (ctrl != NULL && !checked.member(ctrl)) {
      // For each block found in the graph, verify that it does not
      // contain both a safepoint and a load requiring barriers.
      verify_slippery_safepoints_internal(ctrl);

      checked.push(ctrl);
    }

    checked.push(node);

    for (DUIterator_Fast imax, i = node->fast_outs(imax); i < imax; i++) {
      Node* use = node->fast_out(i);
      if (checked.member(use))  continue;
      if (visited.member(use))  continue;
      visited.push(use);
    }
  }
}

void ZBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  switch (phase) {
    case BarrierSetC2::BeforeOptimize:
    case BarrierSetC2::BeforeLateInsertion:
      assert(state()->load_barrier_count() == 0, "No barriers inserted yet");
      break;
    case BarrierSetC2::BeforeMacroExpand:
      // Barrier placement should be set by now.
      verify_gc_barriers(false /*post_parse*/);
      break;
    case BarrierSetC2::BeforeCodeGen:
      // Barriers have been fully expanded.
      assert(state()->load_barrier_count() == 0, "No more macro barriers");
      verify_slippery_safepoints(compile);
      break;
    default:
      assert(0, "Phase without verification");
  }
}

// post_parse implies that there might be load barriers without uses after parsing.
// That only applies when adding barriers at parse time.
void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
  ZBarrierSetC2State* s = state();
  Compile* C = Compile::current();
  ResourceMark rm;
  VectorSet visited(Thread::current()->resource_area());

  for (int i = 0; i < s->load_barrier_count(); i++) {
    LoadBarrierNode* n = s->load_barrier_node(i);

    // The Similar edge, if set, must point to the dominating barrier on the
    // same address, and that barrier must not be applied to the value from
    // the same load; otherwise the value would not be reloaded before its
    // second use.
    assert(n->in(LoadBarrierNode::Similar)->is_top() ||
           (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Address) == n->in(LoadBarrierNode::Address) &&
            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Oop) != n->in(LoadBarrierNode::Oop)),
           "broken similar edge");

    assert(n->as_LoadBarrier()->has_true_uses(),
           "found unneeded load barrier");

    // Several load barrier nodes chained through their Similar edges
    // would break the code that removes the barriers in final graph reshape.
    assert(n->in(LoadBarrierNode::Similar)->is_top() ||
           (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Similar)->is_top()),
           "chain of Similar load barriers");

    if (!n->in(LoadBarrierNode::Similar)->is_top()) {
      ResourceMark rm;
      Unique_Node_List wq;
      Node* other = n->in(LoadBarrierNode::Similar)->in(0);
      wq.push(n);
      for (uint next = 0; next < wq.size(); ++next) {
        Node* nn = wq.at(next);
        assert(nn->is_CFG(), "");
        assert(!nn->is_SafePoint(), "");

        if (nn == other) {
          continue;
        }

        if (nn->is_Region()) {
          for (uint i = 1; i < nn->req(); i++) {
            Node* m = nn->in(i);
            if (m != NULL) {
              wq.push(m);
            }
          }
        } else {
          Node* m = nn->in(0);
          if (m != NULL) {
            wq.push(m);
          }
        }
      }
    }
  }
}

#endif // end verification code

static void call_catch_cleanup_one(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl);

// This code clones all uses of a load that sits between a call and its catch
// blocks, down to each use.

static bool fixup_uses_in_catch(PhaseIdealLoop* phase, Node* start_ctrl, Node* node) {

  if (!phase->has_ctrl(node)) {
    // This node is floating - doesn't need to be cloned.
    assert(node != start_ctrl, "check");
    return false;
  }

  Node* ctrl = phase->get_ctrl(node);
  if (ctrl != start_ctrl) {
    // We are in a successor block - the node is ok.
    return false; // Unwind
  }

  // Process successor nodes
  int outcnt = node->outcnt();
  for (int i = 0; i < outcnt; i++) {
    Node* n = node->raw_out(0);
    assert(!n->is_LoadBarrier(), "Sanity");
    // Calling recursively, visiting leaves first
    fixup_uses_in_catch(phase, start_ctrl, n);
  }

  // Now all successors are outside
  // - Clone this node to each successor
  int no_succs = node->outcnt();
  assert(!node->is_Store(), "Stores not expected here");

  // In some very rare cases a load that doesn't need a barrier will end up here.
  // Treat it as a LoadP and the insertion of phis will be done correctly.
  if (node->is_Load()) {
    call_catch_cleanup_one(phase, node->as_Load(), phase->get_ctrl(node));
  } else {
    for (DUIterator_Fast jmax, i = node->fast_outs(jmax); i < jmax; i++) {
      Node* use = node->fast_out(i);
      Node* clone = node->clone();
      assert(clone->outcnt() == 0, "");

      assert(use->find_edge(node) != -1, "check");
      phase->igvn().rehash_node_delayed(use);
      use->replace_edge(node, clone);

      Node* new_ctrl;
      if (use->is_block_start()) {
        new_ctrl = use;
      } else if (use->is_CFG()) {
        new_ctrl = use->in(0);
        assert(new_ctrl != NULL, "");
      } else {
        new_ctrl = phase->get_ctrl(use);
      }

      phase->set_ctrl(clone, new_ctrl);

      if (phase->C->directive()->ZTraceLoadBarriersOption) tty->print_cr("  Clone op %i as %i to control %i", node->_idx, clone->_idx, new_ctrl->_idx);
      phase->igvn().register_new_node_with_optimizer(clone);
      --i, --jmax;
    }
    assert(node->outcnt() == 0, "must be empty now");

    // The node is dead.
    phase->igvn().remove_dead_node(node);
  }
  return true; // unwind - return if a use was processed
}

// Clone a load to a specific catch_proj
static Node* clone_load_to_catchproj(PhaseIdealLoop* phase, Node* load, Node* catch_proj) {
  Node* cloned_load = load->clone();
  cloned_load->set_req(0, catch_proj);      // set explicit control
  phase->set_ctrl(cloned_load, catch_proj); // update
  if (phase->C->directive()->ZTraceLoadBarriersOption) tty->print_cr("  Clone LOAD %i as %i to control %i", load->_idx, cloned_load->_idx, catch_proj->_idx);
  phase->igvn().register_new_node_with_optimizer(cloned_load);
  return cloned_load;
}

static Node* get_dominating_region(PhaseIdealLoop* phase, Node* node, Node* stop) {
  Node* region = node;
  while (!region->isa_Region()) {
    Node* up = phase->idom(region);
    assert(up != region, "Must not loop");
    assert(up != stop,   "Must not find original control");
    region = up;
  }
  return region;
}

// Clone this load to each catch block
static void call_catch_cleanup_one(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl) {
  bool trace = phase->C->directive()->ZTraceLoadBarriersOption;
  phase->igvn().set_delay_transform(true);

  // Verify preconditions
  assert(ctrl->isa_Proj() && ctrl->in(0)->isa_Call(), "Must be a call proj");
  assert(ctrl->raw_out(0)->isa_Catch(), "Must be a catch");

  if (ctrl->raw_out(0)->isa_Catch()->outcnt() == 1) {
    if (trace) tty->print_cr("Cleaning up catch: Skipping load %i, call with single catch", load->_idx);
    return;
  }

  // Process the load's successor nodes - if any of them is between
  // the call and the catch blocks, it needs to be cloned too.
  // This is done recursively.
  for (uint i = 0; i < load->outcnt();) {
    Node* n = load->raw_out(i);
    assert(!n->is_LoadBarrier(), "Sanity");
    if (!fixup_uses_in_catch(phase, ctrl, n)) {
      // If no successor was cloned, progress to the next out.
      i++;
    }
  }

  // Now all the load's uses have been cloned down.
  // The only thing left is to clone the loads themselves, but they must end up
  // first in the catch blocks.

  // We clone the loads to the catch blocks only when needed.
  // An array is used to map the catch blocks to each lazily cloned load.
  // That way no unnecessary loads are cloned.

  // Any use dominated by the original block must have a phi and a region added.

  Node* catch_node = ctrl->raw_out(0);
  int number_of_catch_projs = catch_node->outcnt();
  Node** proj_to_load_mapping = NEW_RESOURCE_ARRAY(Node*, number_of_catch_projs);
  Copy::zero_to_bytes(proj_to_load_mapping, sizeof(Node*) * number_of_catch_projs);

  // The phi_map is used to keep track of where phis have already been inserted
  int phi_map_len = phase->C->unique();
  Node** phi_map = NEW_RESOURCE_ARRAY(Node*, phi_map_len);
  Copy::zero_to_bytes(phi_map, sizeof(Node*) * phi_map_len);

  for (unsigned int i = 0; i < load->outcnt(); i++) {
    Node* load_use_control = NULL;
    Node* load_use = load->raw_out(i);

    if (phase->has_ctrl(load_use)) {
      load_use_control = phase->get_ctrl(load_use);
    } else {
      load_use_control = load_use->in(0);
    }
    assert(load_use_control != NULL, "sanity");
    if (trace) tty->print_cr("  Handling use: %i, with control: %i", load_use->_idx, load_use_control->_idx);

    // Sometimes the load's use is a phi. For those we need to determine from
    // which catch block the use is defined.
    bool load_use_is_phi = false;
    unsigned int load_use_phi_index = 0;
    Node* phi_ctrl = NULL;
    if (load_use->is_Phi()) {
      // Find the phi input that matches the load
      for (unsigned int u = 1; u < load_use->req(); u++) {
        if (load_use->in(u) == load) {
          load_use_is_phi = true;
          load_use_phi_index = u;
          assert(load_use->in(0)->is_Region(), "Region or broken");
          phi_ctrl = load_use->in(0)->in(u);
          assert(phi_ctrl->is_CFG(), "check");
          assert(phi_ctrl != load,   "check");
          break;
        }
      }
      assert(load_use_is_phi,        "must find");
      assert(load_use_phi_index > 0, "sanity");
    }

    // For each load use, see which catch proj dominates; create the load clone lazily and reconnect
    bool found_dominating_catchproj = false;
    for (int c = 0; c < number_of_catch_projs; c++) {
      Node* catchproj = catch_node->raw_out(c);
      assert(catchproj != NULL && catchproj->isa_CatchProj(), "Sanity");

      if (!phase->is_dominator(catchproj, load_use_control)) {
        if (load_use_is_phi && phase->is_dominator(catchproj, phi_ctrl)) {
          // The load's use is local to the catchproj.
          // Fall out and replace the load with a catch-local load clone.
        } else {
          continue;
        }
      }
      assert(!found_dominating_catchproj, "Max one should match");

      // Clone loads to catch projs
      Node* load_clone = proj_to_load_mapping[c];
      if (load_clone == NULL) {
        load_clone = clone_load_to_catchproj(phase, load, catchproj);
        proj_to_load_mapping[c] = load_clone;
      }
      phase->igvn().rehash_node_delayed(load_use);

      if (load_use_is_phi) {
        // Phis are special - the load is defined from a specific control flow
        load_use->set_req(load_use_phi_index, load_clone);
      } else {
        // Multiple edges can be replaced at once - on calls for example
        load_use->replace_edge(load, load_clone);
      }
      --i; // more than one edge can have been removed, but the next is in later iterations

      // We could break the for-loop after finding a dominating match.
      // But keep iterating to catch any bad idom early.
      found_dominating_catchproj = true;
    }

    // We found no single catchproj that dominated the use - the use is at a
    // point after which control flow from multiple catch projs has merged. We
    // will have to create phi nodes before the use and tie the output from
    // the cloned loads together. It can be a single phi or a number of
    // chained phis, depending on control flow.
    if (!found_dominating_catchproj) {

      // Use phi-control if use is a phi
      if (load_use_is_phi) {
        load_use_control = phi_ctrl;
      }
      assert(phase->is_dominator(ctrl, load_use_control), "Common use but no dominator");

      // Clone a load on all paths
      for (int c = 0; c < number_of_catch_projs; c++) {
        Node* catchproj = catch_node->raw_out(c);
        Node* load_clone = proj_to_load_mapping[c];
        if (load_clone == NULL) {
          load_clone = clone_load_to_catchproj(phase, load, catchproj);
          proj_to_load_mapping[c] = load_clone;
        }
      }

      // Move up the dominator tree from the use until the dominance frontier is reached
      Node* next_region = get_dominating_region(phase, load_use_control, ctrl);
      while (phase->idom(next_region) != catch_node) {
        next_region = phase->idom(next_region);
        if (trace) tty->print_cr("Moving up idom to region ctrl %i", next_region->_idx);
      }
      assert(phase->is_dominator(catch_node, next_region), "Sanity");

      // Create or reuse a phi node that collects all cloned loads and feed it to the use.
      Node* test_phi = phi_map[next_region->_idx];
      if ((test_phi != NULL) && test_phi->is_Phi()) {
        // Reuse an already created phi
        if (trace) tty->print_cr("    Using cached Phi %i on load_use %i", test_phi->_idx, load_use->_idx);
        phase->igvn().rehash_node_delayed(load_use);
        load_use->replace_edge(load, test_phi);
        // Now this use is done
      } else {
        // Otherwise we need to create one or more phis
        PhiNode* next_phi = new PhiNode(next_region, load->type());
        phi_map[next_region->_idx] = next_phi; // cache new phi
        phase->igvn().rehash_node_delayed(load_use);
        load_use->replace_edge(load, next_phi);

        int dominators_of_region = 0;
        do {
          // New phi, connect to region and add all loads as in.
          Node* region = next_region;
          assert(region->isa_Region() && region->req() > 2, "Catch dead region nodes");
          PhiNode* new_phi = next_phi;

          if (trace) tty->print_cr("Created Phi %i on load %i with control %i", new_phi->_idx, load->_idx, region->_idx);

          // Need to add all cloned loads to the phi, taking care that the right path is matched
          dominators_of_region = 0; // reset for new region
          for (unsigned int reg_i = 1; reg_i < region->req(); reg_i++) {
            Node* region_pred = region->in(reg_i);
            assert(region_pred->is_CFG(), "check");
            bool pred_has_dominator = false;
            for (int c = 0; c < number_of_catch_projs; c++) {
              Node* catchproj = catch_node->raw_out(c);
              if (phase->is_dominator(catchproj, region_pred)) {
                new_phi->set_req(reg_i, proj_to_load_mapping[c]);
                if (trace) tty->print_cr(" - Phi in(%i) set to load %i", reg_i, proj_to_load_mapping[c]->_idx);
                pred_has_dominator = true;
                dominators_of_region++;
                break;
              }
            }

            // Sometimes we need to chain several phis.
            if (!pred_has_dominator) {
              assert(dominators_of_region <= 1, "More than one region can't require extra phi");
              if (trace) tty->print_cr(" - Region %i pred %i not dominated by catch proj", region->_idx, region_pred->_idx);
              // Continue the search on this region_pred
              // - walk up to the next region
              // - create a new phi and connect it to the first new_phi
              next_region = get_dominating_region(phase, region_pred, ctrl);

              // Look up if there already is a phi, create a new one otherwise
              Node* test_phi = phi_map[next_region->_idx];
              if ((test_phi != NULL) && test_phi->is_Phi()) {
                next_phi = test_phi->isa_Phi();
                dominators_of_region++; // record that a match was found and that we are done
                if (trace) tty->print_cr("    Using cached phi Phi %i on control %i", next_phi->_idx, next_region->_idx);
              } else {
                next_phi = new PhiNode(next_region, load->type());
                phi_map[next_region->_idx] = next_phi;
              }
              new_phi->set_req(reg_i, next_phi);
            }
          }

          new_phi->set_req(0, region);
          phase->igvn().register_new_node_with_optimizer(new_phi);
          phase->set_ctrl(new_phi, region);

          assert(dominators_of_region != 0, "Must have found one this iteration");
        } while (dominators_of_region == 1);
      }
      --i;
    }
  } // end of loop over uses

  assert(load->outcnt() == 0, "All uses should be handled");
  phase->igvn().remove_dead_node(load);
  phase->C->print_method(PHASE_CALL_CATCH_CLEANUP, 4, load->_idx);

  // Now we should be home
  phase->igvn().set_delay_transform(false);
}

// Sort out the loads that are between a call and its catch blocks
static void process_catch_cleanup_candidate(PhaseIdealLoop* phase, LoadNode* load) {
  bool trace = phase->C->directive()->ZTraceLoadBarriersOption;

  Node* ctrl = phase->get_ctrl(load);
  if (!ctrl->is_Proj() || (ctrl->in(0) == NULL) || !ctrl->in(0)->isa_Call()) {
    return;
  }

  Node* catch_node = ctrl->isa_Proj()->raw_out(0);
  if (catch_node->is_Catch()) {
    if (catch_node->outcnt() > 1) {
      call_catch_cleanup_one(phase, load, ctrl);
    } else {
      if (trace) tty->print_cr("Call catch cleanup with only one catch: load %i ", load->_idx);
    }
  }
}

void ZBarrierSetC2::barrier_insertion_phase(Compile* C, PhaseIterGVN& igvn) const {
  PhaseIdealLoop::optimize(igvn, LoopOptsZBarrierInsertion);
  if (C->failing())  return;
}

bool ZBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {

  if (mode == LoopOptsZBarrierInsertion) {
    // First make sure all loads between call and catch are moved to the catch block
    clean_catch_blocks(phase);

    // Then insert load barriers on all loads that need them
    insert_load_barriers(phase);

    // Handle all Unsafe accesses that need barriers
    insert_barriers_on_unsafe(phase);

    phase->C->clear_major_progress();
    return true;
  } else {
    return false;
  }
}

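// A CAS whose expected value is NULL needs no load barrier: a stale oop in
// the field never compares equal to NULL, so the operation can only succeed
// when the field already contains NULL and there is no old value to heal.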
static bool can_simplify_cas(LoadStoreNode* node) {
  if (node->isa_LoadStoreConditional()) {
    Node* expected_in = node->as_LoadStoreConditional()->in(LoadStoreConditionalNode::ExpectedIn);
    return (expected_in->get_ptr_type() == TypePtr::NULL_PTR);
  } else {
    return false;
  }
}

static void insert_barrier_before_unsafe(PhaseIdealLoop* phase, LoadStoreNode* old_node) {

  Compile* C = phase->C;
  PhaseIterGVN& igvn = phase->igvn();
  LoadStoreNode* zclone = NULL;

  Node* in_ctrl = old_node->in(MemNode::Control);
  Node* in_mem  = old_node->in(MemNode::Memory);
  Node* in_adr  = old_node->in(MemNode::Address);
  Node* in_val  = old_node->in(MemNode::ValueIn);
  const TypePtr* adr_type = old_node->adr_type();
  const TypePtr* load_type = TypeOopPtr::BOTTOM; // The type for the load we are adding

  switch (old_node->Opcode()) {
    case Op_CompareAndExchangeP: {
      zclone = new ZCompareAndExchangePNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn),
              adr_type, old_node->get_ptr_type(), ((CompareAndExchangeNode*)old_node)->order());
      load_type = old_node->bottom_type()->is_ptr();
      break;
    }
    case Op_WeakCompareAndSwapP: {
      if (can_simplify_cas(old_node)) {
        break;
      }
      zclone = new ZWeakCompareAndSwapPNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn),
              ((CompareAndSwapNode*)old_node)->order());
      adr_type = TypePtr::BOTTOM;
      break;
    }
    case Op_CompareAndSwapP: {
      if (can_simplify_cas(old_node)) {
        break;
      }
      zclone = new ZCompareAndSwapPNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn),
              ((CompareAndSwapNode*)old_node)->order());
      adr_type = TypePtr::BOTTOM;
      break;
    }
    case Op_GetAndSetP: {
      zclone = new ZGetAndSetPNode(in_ctrl, in_mem, in_adr, in_val, old_node->adr_type(), old_node->get_ptr_type());
      load_type = old_node->bottom_type()->is_ptr();
      break;
    }
  }
  if (zclone != NULL) {
    igvn.register_new_node_with_optimizer(zclone, old_node);

    // Make load
    LoadPNode* load = new LoadPNode(NULL, in_mem, in_adr, adr_type, load_type, MemNode::unordered,
                                    LoadNode::DependsOnlyOnTest);
    load_set_expanded_barrier(load);
    igvn.register_new_node_with_optimizer(load);
    igvn.replace_node(old_node, zclone);

    Node* barrier = new LoadBarrierNode(C, NULL, in_mem, load, in_adr, false /* weak */);
    Node* barrier_val = new ProjNode(barrier, LoadBarrierNode::Oop);
    Node* barrier_ctrl = new ProjNode(barrier, LoadBarrierNode::Control);

    igvn.register_new_node_with_optimizer(barrier);
    igvn.register_new_node_with_optimizer(barrier_val);
    igvn.register_new_node_with_optimizer(barrier_ctrl);

    // Loop over all of in_ctrl's uses and move them to barrier_ctrl
    for (DUIterator_Last imin, i = in_ctrl->last_outs(imin); i >= imin; --i) {
      Node* use = in_ctrl->last_out(i);
      uint l;
      for (l = 0; use->in(l) != in_ctrl; l++) {}
      igvn.replace_input_of(use, l, barrier_ctrl);
    }

    load->set_req(MemNode::Control, in_ctrl);
    barrier->set_req(LoadBarrierNode::Control, in_ctrl);
    zclone->add_req(barrier_val); // add req as keep alive.

    C->print_method(PHASE_ADD_UNSAFE_BARRIER, 4, zclone->_idx);
  }
}

void ZBarrierSetC2::insert_barriers_on_unsafe(PhaseIdealLoop* phase) const {
  Compile* C = phase->C;
  PhaseIterGVN& igvn = phase->igvn();
  uint new_ids = C->unique();
  VectorSet visited(Thread::current()->resource_area());
  GrowableArray<Node*> nodeStack(Thread::current()->resource_area(), 0, 0, NULL);
  nodeStack.push(C->root());
  visited.test_set(C->root()->_idx);

  // Traverse all nodes, visit all unsafe ops that require a barrier
  while (nodeStack.length() > 0) {
    Node* n = nodeStack.pop();

    bool is_old_node = (n->_idx < new_ids); // don't process nodes that were created during cleanup
    if (is_old_node) {
      if (n->is_LoadStore()) {
        LoadStoreNode* lsn = n->as_LoadStore();
        if (lsn->has_barrier()) {
          BasicType bt = lsn->in(MemNode::Address)->bottom_type()->basic_type();
          assert(bt == T_OBJECT || bt == T_ARRAY, "Sanity test");
          insert_barrier_before_unsafe(phase, lsn);
        }
      }
    }
    for (uint i = 0; i < n->len(); i++) {
      if (n->in(i)) {
        if (!visited.test_set(n->in(i)->_idx)) {
          nodeStack.push(n->in(i));
        }
      }
    }
  }

  igvn.optimize();
  C->print_method(PHASE_ADD_UNSAFE_BARRIER, 2);
}

// The purpose of ZBarrierSetC2::clean_catch_blocks is to prepare the IR for
// splicing in load barrier nodes.
//
// The problem is that we might have instructions between a call and its catch
// nodes. (This is usually handled in PhaseCFG::call_catch_cleanup, which clones
// mach nodes in already scheduled blocks.) We can't have loads that require
// barriers there, because we need to splice in new control flow, and that
// would violate the IR.
//
// clean_catch_blocks finds all loads that require a barrier and clones them,
// and any dependent instructions, down to each use. The loads must be at the
// beginning of the catch block, before any store.
//
// Sometimes the load's use is at a place dominated by all catch blocks; then
// we need a load in each catch block, and a phi at the dominated use.
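//
// Schematically (an illustrative sketch, not exact IR):
//
//   call                          call
//    |                             |
//   proj                          proj
//    |                             |
//   load           ==>           Catch
//    |                           /    \
//   Catch                catchproj1  catchproj2
//   /    \                    |          |
//  ...    ...              load'      load''
//
// with any use below a later merge point fed by a phi of the cloned loads.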

void ZBarrierSetC2::clean_catch_blocks(PhaseIdealLoop* phase) const {

  Compile* C = phase->C;
  uint new_ids = C->unique();
  PhaseIterGVN& igvn = phase->igvn();
  VectorSet visited(Thread::current()->resource_area());
  GrowableArray<Node*> nodeStack(Thread::current()->resource_area(), 0, 0, NULL);
  nodeStack.push(C->root());
  visited.test_set(C->root()->_idx);

  // Traverse all nodes, visit all loads that require a barrier
  while (nodeStack.length() > 0) {
    Node* n = nodeStack.pop();

    for (uint i = 0; i < n->len(); i++) {
      if (n->in(i)) {
        if (!visited.test_set(n->in(i)->_idx)) {
          nodeStack.push(n->in(i));
        }
      }
    }

    bool is_old_node = (n->_idx < new_ids); // don't process nodes that were created during cleanup
    if (n->is_Load() && is_old_node) {
      LoadNode* load = n->isa_Load();
      // Only care about loads that will have a barrier
      if (load_require_barrier(load)) {
        process_catch_cleanup_candidate(phase, load);
      }
    }
  }

  C->print_method(PHASE_CALL_CATCH_CLEANUP, 2);
}

class DomDepthCompareClosure : public CompareClosure<LoadNode*> {
  PhaseIdealLoop* _phase;

public:
  DomDepthCompareClosure(PhaseIdealLoop* phase) : _phase(phase) { }

  int do_compare(LoadNode* const &n1, LoadNode* const &n2) {
    int d1 = _phase->dom_depth(_phase->get_ctrl(n1));
    int d2 = _phase->dom_depth(_phase->get_ctrl(n2));
    if (d1 == d2) {
      // Compare index if the depth is the same, ensures all entries are unique.
      return n1->_idx - n2->_idx;
    } else {
      return d2 - d1;
    }
  }
};
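
// Note: returning d2 - d1 sorts by descending dom depth, so the most deeply
// dominated loads come first. For example, a load at dom depth 7 sorts before
// a load at depth 3; ties are broken by node index, making the order total.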

// Traverse the graph and add all loadPs to a list, sorted by dom depth
void gather_loadnodes_sorted(PhaseIdealLoop* phase, GrowableArray<LoadNode*>* loadList) {
  VectorSet visited(Thread::current()->resource_area());
  GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL);
  DomDepthCompareClosure ddcc(phase);

  nodeStack.push(phase->C->root());
  while (nodeStack.length() > 0) {
    Node *n = nodeStack.pop();
    if (visited.test(n->_idx)) {
      continue;
    }

    if (n->isa_Load()) {
      LoadNode *load = n->as_Load();
      if (load_require_barrier(load)) {
        assert(phase->get_ctrl(load) != NULL, "sanity");
        assert(phase->dom_depth(phase->get_ctrl(load)) != 0, "sanity");
        loadList->insert_sorted(&ddcc, load);
      }
    }

    visited.set(n->_idx);
    for (uint i = 0; i < n->req(); i++) {
      if (n->in(i)) {
        if (!visited.test(n->in(i)->_idx)) {
          nodeStack.push(n->in(i));
        }
      }
    }
  }
}

// Add LoadBarriers to all LoadPs
void ZBarrierSetC2::insert_load_barriers(PhaseIdealLoop* phase) const {
  GrowableArray<LoadNode *> loadList(Thread::current()->resource_area(), 0, 0, NULL);
  gather_loadnodes_sorted(phase, &loadList);

  for (GrowableArrayIterator<LoadNode *> loadIter = loadList.begin(); loadIter != loadList.end(); ++loadIter) {
    LoadNode *load = *loadIter;

    if (load_has_expanded_barrier(load)) {
      continue;
    }

    do {
      // Insert a barrier on a loadP. If another load is found that needs to be
      // expanded first, retry on that one (and any load it in turn reports)
      // until the DFS finds no more candidates, then recheck this load.
      LoadNode* result = insert_one_loadbarrier(phase, load, phase->get_ctrl(load));
      while (result != NULL) {
        result = insert_one_loadbarrier(phase, result, phase->get_ctrl(result));
      }
    } while (!load_has_expanded_barrier(load));
  }

  phase->C->print_method(PHASE_INSERT_BARRIER, 2);
}

void push_antidependent_stores(PhaseIdealLoop* phase, Node_Stack& nodestack, LoadNode* start_load) {
  // Push all stores on the same memory input that can alias with start_load.
  // Any load found from them during the DFS must be handled first.
  PhaseIterGVN &igvn = phase->igvn();
  int load_alias_idx = igvn.C->get_alias_index(start_load->adr_type());

  Node *mem = start_load->in(MemNode::Memory);
  for (DUIterator_Fast imax, u = mem->fast_outs(imax); u < imax; u++) {
    Node *mem_use = mem->fast_out(u);

    if (mem_use == start_load) continue;
    if (!mem_use->is_Store()) continue;
    if (!phase->has_ctrl(mem_use)) continue;
    if (phase->get_ctrl(mem_use) != phase->get_ctrl(start_load)) continue;

    // Add any aliasing store in this block
    StoreNode *store = mem_use->isa_Store();
    const TypePtr *adr_type = store->adr_type();
    if (igvn.C->can_alias(adr_type, load_alias_idx)) {
      nodestack.push(store, 0);
    }
  }
}

LoadNode* ZBarrierSetC2::insert_one_loadbarrier(PhaseIdealLoop* phase, LoadNode* start_load, Node* ctrl) const {
  bool trace = phase->C->directive()->ZTraceLoadBarriersOption;

  // Check for other loadPs in the same block that are reachable by a DFS
  // - if one is found, return it: it needs to have its barrier inserted first
  // - otherwise proceed and insert the barrier
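  //
  // In effect (a rough sketch of the protocol, not literal code):
  //
  //   insert_one_loadbarrier(load):
  //     if the DFS below finds another unexpanded loadP in the same block:
  //       return it                          // caller expands that one first
  //     insert_one_loadbarrier_inner(load)   // splice in barrier, mark expanded
  //     return NULL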

  VectorSet visited(Thread::current()->resource_area());
  Node_Stack nodestack(100);

  nodestack.push(start_load, 0);
  push_antidependent_stores(phase, nodestack, start_load);

  while (!nodestack.is_empty()) {
    Node* n = nodestack.node(); // peek
    nodestack.pop();
    if (visited.test(n->_idx)) {
      continue;
    }

    if (n->is_Load() && n != start_load && load_require_barrier(n->as_Load()) && !load_has_expanded_barrier(n->as_Load())) {
      // Found another load that needs a barrier in the same block. Must expand later loads first.
      if (trace) tty->print_cr(" * Found LoadP %i on DFS", n->_idx);
      return n->as_Load(); // Return the node that should be expanded first
    }

    if (!phase->has_ctrl(n)) continue;
    if (phase->get_ctrl(n) != phase->get_ctrl(start_load)) continue;
    if (n->is_Phi()) continue;

    visited.set(n->_idx);
    // Push all users
    for (DUIterator_Fast imax, ii = n->fast_outs(imax); ii < imax; ii++) {
      Node* c = n->fast_out(ii);
      if (c != NULL) {
        nodestack.push(c, 0);
      }
    }
  }

  insert_one_loadbarrier_inner(phase, start_load, ctrl, visited);
  return NULL;
}

void ZBarrierSetC2::insert_one_loadbarrier_inner(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl, VectorSet visited2) const {
  PhaseIterGVN &igvn = phase->igvn();
  Compile* C = igvn.C;
  bool trace = C->directive()->ZTraceLoadBarriersOption;

  // Create the barrier
  Node* barrier = new LoadBarrierNode(C, NULL, load->in(LoadNode::Memory), NULL, load->in(LoadNode::Address), load_has_weak_barrier(load));
  Node* barrier_val = new ProjNode(barrier, LoadBarrierNode::Oop);
  Node* barrier_ctrl = new ProjNode(barrier, LoadBarrierNode::Control);

  if (trace) tty->print_cr("Insert load %i with barrier: %i and ctrl: %i", load->_idx, barrier->_idx, ctrl->_idx);

  // Splice control
  // - Insert the barrier control diamond between the load's ctrl and the ctrl
  //   successor on the path to the block end.
  // - If the control successor is a catch, step over to the next.
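  //
  // Schematically (barrier_ctrl and barrier_val are the projections created
  // above):
  //
  //   Before:  ctrl ------------------------------------> ctrl_succ
  //   After:   ctrl --> LoadBarrier --> barrier_ctrl ---> ctrl_succ
  //
  // with the load pinned on ctrl and its users rewired to barrier_val.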
  Node* ctrl_succ = NULL;
  for (DUIterator_Fast imax, j = ctrl->fast_outs(imax); j < imax; j++) {
    Node* tmp = ctrl->fast_out(j);

    // - CFG nodes are the ones we are going to splice (exactly one!)
    // - Phi nodes will continue to hang from the region node
    // - Self loops should be skipped
    if (tmp->is_Phi() || tmp == ctrl) {
      continue;
    }

    if (tmp->is_CFG()) {
      assert(ctrl_succ == NULL, "There can be only one");
      ctrl_succ = tmp;
      continue;
    }
  }

  // Now splice control
  assert(ctrl_succ != load, "sanity");
  assert(ctrl_succ != NULL, "Broken IR");
  bool found = false;
  for (uint k = 0; k < ctrl_succ->req(); k++) {
    if (ctrl_succ->in(k) == ctrl) {
      assert(!found, "sanity");
      if (trace) tty->print_cr(" Move CFG ctrl_succ %i to barrier_ctrl", ctrl_succ->_idx);
      igvn.replace_input_of(ctrl_succ, k, barrier_ctrl);
      found = true;
      k--;
    }
  }

  // For all successors of ctrl: move all visited nodes to become successors of barrier_ctrl instead
  for (DUIterator_Fast imax, r = ctrl->fast_outs(imax); r < imax; r++) {
    Node* tmp = ctrl->fast_out(r);
    if (tmp->is_SafePoint() || (visited2.test(tmp->_idx) && (tmp != load))) {
      if (trace) tty->print_cr(" Move ctrl_succ %i to barrier_ctrl", tmp->_idx);
      igvn.replace_input_of(tmp, 0, barrier_ctrl);
      --r; --imax;
    }
  }

  // Move the load's users to the barrier
  for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) {
    Node* u = load->fast_out(i);
    if (u->isa_LoadBarrier()) {
      continue;
    }

    // Find the correct input - could be replaced with an iterator?
    for (uint j = 0; j < u->req(); j++) {
      if (u->in(j) == load) {
        igvn.replace_input_of(u, j, barrier_val);
        --i; --imax; // Adjust the iterator of the *outer* loop
        break; // Some nodes (calls) might have several uses of the same node
      }
    }
  }

  // Connect the barrier to the load and control
  barrier->set_req(LoadBarrierNode::Oop, load);
  barrier->set_req(LoadBarrierNode::Control, ctrl);

  igvn.replace_input_of(load, MemNode::Control, ctrl);
  load->pin();

  igvn.rehash_node_delayed(load);
  igvn.register_new_node_with_optimizer(barrier);
  igvn.register_new_node_with_optimizer(barrier_val);
  igvn.register_new_node_with_optimizer(barrier_ctrl);
  load_set_expanded_barrier(load);

  C->print_method(PHASE_INSERT_BARRIER, 3, load->_idx);
}

// The bad_mask in the ThreadLocalData shouldn't have an anti-dependence check.
// The bad_mask address is of type TypeRawPtr, but that will alias with
// InitializeNodes until the type system is expanded.
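//
// Concretely: a mach load whose base is the ThreadLocal node and whose
// displacement is ZThreadLocalData::address_bad_mask_offset() (the load of
// the bad mask on the barrier fast path) is exempt from anti-dependence
// checking; everything else keeps the default behavior.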
bool ZBarrierSetC2::needs_anti_dependence_check(const Node* node) const {
  MachNode* mnode = node->isa_Mach();
  if (mnode != NULL) {
    intptr_t offset = 0;
    const TypePtr *adr_type2 = NULL;
    const Node* base = mnode->get_base_and_disp(offset, adr_type2);
    if ((base != NULL) &&
        (base->is_Mach() && base->as_Mach()->ideal_Opcode() == Op_ThreadLocal) &&
        (offset == in_bytes(ZThreadLocalData::address_bad_mask_offset()))) {
      return false;
    }
  }
  return true;
}