/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/narrowptrnode.hpp"
#include "utilities/macros.hpp"

// By default this is a no-op.
void BarrierSetC2::resolve_address(C2Access& access) const { }

void* C2Access::barrier_set_state() const {
  return _kit->barrier_set_state();
}

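// Returns true when the access must be bracketed by MemBarCPUOrder barriers:
// always for atomic (read-write) accesses, and for anonymous (Unsafe) accesses
// unless a unique alias category can be determined for the address.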
bool C2Access::needs_cpu_membar() const {
  bool mismatched = (_decorators & C2_MISMATCHED) != 0;
  bool is_unordered = (_decorators & MO_UNORDERED) != 0;
  bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  bool in_heap = (_decorators & IN_HEAP) != 0;

  bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
  bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  bool is_atomic = is_read && is_write;

  if (is_atomic) {
    // Atomics always need to be wrapped in CPU membars
    return true;
  }

  if (anonymous) {
    // We will need memory barriers unless we can determine a unique
    // alias category for this reference.  (Note:  If for some reason
    // the barriers get omitted and the unsafe reference begins to "pollute"
    // the alias analysis of the rest of the graph, either Compile::can_alias
    // or Compile::must_alias will throw a diagnostic assert.)
    if (!in_heap || !is_unordered || (mismatched && !_addr.type()->isa_aryptr())) {
      return true;
    }
  }

  return false;
}

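// Raw store once the address has been resolved: round doubles if needed, pick
// the memory ordering from the decorators and emit the StoreNode. GC-specific
// BarrierSetC2 subclasses may extend this to insert their write barriers.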
Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();
  GraphKit* kit = access.kit();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;

  bool in_native = (decorators & IN_NATIVE) != 0;
  assert(!in_native, "not supported yet");

  if (access.type() == T_DOUBLE) {
    Node* new_val = kit->dstore_rounding(val.node());
    val.set_node(new_val);
  }

  MemNode::MemOrd mo = access.mem_node_mo();

  Node* store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
                                     access.addr().type(), mo, requires_atomic_access, unaligned, mismatched);
  access.set_raw_access(store);
  return store;
}

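// Raw load once the address has been resolved. IN_NATIVE loads are emitted
// without an alias type; control-dependent/pinned loads (see fixup_decorators)
// are tied to the current control node.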
Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  GraphKit* kit = access.kit();

  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
  bool pinned = (decorators & C2_PINNED_LOAD) != 0;

  bool in_native = (decorators & IN_NATIVE) != 0;

  MemNode::MemOrd mo = access.mem_node_mo();
  LoadNode::ControlDependency dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest;
  Node* control = control_dependent ? kit->control() : NULL;

  Node* load;
  if (in_native) {
    load = kit->make_load(control, adr, val_type, access.type(), mo);
  } else {
    load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
                          dep, requires_atomic_access, unaligned, mismatched);
  }
  access.set_raw_access(load);

  return load;
}

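// RAII helper that brackets an access with the memory barriers implied by its
// decorators: the constructor emits the leading membar (release/volatile) and
// any MemBarCPUOrder, and the destructor emits the trailing membar
// (acquire/volatile), pairing it with the leading one where required.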
class C2AccessFence: public StackObj {
  C2Access& _access;
  Node* _leading_membar;

public:
  C2AccessFence(C2Access& access) :
    _access(access), _leading_membar(NULL) {
    GraphKit* kit = access.kit();
    DecoratorSet decorators = access.decorators();

    bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
    bool is_read = (decorators & C2_READ_ACCESS) != 0;
    bool is_atomic = is_read && is_write;

    bool is_volatile = (decorators & MO_SEQ_CST) != 0;
    bool is_release = (decorators & MO_RELEASE) != 0;

    if (is_atomic) {
      // Memory-model-wise, a LoadStore acts like a little synchronized
      // block, so it needs barriers on each side.  These don't translate
      // into actual barriers on most machines, but we still need the rest
      // of the compiler to respect ordering.
      if (is_release) {
        _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
      } else if (is_volatile) {
        if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
          _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
        } else {
          _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
        }
      }
    } else if (is_write) {
      // If the reference is volatile, prevent following memory ops from
      // floating down past the volatile write.  Also prevents commoning
      // of another volatile read.
      if (is_volatile || is_release) {
        _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
      }
    } else {
      // Memory barrier to prevent normal and 'unsafe' accesses from
      // bypassing each other.  Happens after null checks, so the
      // exception paths do not take memory state from the memory barrier,
      // so there's no problem making a strong assert about mixing users
      // of safe & unsafe memory.
      if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) {
        _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
      }
    }

    if (access.needs_cpu_membar()) {
      kit->insert_mem_bar(Op_MemBarCPUOrder);
    }

    if (is_atomic) {
      // 4984716: MemBars must be inserted before this
      //          memory node in order to avoid a false
      //          dependency which will confuse the scheduler.
      access.set_memory();
    }
  }

  ~C2AccessFence() {
    GraphKit* kit = _access.kit();
    DecoratorSet decorators = _access.decorators();

    bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
    bool is_read = (decorators & C2_READ_ACCESS) != 0;
    bool is_atomic = is_read && is_write;

    bool is_volatile = (decorators & MO_SEQ_CST) != 0;
    bool is_acquire = (decorators & MO_ACQUIRE) != 0;

    // If the reference is volatile, prevent following volatile ops from
    // floating up before the volatile access.
    if (_access.needs_cpu_membar()) {
      kit->insert_mem_bar(Op_MemBarCPUOrder);
    }

    if (is_atomic) {
      if (is_acquire || is_volatile) {
        Node* n = _access.raw_access();
        Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
        if (_leading_membar != NULL) {
          MemBarNode::set_load_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
        }
      }
    } else if (is_write) {
      // On CPUs that are not multiple copy atomic, the MemBarVolatile is
      // emitted before the volatile load instead (see the constructor), so
      // no trailing barrier is needed for the store here.
      if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
        Node* n = _access.raw_access();
        Node* mb = kit->insert_mem_bar(Op_MemBarVolatile, n); // Use fat membar
        if (_leading_membar != NULL) {
          MemBarNode::set_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
        }
      }
    } else {
      if (is_volatile || is_acquire) {
        Node* n = _access.raw_access();
        assert(_leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
        Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
        mb->as_MemBar()->set_trailing_load();
      }
    }
  }
};

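// Entry points used by GraphKit: wrap the access in a C2AccessFence, let the
// (possibly GC-specific) barrier set resolve the address, then delegate to the
// *_at_resolved implementation.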
Node* BarrierSetC2::store_at(C2Access& access, C2AccessValue& val) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return store_at_resolved(access, val);
}

Node* BarrierSetC2::load_at(C2Access& access, const Type* val_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return load_at_resolved(access, val_type);
}

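// Map the MO_* decorators (and the read/write kind of the access) to the
// MemNode::MemOrd used when constructing the Load/Store/LoadStore node.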
MemNode::MemOrd C2Access::mem_node_mo() const {
  bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
  bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  if ((_decorators & MO_SEQ_CST) != 0) {
    if (is_write && is_read) {
      // For atomic operations
      return MemNode::seqcst;
    } else if (is_write) {
      return MemNode::release;
    } else {
      assert(is_read, "what else?");
      return MemNode::acquire;
    }
  } else if ((_decorators & MO_RELEASE) != 0) {
    return MemNode::release;
  } else if ((_decorators & MO_ACQUIRE) != 0) {
    return MemNode::acquire;
  } else if (is_write) {
    // Volatile fields need releasing stores.
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    // Conservatively release stores of object references.
    return StoreNode::release_if_reference(_type);
  } else {
    return MemNode::unordered;
  }
}

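// Normalize the decorators for this access: apply AlwaysAtomicAccesses, run
// the shared decorator fixup, and decide whether an anonymous (Unsafe) load
// must be pinned and made control dependent, or can be proven to stay within
// the bounds of the accessed object.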
void C2Access::fixup_decorators() {
  bool default_mo = (_decorators & MO_DECORATOR_MASK) == 0;
  bool is_unordered = (_decorators & MO_UNORDERED) != 0 || default_mo;
  bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;

  bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;

  if (AlwaysAtomicAccesses && is_unordered) {
    _decorators &= ~MO_DECORATOR_MASK; // clear the MO bits
    _decorators |= MO_RELAXED; // Force the MO_RELAXED decorator with AlwaysAtomicAccesses
  }

  _decorators = AccessInternal::decorator_fixup(_decorators);

  if (is_read && !is_write && anonymous) {
    // To be valid, unsafe loads may depend on other conditions than
    // the one that guards them: pin the Load node
    _decorators |= C2_CONTROL_DEPENDENT_LOAD;
    _decorators |= C2_PINNED_LOAD;
    const TypePtr* adr_type = _addr.type();
    Node* adr = _addr.node();
    if (!needs_cpu_membar() && adr_type->isa_instptr()) {
      assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
      intptr_t offset = Type::OffsetBot;
      AddPNode::Ideal_base_and_offset(adr, &_kit->gvn(), offset);
      if (offset >= 0) {
        int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
        if (offset < s) {
          // Guaranteed to be a valid access, no need to pin it
          _decorators ^= C2_CONTROL_DEPENDENT_LOAD;
          _decorators ^= C2_PINNED_LOAD;
        }
      }
    }
  }
}

//--------------------------- atomic operations---------------------------------

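// The atomic *_at_resolved helpers below all follow the same pattern: build
// the matching LoadStore node, record it as the raw access, and pin it with an
// SCMemProj so its memory effect stays in the memory graph.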
static void pin_atomic_op(C2AtomicAccess& access) {
  if (!access.needs_pinning()) {
    return;
  }
  // SCMemProjNodes represent the memory state of a LoadStore. Their
  // main role is to prevent LoadStore nodes from being optimized away
  // when their results aren't used.
  GraphKit* kit = access.kit();
  Node* load_store = access.raw_access();
  assert(load_store != NULL, "must pin atomic op");
  Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
  kit->set_memory(proj, access.alias_idx());
}

void C2AtomicAccess::set_memory() {
  Node *mem = _kit->memory(_alias_idx);
  _memory = mem;
}

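// Value-returning compare-and-exchange. With compressed oops (LP64), oop
// operands are encoded to narrow oops first and the result is decoded back to
// a regular oop before being returned.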
Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                   Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  MemNode::MemOrd mo = access.mem_node_mo();
  Node* mem = access.memory();

  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();

  Node* load_store = NULL;

  if (access.is_oop()) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
    } else
#endif
    {
      load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
    }
  } else {
    switch (access.type()) {
      case T_BYTE: {
        load_store = kit->gvn().transform(new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_SHORT: {
        load_store = kit->gvn().transform(new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_INT: {
        load_store = kit->gvn().transform(new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_LONG: {
        load_store = kit->gvn().transform(new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  access.set_raw_access(load_store);
  pin_atomic_op(access);

#ifdef _LP64
  if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
    return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  return load_store;
}

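// Boolean compare-and-swap. C2_WEAK_CMPXCHG selects the weak node variants,
// which may fail spuriously.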
Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  DecoratorSet decorators = access.decorators();
  MemNode::MemOrd mo = access.mem_node_mo();
  Node* mem = access.memory();
  bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
  Node* load_store = NULL;
  Node* adr = access.addr().node();

  if (access.is_oop()) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      }
    } else
#endif
    {
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      }
    }
  } else {
    switch(access.type()) {
      case T_BYTE: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_SHORT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_INT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_LONG: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  access.set_raw_access(load_store);
  pin_atomic_op(access);

  return load_store;
}

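// Atomic swap: returns the previous value at the address. Oop values go
// through GetAndSetP/GetAndSetN, with narrow-oop encoding and decoding on
// LP64 as above.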
Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  Node* mem = access.memory();
  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();
  Node* load_store = NULL;

  if (access.is_oop()) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new GetAndSetNNode(kit->control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
    } else
#endif
    {
      load_store = kit->gvn().transform(new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr()));
    }
  } else  {
    switch (access.type()) {
      case T_BYTE:
        load_store = kit->gvn().transform(new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_SHORT:
        load_store = kit->gvn().transform(new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_INT:
        load_store = kit->gvn().transform(new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_LONG:
        load_store = kit->gvn().transform(new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  access.set_raw_access(load_store);
  pin_atomic_op(access);

#ifdef _LP64
  if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
    return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  return load_store;
}

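// Atomic add: only T_BYTE, T_SHORT, T_INT and T_LONG are supported, so there
// is no oop path here.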
Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  Node* load_store = NULL;
  GraphKit* kit = access.kit();
  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();
  Node* mem = access.memory();

  switch(access.type()) {
    case T_BYTE:
      load_store = kit->gvn().transform(new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_SHORT:
      load_store = kit->gvn().transform(new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_INT:
      load_store = kit->gvn().transform(new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_LONG:
      load_store = kit->gvn().transform(new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    default:
      ShouldNotReachHere();
  }

  access.set_raw_access(load_store);
  pin_atomic_op(access);

  return load_store;
}

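// Public atomic entry points: same fence/resolve/delegate structure as
// store_at and load_at above.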
Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicAccess& access, Node* expected_val,
                                          Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicAccess& access, Node* expected_val,
                                           Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* BarrierSetC2::atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return atomic_xchg_at_resolved(access, new_val, value_type);
}

Node* BarrierSetC2::atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return atomic_add_at_resolved(access, new_val, value_type);
}

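// Clone an object or array by copying 8-byte words: the header is excluded
// (with adjustments to keep 8-byte alignment under compressed class pointers)
// and the copy itself is expressed as a 'clonebasic' ArrayCopyNode.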
void BarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
  // Exclude the header but include the array length to copy by 8-byte words.
  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
                            instanceOopDesc::base_offset_in_bytes();
  // base_off:
  // 8  - 32-bit VM
  // 12 - 64-bit VM, compressed klass
  // 16 - 64-bit VM, normal klass
  if (base_off % BytesPerLong != 0) {
    assert(UseCompressedClassPointers, "");
    if (is_array) {
      // Exclude the length to copy by 8-byte words.
      base_off += sizeof(int);
    } else {
      // Include the klass to copy by 8-byte words.
      base_off = instanceOopDesc::klass_offset_in_bytes();
    }
    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
  }
  Node* src_base  = kit->basic_plus_adr(src,  base_off);
  Node* dst_base = kit->basic_plus_adr(dst, base_off);

  // Compute the length also, if needed:
  Node* countx = size;
  countx = kit->gvn().transform(new SubXNode(countx, kit->MakeConX(base_off)));
  countx = kit->gvn().transform(new URShiftXNode(countx, kit->intcon(LogBytesPerLong) ));

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;

  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, false, false);
  ac->set_clonebasic();
  Node* n = kit->gvn().transform(ac);
  if (n == ac) {
    kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
  } else {
    kit->set_all_memory(n);
  }
}