/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/c2_BarrierSetCodeGen.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/narrowptrnode.hpp"
#include "utilities/macros.hpp"

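// Emit the raw store once the base object and address have been resolved.
// The memory ordering for the store is derived from the decorators.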
Node* C2BarrierSetCodeGen::store_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, Node* val, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool unaligned = (decorators & C2_ACCESS_UNALIGNED) != 0;
  bool requires_atomic_access = (decorators & C2_ACCESS_ATOMIC) != 0;

  if (bt == T_DOUBLE) {
    val = kit->dstore_rounding(val);
  }

  MemNode::MemOrd mo;
  if (is_vol || is_release) {
    // Volatile fields need releasing stores.
    mo = MemNode::release;
  } else {
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    // Conservatively release stores of object references.
    mo = StoreNode::release_if_reference(bt);
  }

  return kit->store_to_memory(kit->control(), adr, val, bt, adr_type, mo, requires_atomic_access, unaligned, mismatched);
}

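// Emit the raw load once the base object and address have been resolved.
// Unsafe (anonymous) loads keep their control input unless the access can
// be proven to lie within the bounds of the object.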
Node* C2BarrierSetCodeGen::load_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool requires_atomic_access = (decorators & C2_ACCESS_ATOMIC) != 0;
  bool unaligned = (decorators & C2_ACCESS_UNALIGNED) != 0;
  bool no_control = (decorators & C2_ACCESS_FREE_CONTROL) != 0;

  MemNode::MemOrd mo;
  if (is_vol || is_acquire) {
    mo = MemNode::acquire;
  } else {
    mo = MemNode::unordered;
  }

  LoadNode::ControlDependency dep = LoadNode::DependsOnlyOnTest;
  if (anonymous) {
    // To be valid, unsafe loads may depend on other conditions than
    // the one that guards them: pin the Load node
    if (adr_type->isa_instptr()) {
      assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
      intptr_t offset = Type::OffsetBot;
      AddPNode::Ideal_base_and_offset(adr, &kit->gvn(), offset);
      if (offset >= 0) {
        int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
        if (offset < s) {
          // Guaranteed to be a valid access, no need to pin it
          no_control = true;
        } else {
          dep = LoadNode::Pinned;
        }
      }
    }
  }

  return kit->make_load(!no_control ? kit->control() : NULL, adr, val_type, bt, adr_type, mo,
                        dep, requires_atomic_access, unaligned, mismatched);
}

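// store_at wraps store_at_resolved with the CPU-order and release membars
// required by volatile and unsafe (anonymous) accesses.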
Node* C2BarrierSetCodeGen::store_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, Node* val, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool is_relaxed = (decorators & C2_MO_UNORDERED) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool on_heap = (decorators & C2_ACCESS_ON_HEAP) != 0;

  // We will need memory barriers unless we can determine a unique
  // alias category for this reference.  (Note:  If for some reason
  // the barriers get omitted and the unsafe reference begins to "pollute"
  // the alias analysis of the rest of the graph, either Compile::can_alias
  // or Compile::must_alias will throw a diagnostic assert.)
  bool need_cpu_mem_bar = anonymous && (!is_relaxed || mismatched || !on_heap);

  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write.  Also prevents commoning
  // another volatile read.
  if (is_vol || is_release) {
    kit->insert_mem_bar(Op_MemBarRelease);
  }

  Node* store = store_at_resolved(kit, obj, adr, adr_type, val, val_type, bt, decorators);

  // If the reference is volatile, prevent following volatile ops from
  // floating up before the volatile write. If the CPU is not multiple
  // copy atomic, the MemBarVolatile is emitted before the volatile load
  // instead (see load_at).
  if (is_vol && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
    kit->insert_mem_bar(Op_MemBarVolatile); // Use fat membar
  }
  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }

  return store;
}

Node* C2BarrierSetCodeGen::load_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool is_relaxed = (decorators & C2_MO_UNORDERED) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool on_heap = (decorators & C2_ACCESS_ON_HEAP) != 0;

  bool need_cpu_mem_bar = anonymous && (!is_relaxed || mismatched || !on_heap);

  // Memory barrier to prevent normal and 'unsafe' accesses from
  // bypassing each other.  Happens after null checks, so the
  // exception paths do not take memory state from the memory barrier,
  // so there is no problem making a strong assert about mixing users
  // of safe & unsafe memory.
  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }
  if (is_vol && support_IRIW_for_not_multiple_copy_atomic_cpu) {
    kit->insert_mem_bar(Op_MemBarVolatile);
  }

  Node* p = load_at_resolved(kit, obj, adr, adr_type, val_type, bt, decorators);

  if (is_vol || is_acquire) {
    kit->insert_mem_bar(Op_MemBarAcquire, p);
  }
  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }

  return p;
}

//--------------------------- atomic operations---------------------------------

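// Map the access decorators to the memory ordering required by the
// LoadStore node.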
static MemNode::MemOrd atomic_op_mo_from_decorators(C2DecoratorSet decorators) {
  // Check the strongest ordering first: a volatile access both acquires
  // and releases, so C2_MO_ACQUIRE/C2_MO_RELEASE may be set as well.
  if ((decorators & C2_MO_VOLATILE) != 0) {
    return MemNode::seqcst;
  } else if ((decorators & C2_MO_RELEASE) != 0) {
    return MemNode::release;
  } else if ((decorators & C2_MO_ACQUIRE) != 0) {
    return MemNode::acquire;
  } else {
    return MemNode::unordered;
  }
}

static void pin_atomic_op(GraphKit* kit, Node* load_store, int alias_idx) {
  // SCMemProjNodes represent the memory state of a LoadStore. Their
  // main role is to prevent LoadStore nodes from being optimized away
  // when their results aren't used.
  Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
  kit->set_memory(proj, alias_idx);
}

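// Insert the leading membars of an atomic LoadStore operation and return
// the memory state that the operation should consume.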
static Node* atomic_op_membar_prologue(GraphKit* kit, C2DecoratorSet decorators, int alias_idx) {
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool is_volatile = (decorators & C2_MO_VOLATILE) != 0;
  // Memory-model-wise, a LoadStore acts like a little synchronized
  // block, so it needs barriers on each side.  These don't translate
  // into actual barriers on most machines, but we still need the rest
  // of the compiler to respect ordering.
  if (is_volatile) {
    // Volatile implies release; check it first so that IRIW-requiring
    // CPUs still get the full MemBarVolatile.
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      kit->insert_mem_bar(Op_MemBarVolatile);
    } else {
      kit->insert_mem_bar(Op_MemBarRelease);
    }
  } else if (is_release) {
    kit->insert_mem_bar(Op_MemBarRelease);
  }
  kit->insert_mem_bar(Op_MemBarCPUOrder);

  // 4984716: MemBars must be inserted before this
  //          memory node in order to avoid a false
  //          dependency which will confuse the scheduler.
  Node* mem = kit->memory(alias_idx);
  return mem;
}

static void atomic_op_membar_epilogue(GraphKit* kit, C2DecoratorSet decorators) {
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool is_volatile = (decorators & C2_MO_VOLATILE) != 0;
  // Add the trailing membar surrounding the access
  kit->insert_mem_bar(Op_MemBarCPUOrder);
  if (is_acquire || is_volatile) {
    kit->insert_mem_bar(Op_MemBarAcquire);
  }
}

Node* C2BarrierSetCodeGen::cas_val_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                               Node* expected_val, Node* new_val, const Type* value_type,
                                               Node* mem, Node*& load_store, BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  MemNode::MemOrd mo = atomic_op_mo_from_decorators(decorators);

  if (is_obj) {
#ifdef _LP64
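    // With compressed oops, the expected and new values are encoded to
    // narrow oops before the exchange; the narrow result is decoded back
    // to a regular oop at the end of this function.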
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
    } else
#endif
    {
      load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
    }
  } else {
    switch (bt) {
      case T_BYTE: {
        load_store = kit->gvn().transform(new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_SHORT: {
        load_store = kit->gvn().transform(new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_INT: {
        load_store = kit->gvn().transform(new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_LONG: {
        load_store = kit->gvn().transform(new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  pin_atomic_op(kit, load_store, alias_idx);

  Node* result = load_store;
#ifdef _LP64
  if (is_obj && adr->bottom_type()->is_ptr_to_narrowoop()) {
    result = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  return result;
}

Node* C2BarrierSetCodeGen::cas_bool_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                                Node* expected_val, Node* new_val, const Type* value_type,
                                                Node* mem, BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  bool is_weak_cas = (decorators & C2_WEAK_CAS) != 0;

  MemNode::MemOrd mo = atomic_op_mo_from_decorators(decorators);

  Node* load_store = NULL;
  if (is_obj) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      }
    } else
#endif
    {
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      }
    }
  } else {
    switch (bt) {
      case T_BYTE: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_SHORT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_INT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_LONG: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  pin_atomic_op(kit, load_store, alias_idx);

  return load_store;
}

Node* C2BarrierSetCodeGen::swap_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                            Node* new_val, const Type* value_type,
                                            Node* mem, Node*& load_store, BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  if (is_obj) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new GetAndSetNNode(kit->control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
    } else
#endif
    {
      load_store = kit->gvn().transform(new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr()));
    }
  } else {
    switch (bt) {
      case T_BYTE:
        load_store = kit->gvn().transform(new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_SHORT:
        load_store = kit->gvn().transform(new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_INT:
        load_store = kit->gvn().transform(new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_LONG:
        load_store = kit->gvn().transform(new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  pin_atomic_op(kit, load_store, alias_idx);

  Node* result = load_store;
#ifdef _LP64
  if (is_obj && adr->bottom_type()->is_ptr_to_narrowoop()) {
    result = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  return result;
}

Node* C2BarrierSetCodeGen::fetch_and_add_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                                     Node* new_val, const Type* value_type,
                                                     Node* mem, BasicType bt, C2DecoratorSet decorators) {
  Node* load_store = NULL;
  switch (bt) {
    case T_BYTE:
      load_store = kit->gvn().transform(new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_SHORT:
      load_store = kit->gvn().transform(new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_INT:
      load_store = kit->gvn().transform(new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_LONG:
      load_store = kit->gvn().transform(new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    default:
      ShouldNotReachHere();
  }

  pin_atomic_op(kit, load_store, alias_idx);

  return load_store;
}

Node* C2BarrierSetCodeGen::cas_val_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                      Node* expected_val, Node* new_val, const Type* value_type,
                                      BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
  Node* load_store = NULL;
  Node* result = cas_val_at_resolved(kit, obj, adr, adr_type, alias_idx, expected_val, new_val, value_type, mem, load_store, bt, decorators);
  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

Node* C2BarrierSetCodeGen::cas_bool_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                       Node* expected_val, Node* new_val, const Type* value_type,
                                       BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
  Node* result = cas_bool_at_resolved(kit, obj, adr, adr_type, alias_idx, expected_val, new_val, value_type, mem, bt, decorators);
  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

Node* C2BarrierSetCodeGen::swap_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                   Node* new_val, const Type* value_type,
                                   BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
  Node* load_store = NULL;
  Node* result = swap_at_resolved(kit, obj, adr, adr_type, alias_idx, new_val, value_type, mem, load_store, bt, decorators);
  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

Node* C2BarrierSetCodeGen::fetch_and_add_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                            Node* new_val, const Type* value_type,
                                            BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
  Node* result = fetch_and_add_at_resolved(kit, obj, adr, adr_type, alias_idx, new_val, value_type, mem, bt, decorators);
  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

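// Emit code to copy the payload of src into dst in 8-byte words; the start
// offset is chosen below so that the copy stays 8-byte aligned.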
void C2BarrierSetCodeGen::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) {
  // Exclude the header but include the array length to copy by 8-byte words.
  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
                            instanceOopDesc::base_offset_in_bytes();
  // base_off:
  // 8  - 32-bit VM
  // 12 - 64-bit VM, compressed klass
  // 16 - 64-bit VM, normal klass
  if (base_off % BytesPerLong != 0) {
    assert(UseCompressedClassPointers, "");
    if (is_array) {
      // Exclude the length to copy by 8-byte words.
      base_off += sizeof(int);
    } else {
      // Include the klass to copy by 8-byte words.
      base_off = instanceOopDesc::klass_offset_in_bytes();
    }
    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
  }
  Node* src_base = kit->basic_plus_adr(src, base_off);
  Node* dst_base = kit->basic_plus_adr(dst, base_off);

  // Compute the length also, if needed:
  Node* countx = size;
  countx = kit->gvn().transform(new SubXNode(countx, kit->MakeConX(base_off)));
  countx = kit->gvn().transform(new URShiftXNode(countx, kit->intcon(LogBytesPerLong)));

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;

  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, false, false);
  ac->set_clonebasic();
  Node* n = kit->gvn().transform(ac);
  if (n == ac) {
    kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
  } else {
    kit->set_all_memory(n);
  }
}

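// Normalize the memory ordering decorators: volatile implies both acquire
// and release, accesses with no explicit ordering default to unordered,
// and ordered accesses are always performed atomically.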
C2DecoratorSet C2BarrierSetCodeGen::fixup_decorators(C2DecoratorSet decorators) {
  bool is_volatile = (decorators & C2_MO_VOLATILE) != 0;
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool is_relaxed = (decorators & C2_MO_UNORDERED) != 0;
  bool is_atomic = (decorators & C2_ACCESS_ATOMIC) != 0;

  if (is_volatile) {
    is_acquire = true;
    is_release = true;
  }

  if (!is_acquire && !is_release) {
    is_relaxed = true;
  } else {
    is_atomic = true;
  }

  // Some accesses require access atomicity for all types, notably longs and doubles.
  // When AlwaysAtomicAccesses is enabled, all accesses are atomic.
  is_atomic = is_atomic || AlwaysAtomicAccesses;

  if (is_acquire) {
    decorators = decorators | C2_MO_ACQUIRE;
  }
  if (is_release) {
    decorators = decorators | C2_MO_RELEASE;
  }
  if (is_relaxed) {
    decorators = decorators | C2_MO_UNORDERED;
  }
  if (is_atomic) {
    decorators = decorators | C2_ACCESS_ATOMIC;
  }

  return decorators;
}