/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/c2BarrierSetCodeGen.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/narrowptrnode.hpp"
#include "utilities/macros.hpp"

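// GC-neutral implementation of a store to a resolved address: selects the
// memory ordering from the decorators (releasing store for volatile/release
// accesses, otherwise StoreNode::release_if_reference() to conservatively
// release stores of object references) and emits the StoreNode.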
Node* C2BarrierSetCodeGen::store_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, Node* val, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool unaligned = (decorators & C2_ACCESS_UNALIGNED) != 0;
  bool requires_atomic_access = (decorators & C2_ACCESS_ATOMIC) != 0;

  if (bt == T_DOUBLE) {
    val = kit->dstore_rounding(val);
  }

  MemNode::MemOrd mo;
  if (is_vol || is_release) {
    // Volatile fields need releasing stores.
    mo = MemNode::release;
  } else {
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    // Conservatively release stores of object references.
    mo = StoreNode::release_if_reference(bt);
  }

  return kit->store_to_memory(kit->control(), adr, val, bt, adr_type, mo, requires_atomic_access, unaligned, mismatched);
}

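// GC-neutral implementation of a load from a resolved address: selects the
// memory ordering from the decorators and emits the LoadNode. Anonymous
// (unsafe) accesses pin the load to its control input, since such loads may
// depend on conditions other than the one that guards them.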
Node* C2BarrierSetCodeGen::load_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool requires_atomic_access = (decorators & C2_ACCESS_ATOMIC) != 0;
  bool unaligned = (decorators & C2_ACCESS_UNALIGNED) != 0;
  bool no_control = (decorators & C2_ACCESS_FREE_CONTROL) != 0;

  MemNode::MemOrd mo;
  if (is_vol || is_acquire) {
    mo = MemNode::acquire;
  } else {
    mo = MemNode::unordered;
  }

  // To be valid, unsafe loads may depend on other conditions than
  // the one that guards them: pin the Load node.
  return kit->make_load(no_control ? NULL : kit->control(), adr, val_type, bt, adr_type, mo,
                        anonymous ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest,
                        requires_atomic_access, unaligned, mismatched);
}

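// Public entry point for stores: normalizes the decorators, inserts the
// leading membars required by the access (CPU-order barriers around anonymous
// accesses, a release barrier before volatile stores), delegates the actual
// store to store_at_resolved(), and then inserts the trailing membars.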
Node* C2BarrierSetCodeGen::store_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, Node* val, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool is_relaxed = (decorators & C2_MO_RELAXED) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool on_heap = (decorators & C2_ACCESS_ON_HEAP) != 0;

  // We will need memory barriers unless we can determine a unique
  // alias category for this reference.  (Note:  If for some reason
  // the barriers get omitted and the unsafe reference begins to "pollute"
  // the alias analysis of the rest of the graph, either Compile::can_alias
  // or Compile::must_alias will throw a diagnostic assert.)
  bool need_cpu_mem_bar = anonymous && (!is_relaxed || mismatched || !on_heap);

  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write.  Also prevents commoning
  // another volatile read.
  if (is_vol || is_release) {
    kit->insert_mem_bar(Op_MemBarRelease);
  }

  Node* store = store_at_resolved(kit, obj, adr, adr_type, val, val_type, bt, decorators);

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
    // If not multiple-copy-atomic, we do the MemBarVolatile before the load.
    kit->insert_mem_bar(Op_MemBarVolatile); // Use fat membar
  }
  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }

  return store;
}

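// Public entry point for loads: mirrors store_at() with leading and trailing
// CPU-order barriers for anonymous accesses, the IRIW barrier before volatile
// loads where required, the load itself via load_at_resolved(), and a trailing
// acquire barrier for volatile/acquire loads.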
Node* C2BarrierSetCodeGen::load_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool is_relaxed = (decorators & C2_MO_RELAXED) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool on_heap = (decorators & C2_ACCESS_ON_HEAP) != 0;

  bool need_cpu_mem_bar = anonymous && (!is_relaxed || mismatched || !on_heap);

  // Memory barrier to prevent normal and 'unsafe' accesses from
  // bypassing each other.  Happens after null checks, so the
  // exception paths do not take memory state from the memory barrier,
  // so there are no problems making a strong assert about mixing users
  // of safe & unsafe memory.
  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }
  if (is_vol && support_IRIW_for_not_multiple_copy_atomic_cpu) {
    kit->insert_mem_bar(Op_MemBarVolatile);
  }

  Node* p = load_at_resolved(kit, obj, adr, adr_type, val_type, bt, decorators);

  if (is_vol || is_acquire) {
    kit->insert_mem_bar(Op_MemBarAcquire, p);
  }
  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }

  return p;
}

//--------------------------- atomic operations ---------------------------------

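// Map the memory ordering decorators of an atomic access to the MemNode
// memory ordering used by the LoadStore nodes.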
static MemNode::MemOrd atomic_op_mo_from_decorators(C2DecoratorSet decorators) {
  // Volatile is checked first: fixup_decorators() also sets the acquire and
  // release bits for volatile accesses, which need a seqcst LoadStore node.
  if ((decorators & C2_MO_VOLATILE) != 0) {
    return MemNode::seqcst;
  } else if ((decorators & C2_MO_RELEASE) != 0) {
    return MemNode::release;
  } else if ((decorators & C2_MO_ACQUIRE) != 0) {
    return MemNode::acquire;
  } else {
    return MemNode::unordered;
  }
}

static void pin_atomic_op(GraphKit* kit, Node* load_store, int alias_idx) {
  // SCMemProjNodes represent the memory state of a LoadStore. Their
  // main role is to prevent LoadStore nodes from being optimized away
  // when their results aren't used.
  Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
  kit->set_memory(proj, alias_idx);
}

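// Insert the leading membars for an atomic (LoadStore) access and return the
// memory state the LoadStore node should consume.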
static Node* atomic_op_membar_prologue(GraphKit* kit, C2DecoratorSet decorators, int alias_idx) {
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool is_volatile = (decorators & C2_MO_VOLATILE) != 0;
  // Memory-model-wise, a LoadStore acts like a little synchronized
  // block, so needs barriers on each side.  These don't translate
  // into actual barriers on most machines, but we still need the rest
  // of the compiler to respect ordering.
  // Volatile is checked first because fixup_decorators() also sets the
  // release bit for volatile accesses.
  if (is_volatile) {
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      kit->insert_mem_bar(Op_MemBarVolatile);
    } else {
      kit->insert_mem_bar(Op_MemBarRelease);
    }
  } else if (is_release) {
    kit->insert_mem_bar(Op_MemBarRelease);
  }
  kit->insert_mem_bar(Op_MemBarCPUOrder);

  // 4984716: MemBars must be inserted before this
  //          memory node in order to avoid a false
  //          dependency which will confuse the scheduler.
  Node* mem = kit->memory(alias_idx);
  return mem;
}

static void atomic_op_membar_epilogue(GraphKit* kit, C2DecoratorSet decorators) {
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool is_volatile = (decorators & C2_MO_VOLATILE) != 0;
  // Add the trailing membar surrounding the access
  kit->insert_mem_bar(Op_MemBarCPUOrder);
  if (is_acquire || is_volatile) {
    kit->insert_mem_bar(Op_MemBarAcquire);
  }
}

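// Value-returning compare-and-exchange at a resolved address. Object values
// are CASed as narrow oops when the address points to a compressed oop field
// (the result is decoded back to a regular oop); primitive values map directly
// to the typed CompareAndExchange nodes.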
Node* C2BarrierSetCodeGen::cas_val_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                               Node* expected_val, Node* new_val, const Type* value_type,
                                               Node* mem, Node*& load_store, BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  MemNode::MemOrd mo = atomic_op_mo_from_decorators(decorators);

  if (is_obj) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
    } else
#endif
    {
      load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
    }
  } else {
    switch (bt) {
      case T_BYTE: {
        load_store = kit->gvn().transform(new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_SHORT: {
        load_store = kit->gvn().transform(new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_INT: {
        load_store = kit->gvn().transform(new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_LONG: {
        load_store = kit->gvn().transform(new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  pin_atomic_op(kit, load_store, alias_idx);

  Node* result = load_store;
#ifdef _LP64
  if (is_obj && adr->bottom_type()->is_ptr_to_narrowoop()) {
    result = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  return result;
}

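// Boolean-returning compare-and-swap at a resolved address. Chooses between
// the strong and weak CompareAndSwap node families based on the C2_WEAK_CAS
// decorator, with the same narrow-oop encoding as the value-returning variant.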
Node* C2BarrierSetCodeGen::cas_bool_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                                Node* expected_val, Node* new_val, const Type* value_type,
                                                Node* mem, BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  bool is_weak_cas = (decorators & C2_WEAK_CAS) != 0;

  MemNode::MemOrd mo = atomic_op_mo_from_decorators(decorators);

  Node* load_store = NULL;
  if (is_obj) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      }
    } else
#endif
    {
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      }
    }
  } else {
    switch (bt) {
      case T_BYTE: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_SHORT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_INT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_LONG: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  pin_atomic_op(kit, load_store, alias_idx);

  return load_store;
}

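// Atomic exchange (get-and-set) at a resolved address, with narrow-oop
// encoding/decoding for compressed object fields.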
Node* C2BarrierSetCodeGen::swap_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                            Node* new_val, const Type* value_type,
                                            Node* mem, Node*& load_store, BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  if (is_obj) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new GetAndSetNNode(kit->control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
    } else
#endif
    {
      load_store = kit->gvn().transform(new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr()));
    }
  } else {
    switch (bt) {
      case T_BYTE:
        load_store = kit->gvn().transform(new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_SHORT:
        load_store = kit->gvn().transform(new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_INT:
        load_store = kit->gvn().transform(new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_LONG:
        load_store = kit->gvn().transform(new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  pin_atomic_op(kit, load_store, alias_idx);

  Node* result = load_store;
#ifdef _LP64
  if (is_obj && adr->bottom_type()->is_ptr_to_narrowoop()) {
    result = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  return result;
}

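// Atomic fetch-and-add at a resolved address; only integral types are
// supported.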
Node* C2BarrierSetCodeGen::fetch_and_add_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                                     Node* new_val, const Type* value_type,
                                                     Node* mem, BasicType bt, C2DecoratorSet decorators) {
  Node* load_store = NULL;
  switch (bt) {
    case T_BYTE:
      load_store = kit->gvn().transform(new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_SHORT:
      load_store = kit->gvn().transform(new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_INT:
      load_store = kit->gvn().transform(new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_LONG:
      load_store = kit->gvn().transform(new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    default:
      ShouldNotReachHere();
  }

  pin_atomic_op(kit, load_store, alias_idx);

  return load_store;
}

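// Public entry points for the atomic accesses: each normalizes the decorators,
// wraps the resolved operation in the leading and trailing membars, and
// returns the result.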
Node* C2BarrierSetCodeGen::cas_val_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                      Node* expected_val, Node* new_val, const Type* value_type,
                                      BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
  Node* load_store = NULL;
  Node* result = cas_val_at_resolved(kit, obj, adr, adr_type, alias_idx, expected_val, new_val, value_type, mem, load_store, bt, decorators);
  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

Node* C2BarrierSetCodeGen::cas_bool_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                       Node* expected_val, Node* new_val, const Type* value_type,
                                       BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
  Node* result = cas_bool_at_resolved(kit, obj, adr, adr_type, alias_idx, expected_val, new_val, value_type, mem, bt, decorators);
  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

Node* C2BarrierSetCodeGen::swap_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                   Node* new_val, const Type* value_type,
                                   BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
  Node* load_store = NULL;
  Node* result = swap_at_resolved(kit, obj, adr, adr_type, alias_idx, new_val, value_type, mem, load_store, bt, decorators);
  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

Node* C2BarrierSetCodeGen::fetch_and_add_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                            Node* new_val, const Type* value_type,
                                            BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
  Node* result = fetch_and_add_at_resolved(kit, obj, adr, adr_type, alias_idx, new_val, value_type, mem, bt, decorators);
  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

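// Emit a GC-neutral object clone as a raw copy in 8-byte words, starting at an
// 8-byte-aligned offset past the header (the array length, or the klass word
// under compressed class pointers, may be included to keep the copy aligned).
// The copy itself is modeled with an ArrayCopyNode in clone-basic mode.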
void C2BarrierSetCodeGen::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) {
  // Exclude the header but include the array length to copy by 8-byte words.
  // Can't use base_offset_in_bytes(bt) since the basic type is unknown.
  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
                            instanceOopDesc::base_offset_in_bytes();
  // base_off:
  // 8  - 32-bit VM
  // 12 - 64-bit VM, compressed klass
  // 16 - 64-bit VM, normal klass
  if (base_off % BytesPerLong != 0) {
    assert(UseCompressedClassPointers, "");
    if (is_array) {
      // Exclude the length to copy by 8-byte words.
      base_off += sizeof(int);
    } else {
      // Include the klass to copy by 8-byte words.
      base_off = instanceOopDesc::klass_offset_in_bytes();
    }
    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
  }
  Node* src_base = kit->basic_plus_adr(src, base_off);
  Node* dst_base = kit->basic_plus_adr(dst, base_off);

  // Compute the length also, if needed:
  Node* countx = size;
  countx = kit->gvn().transform(new SubXNode(countx, kit->MakeConX(base_off)));
  countx = kit->gvn().transform(new URShiftXNode(countx, kit->intcon(LogBytesPerLong)));

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;

  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, false, false);
  ac->set_clonebasic();
  Node* n = kit->gvn().transform(ac);
  if (n == ac) {
    kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
  } else {
    kit->set_all_memory(n);
  }
}

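// Normalize the memory ordering decorators: volatile implies both acquire and
// release, any ordered access is also atomic, and accesses without ordering
// constraints are marked relaxed. AlwaysAtomicAccesses forces atomicity for
// all accesses.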
C2DecoratorSet C2BarrierSetCodeGen::fixup_decorators(C2DecoratorSet decorators) {
  bool is_volatile = (decorators & C2_MO_VOLATILE) != 0;
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool is_relaxed = (decorators & C2_MO_RELAXED) != 0;
  bool is_atomic = (decorators & C2_ACCESS_ATOMIC) != 0;

  if (is_volatile) {
    is_acquire = true;
    is_release = true;
  }

  if (!is_acquire && !is_release) {
    is_relaxed = true;
  } else {
    is_atomic = true;
  }

  // Some accesses require access atomicity for all types, notably longs and doubles.
  // When AlwaysAtomicAccesses is enabled, all accesses are atomic.
  is_atomic = is_atomic || AlwaysAtomicAccesses;

  if (is_acquire) {
    decorators = decorators | C2_MO_ACQUIRE;
  }
  if (is_release) {
    decorators = decorators | C2_MO_RELEASE;
  }
  if (is_relaxed) {
    decorators = decorators | C2_MO_RELAXED;
  }
  if (is_atomic) {
    decorators = decorators | C2_ACCESS_ATOMIC;
  }

  return decorators;
}