/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/narrowptrnode.hpp"
#include "gc/shared/c2_BarrierSetCodeGen.hpp"
#include "utilities/macros.hpp"

Node* C2BarrierSetCodeGen::store_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, Node* val, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool unaligned = (decorators & C2_ACCESS_UNALIGNED) != 0;
  bool requires_atomic_access = (decorators & C2_ACCESS_ATOMIC) != 0;

  if (bt == T_DOUBLE) {
    val = kit->dstore_rounding(val);
  }

  MemNode::MemOrd mo;
  if (is_vol || is_release) {
    mo = MemNode::release;
  } else {
    // Volatile fields need releasing stores.
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    // Conservatively release stores of object references.
    mo = StoreNode::release_if_reference(bt);
  }

  return kit->store_to_memory(kit->control(), adr, val, bt, adr_type, mo, requires_atomic_access, unaligned, mismatched);
}
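// A minimal sketch of how the ordering selection above plays out for two
// common stores (illustrative only; exact node types depend on the
// platform and on compressed oops):
//
//   // volatile int store: explicit release ordering
//   store_at_resolved(kit, obj, adr, adr_type, val, val_type, T_INT,
//                     C2_MO_VOLATILE);   // StoreI with MemNode::release
//
//   // plain reference store: still released, via release_if_reference()
//   store_at_resolved(kit, obj, adr, adr_type, val, val_type, T_OBJECT,
//                     C2_MO_UNORDERED);  // StoreP/StoreN with MemNode::release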
Node* C2BarrierSetCodeGen::load_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool requires_atomic_access = (decorators & C2_ACCESS_ATOMIC) != 0;
  bool unaligned = (decorators & C2_ACCESS_UNALIGNED) != 0;
  bool no_control = (decorators & C2_ACCESS_FREE_CONTROL) != 0;

  MemNode::MemOrd mo;
  if (is_vol || is_acquire) {
    mo = MemNode::acquire;
  } else {
    mo = MemNode::unordered;
  }

  LoadNode::ControlDependency dep = LoadNode::DependsOnlyOnTest;
  if (anonymous) {
    // To be valid, unsafe loads may depend on other conditions than
    // the one that guards them: pin the Load node
    if (adr_type->isa_instptr()) {
      assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
      intptr_t offset = Type::OffsetBot;
      AddPNode::Ideal_base_and_offset(adr, &kit->gvn(), offset);
      if (offset >= 0) {
        int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
        if (offset < s) {
          // Guaranteed to be a valid access, no need to pin it
          no_control = true;
        } else {
          dep = LoadNode::Pinned;
        }
      }
    }
  }

  return kit->make_load(!no_control ? kit->control() : NULL, adr, val_type, bt, adr_type, mo,
                        dep, requires_atomic_access, unaligned, mismatched);
}
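// The pinning decision above distinguishes three cases for an anonymous
// (unsafe) load from an instance at a constant offset:
//
//   0 <= offset < instance size -> statically in-bounds: the load is
//                                  guaranteed valid and may float freely
//                                  (no_control = true)
//   offset >= instance size     -> possibly mismatched layout: pin the
//                                  load (LoadNode::Pinned)
//   offset unknown (OffsetBot)  -> keep the default control dependency
//                                  (LoadNode::DependsOnlyOnTest)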
Node* C2BarrierSetCodeGen::store_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, Node* val, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool is_relaxed = (decorators & C2_MO_UNORDERED) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool on_heap = (decorators & C2_ACCESS_ON_HEAP) != 0;

  // We will need memory barriers unless we can determine a unique
  // alias category for this reference. (Note: If for some reason
  // the barriers get omitted and the unsafe reference begins to "pollute"
  // the alias analysis of the rest of the graph, either Compile::can_alias
  // or Compile::must_alias will throw a diagnostic assert.)
  bool need_cpu_mem_bar = anonymous && (!is_relaxed || mismatched || !on_heap);

  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write. Also prevents commoning
  // another volatile read.
  if (is_vol || is_release) {
    kit->insert_mem_bar(Op_MemBarRelease);
  }

  Node* store = store_at_resolved(kit, obj, adr, adr_type, val, val_type, bt, decorators);

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  // If not multiple copy atomic, we do the MemBarVolatile before the load.
  if (is_vol && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
    kit->insert_mem_bar(Op_MemBarVolatile); // Use fat membar
  }
  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }

  return store;
}

Node* C2BarrierSetCodeGen::load_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, const Type* val_type, BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool is_vol = (decorators & C2_MO_VOLATILE) != 0;
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool is_relaxed = (decorators & C2_MO_UNORDERED) != 0;
  bool anonymous = (decorators & C2_ACCESS_ON_ANONYMOUS) != 0;
  bool on_heap = (decorators & C2_ACCESS_ON_HEAP) != 0;

  bool need_cpu_mem_bar = anonymous && (!is_relaxed || mismatched || !on_heap);

  // Memory barrier to prevent normal and 'unsafe' accesses from
  // bypassing each other. Happens after null checks, so the
  // exception paths do not take memory state from the memory barrier,
  // so there's no problem making a strong assert about mixing users
  // of safe & unsafe memory.
  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }
  if (is_vol && support_IRIW_for_not_multiple_copy_atomic_cpu) {
    kit->insert_mem_bar(Op_MemBarVolatile);
  }

  Node* p = load_at_resolved(kit, obj, adr, adr_type, val_type, bt, decorators);

  if (is_vol || is_acquire) {
    kit->insert_mem_bar(Op_MemBarAcquire, p);
  }
  if (need_cpu_mem_bar) {
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  }

  return p;
}
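// A minimal sketch of the barrier shapes the two methods above emit for a
// volatile access on a multiple-copy-atomic machine (where
// support_IRIW_for_not_multiple_copy_atomic_cpu is false):
//
//   volatile store:                volatile load:
//     MemBarRelease                  LoadX (MemNode::acquire)
//     StoreX (MemNode::release)      MemBarAcquire
//     MemBarVolatile
//
// On non-multiple-copy-atomic CPUs the MemBarVolatile is instead emitted
// before the volatile load, so independent reads of independent writes
// (IRIW) still appear in a consistent order. Anonymous (unsafe) accesses
// additionally get MemBarCPUOrder fences on both sides.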
//--------------------------- atomic operations---------------------------------

static MemNode::MemOrd atomic_op_mo_from_decorators(C2DecoratorSet decorators) {
  if ((decorators & C2_MO_RELEASE) != 0) {
    return MemNode::release;
  } else if ((decorators & C2_MO_ACQUIRE) != 0) {
    return MemNode::acquire;
  } else if ((decorators & C2_MO_VOLATILE) != 0) {
    return MemNode::seqcst;
  } else {
    return MemNode::unordered;
  }
}

static void pin_atomic_op(GraphKit* kit, Node* load_store, int alias_idx) {
  // SCMemProjNodes represent the memory state of a LoadStore. Their
  // main role is to prevent LoadStore nodes from being optimized away
  // when their results aren't used.
  Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
  kit->set_memory(proj, alias_idx);
}

static Node* atomic_op_membar_prologue(GraphKit* kit, C2DecoratorSet decorators, int alias_idx) {
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool is_volatile = (decorators & C2_MO_VOLATILE) != 0;
  // Memory-model-wise, a LoadStore acts like a little synchronized
  // block, so needs barriers on each side. These don't translate
  // into actual barriers on most machines, but we still need the rest
  // of the compiler to respect ordering.
  if (is_release) {
    kit->insert_mem_bar(Op_MemBarRelease);
  } else if (is_volatile) {
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      kit->insert_mem_bar(Op_MemBarVolatile);
    } else {
      kit->insert_mem_bar(Op_MemBarRelease);
    }
  }
  kit->insert_mem_bar(Op_MemBarCPUOrder);

  // 4984716: MemBars must be inserted before this
  // memory node in order to avoid a false
  // dependency which will confuse the scheduler.
  Node* mem = kit->memory(alias_idx);
  return mem;
}

static void atomic_op_membar_epilogue(GraphKit* kit, C2DecoratorSet decorators) {
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool is_volatile = (decorators & C2_MO_VOLATILE) != 0;
  // Add the trailing membar surrounding the access
  kit->insert_mem_bar(Op_MemBarCPUOrder);
  if (is_acquire || is_volatile) {
    kit->insert_mem_bar(Op_MemBarAcquire);
  }
}

Node* C2BarrierSetCodeGen::cas_val_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                               Node* expected_val, Node* new_val, const Type* value_type,
                                               Node* mem, Node*& load_store, BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  MemNode::MemOrd mo = atomic_op_mo_from_decorators(decorators);

  if (is_obj) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
    } else
#endif
    {
      load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
    }
  } else {
    switch (bt) {
      case T_BYTE: {
        load_store = kit->gvn().transform(new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_SHORT: {
        load_store = kit->gvn().transform(new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_INT: {
        load_store = kit->gvn().transform(new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_LONG: {
        load_store = kit->gvn().transform(new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  return load_store;
}
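// A minimal sketch of the compressed-oops shape produced above for an
// object CAS (LP64 with narrow oops; names abbreviated):
//
//   newval_enc = EncodeP(new_val)       // compress expected/new values
//   oldval_enc = EncodeP(expected_val)
//   res        = CompareAndExchangeN(ctl, mem, adr, newval_enc, oldval_enc)
//   old_val    = DecodeN(res)           // widen the returned old value
//
// The DecodeN step is added by the cas_val_at() wrapper further down,
// after the raw memory state has been pinned with an SCMemProj.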
Node* C2BarrierSetCodeGen::cas_bool_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                                Node* expected_val, Node* new_val, const Type* value_type,
                                                Node* mem, BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  bool is_weak_cas = (decorators & C2_WEAK_CAS) != 0;

  MemNode::MemOrd mo = atomic_op_mo_from_decorators(decorators);

  Node* load_store = NULL;
  if (is_obj) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      }
    } else
#endif
    {
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      }
    }
  } else {
    switch (bt) {
      case T_BYTE: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_SHORT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_INT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_LONG: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  return load_store;
}
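// Note on the weak flavors above: a WeakCompareAndSwap node is allowed to
// fail spuriously (report failure even though the expected value was
// present), which is why the weak variants only appear on this boolean
// path and why their callers retry in a loop, roughly:
//
//   do { /* recompute new_val */ } while (!cas_bool_at(..., C2_WEAK_CAS));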
Node* C2BarrierSetCodeGen::swap_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                            Node* new_val, const Type* value_type,
                                            Node* mem, Node*& load_store, BasicType bt, C2DecoratorSet decorators) {
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  if (is_obj) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new GetAndSetNNode(kit->control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
    } else
#endif
    {
      load_store = kit->gvn().transform(new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr()));
    }
  } else {
    switch (bt) {
      case T_BYTE:
        load_store = kit->gvn().transform(new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_SHORT:
        load_store = kit->gvn().transform(new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_INT:
        load_store = kit->gvn().transform(new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_LONG:
        load_store = kit->gvn().transform(new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  pin_atomic_op(kit, load_store, alias_idx);

  Node* result = load_store;
#ifdef _LP64
  if (is_obj && adr->bottom_type()->is_ptr_to_narrowoop()) {
    result = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  return result;
}

Node* C2BarrierSetCodeGen::fetch_and_add_at_resolved(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                                     Node* new_val, const Type* value_type,
                                                     Node* mem, BasicType bt, C2DecoratorSet decorators) {
  Node* load_store = NULL;
  switch (bt) {
    case T_BYTE:
      load_store = kit->gvn().transform(new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_SHORT:
      load_store = kit->gvn().transform(new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_INT:
      load_store = kit->gvn().transform(new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_LONG:
      load_store = kit->gvn().transform(new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    default:
      ShouldNotReachHere();
  }

  pin_atomic_op(kit, load_store, alias_idx);

  return load_store;
}

Node* C2BarrierSetCodeGen::cas_val_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                      Node* expected_val, Node* new_val, const Type* value_type,
                                      BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
  Node* load_store = NULL;
  Node* result = cas_val_at_resolved(kit, obj, adr, adr_type, alias_idx, expected_val, new_val, value_type, mem, load_store, bt, decorators);

  pin_atomic_op(kit, result, alias_idx);

#ifdef _LP64
  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
  if (is_obj && adr->bottom_type()->is_ptr_to_narrowoop()) {
    result = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

Node* C2BarrierSetCodeGen::cas_bool_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                       Node* expected_val, Node* new_val, const Type* value_type,
                                       BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
  Node* result = cas_bool_at_resolved(kit, obj, adr, adr_type, alias_idx, expected_val, new_val, value_type, mem, bt, decorators);

  pin_atomic_op(kit, result, alias_idx);

  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

Node* C2BarrierSetCodeGen::swap_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                   Node* new_val, const Type* value_type,
                                   BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
  Node* load_store = NULL;
  Node* result = swap_at_resolved(kit, obj, adr, adr_type, alias_idx, new_val, value_type, mem, load_store, bt, decorators);
  atomic_op_membar_epilogue(kit, decorators);
  return result;
}

Node* C2BarrierSetCodeGen::fetch_and_add_at(GraphKit* kit, Node* obj, Node* adr, const TypePtr* adr_type, int alias_idx,
                                            Node* new_val, const Type* value_type,
                                            BasicType bt, C2DecoratorSet decorators) {
  decorators = fixup_decorators(decorators);
  Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
  Node* result = fetch_and_add_at_resolved(kit, obj, adr, adr_type, alias_idx, new_val, value_type, mem, bt, decorators);
  atomic_op_membar_epilogue(kit, decorators);
  return result;
}
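// The four wrappers above share one shape; a minimal sketch:
//
//   Node* mem = atomic_op_membar_prologue(kit, decorators, alias_idx);
//       // leading MemBarRelease/MemBarVolatile plus MemBarCPUOrder
//   Node* result = <op>_at_resolved(kit, ..., mem, ...);
//       // emits the LoadStore node itself
//   pin_atomic_op(kit, load_store, alias_idx);
//       // SCMemProj keeps the LoadStore alive and publishes its memory
//   atomic_op_membar_epilogue(kit, decorators);
//       // MemBarCPUOrder plus trailing MemBarAcquire
//
// The only variation is where the pinning happens: the CAS wrappers pin
// in the wrapper, while swap_at_resolved() and fetch_and_add_at_resolved()
// pin internally.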
void C2BarrierSetCodeGen::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) {
  // Exclude the header but include the array length to copy by 8-byte words.
  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
                            instanceOopDesc::base_offset_in_bytes();
  // base_off:
  // 8  - 32-bit VM
  // 12 - 64-bit VM, compressed klass
  // 16 - 64-bit VM, normal klass
  if (base_off % BytesPerLong != 0) {
    assert(UseCompressedClassPointers, "");
    if (is_array) {
      // Exclude the length to copy by 8-byte words.
      base_off += sizeof(int);
    } else {
      // Include the klass to copy by 8-byte words.
      base_off = instanceOopDesc::klass_offset_in_bytes();
    }
    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
  }
  Node* src_base = kit->basic_plus_adr(src, base_off);
  Node* dst_base = kit->basic_plus_adr(dst, base_off);

  // Compute the length also, if needed:
  Node* countx = size;
  countx = kit->gvn().transform(new SubXNode(countx, kit->MakeConX(base_off)));
  countx = kit->gvn().transform(new URShiftXNode(countx, kit->intcon(LogBytesPerLong)));

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;

  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, false, false);
  ac->set_clonebasic();
  Node* n = kit->gvn().transform(ac);
  if (n == ac) {
    kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
  } else {
    kit->set_all_memory(n);
  }
}

C2DecoratorSet C2BarrierSetCodeGen::fixup_decorators(C2DecoratorSet decorators) {
  bool is_volatile = (decorators & C2_MO_VOLATILE) != 0;
  bool is_acquire = (decorators & C2_MO_ACQUIRE) != 0;
  bool is_release = (decorators & C2_MO_RELEASE) != 0;
  bool is_relaxed = (decorators & C2_MO_UNORDERED) != 0;
  bool is_atomic = (decorators & C2_ACCESS_ATOMIC) != 0;

  if (is_volatile) {
    is_acquire = true;
    is_release = true;
  }

  if (!is_acquire && !is_release) {
    is_relaxed = true;
  } else {
    is_atomic = true;
  }

  // Some accesses require access atomicity for all types, notably longs and doubles.
  // When AlwaysAtomicAccesses is enabled, all accesses are atomic.
  is_atomic = is_atomic || AlwaysAtomicAccesses;

  if (is_acquire) {
    decorators = decorators | C2_MO_ACQUIRE;
  }
  if (is_release) {
    decorators = decorators | C2_MO_RELEASE;
  }
  if (is_relaxed) {
    decorators = decorators | C2_MO_UNORDERED;
  }
  if (is_atomic) {
    decorators = decorators | C2_ACCESS_ATOMIC;
  }

  return decorators;
}
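// A minimal sketch of the normalization above (assuming no other
// decorators are set on input):
//
//   fixup_decorators(C2_MO_VOLATILE)
//     == C2_MO_VOLATILE | C2_MO_ACQUIRE | C2_MO_RELEASE | C2_ACCESS_ATOMIC
//   fixup_decorators(0)
//     == C2_MO_UNORDERED   (| C2_ACCESS_ATOMIC if AlwaysAtomicAccesses)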