/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _enqueue_barriers(new (comp_arena) GrowableArray<ShenandoahEnqueueBarrierNode*>(comp_arena, 8, 0, NULL)),
    _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, NULL)) {
}

int ShenandoahBarrierSetC2State::enqueue_barriers_count() const {
  return _enqueue_barriers->length();
}

ShenandoahEnqueueBarrierNode* ShenandoahBarrierSetC2State::enqueue_barrier(int idx) const {
  return _enqueue_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_enqueue_barrier(ShenandoahEnqueueBarrierNode* n) {
  assert(!_enqueue_barriers->contains(n), "duplicate entry in barrier list");
  _enqueue_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_enqueue_barrier(ShenandoahEnqueueBarrierNode* n) {
  if (_enqueue_barriers->contains(n)) {
    _enqueue_barriers->remove(n);
  }
}

int ShenandoahBarrierSetC2State::load_reference_barriers_count() const {
  return _load_reference_barriers->length();
}

ShenandoahLoadReferenceBarrierNode* ShenandoahBarrierSetC2State::load_reference_barrier(int idx) const {
  return _load_reference_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  assert(!_load_reference_barriers->contains(n), "duplicate entry in barrier list");
  _load_reference_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  if (_load_reference_barriers->contains(n)) {
    _load_reference_barriers->remove(n);
  }
}

Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahStoreValEnqueueBarrier) {
    obj = shenandoah_enqueue_barrier(kit, obj);
  }
  return obj;
}

#define __ kit->

bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
                                                         BasicType bt, uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = __ memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success: The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects. We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success: The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to not be null here from earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL so that no previous store
        // has been moved up to directly write a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

#undef __
#define __ ideal.
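
// The SATB pre-barrier emitted below reduces, schematically, to the following
// runtime filter (a sketch of the generated IR shape, not literal code):
//
//   if (thread->gc_state & MARKING) {
//     pre_val = *adr;                                 // only when do_load is true
//     if (pre_val != NULL) {
//       if (thread->satb_index != 0) {                // room left in thread-local buffer
//         thread->satb_index -= sizeof(intptr_t);
//         thread->satb_buffer[thread->satb_index] = pre_val;
//       } else {
//         write_ref_field_pre_entry(pre_val, thread); // buffer full: runtime call
//       }
//     }
//   }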

void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
                                                    bool do_load,
                                                    Node* obj,
                                                    Node* adr,
                                                    uint alias_idx,
                                                    Node* val,
                                                    const TypeOopPtr* val_type,
                                                    Node* pre_val,
                                                    BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (ReduceInitialCardMarks
        && satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  // Offsets into the thread
  const int index_offset  = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr  = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking;
  Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
  Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
  marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
  assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");

  // if (!marking)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
    Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
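      // (SATBMarkQueue::_index is a byte offset that counts down from the buffer
      // capacity toward zero, so index == 0 means the thread-local buffer is full.)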
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node* log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc* tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls);
      } __ end_if();  // (!index)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (!marking)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);

  if (ShenandoahSATBBarrier && adr != NULL) {
    Node* c = kit->control();
    Node* call = c->in(1)->in(1)->in(1)->in(0);
    assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
    call->add_req(adr);
  }
}

bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry);
}

bool ShenandoahBarrierSetC2::is_shenandoah_lrb_call(Node* call) {
  if (!call->is_CallLeaf()) {
    return false;
  }

  address entry_point = call->as_CallLeaf()->entry_point();
  return (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
}

bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform* phase, Node* n) {
  if (n->Opcode() != Op_If) {
    return false;
  }

  Node* bol = n->in(1);
  assert(bol->is_Bool(), "");
  Node* cmpx = bol->in(1);
  if (bol->as_Bool()->_test._test == BoolTest::ne &&
      cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
      is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
      cmpx->in(1)->in(2)->is_Con() &&
      cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) {
    return true;
  }

  return false;
}

bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
  if (!n->is_Load()) return false;
  const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
  return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
         && n->in(2)->in(3)->is_Con()
         && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
}

void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
                                                          bool do_load,
                                                          Node* obj,
                                                          Node* adr,
                                                          uint alias_idx,
                                                          Node* val,
                                                          const TypeOopPtr* val_type,
                                                          Node* pre_val,
                                                          BasicType bt) const {
  if (ShenandoahSATBBarrier) {
    IdealKit ideal(kit);
    kit->sync_kit(ideal);

    satb_write_barrier_pre(kit, do_load, obj, adr, alias_idx, val, val_type, pre_val, bt);

    ideal.sync_kit(kit);
    kit->final_sync(ideal);
  }
}

Node* ShenandoahBarrierSetC2::shenandoah_enqueue_barrier(GraphKit* kit, Node* pre_val) const {
  return kit->gvn().transform(new ShenandoahEnqueueBarrierNode(pre_val));
}

// Helper that guards and inserts a pre-barrier.
void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                                Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when
  // Shenandoah's SATB barriers are enabled, we need to log the value in the
  // referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for non-volatile loads from the referent field
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::referent_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset but not the referent offset, so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
    // Update GraphKit memory and control from IdealKit.
    kit->sync_kit(ideal);

    Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
    Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

    // Update IdealKit memory and control from GraphKit.
    __ sync_kit(kit);

    Node* one = __ ConI(1);
    // is_instof == 0 if base_oop == NULL
    __ if_then(is_instof, BoolTest::eq, one, unlikely); {

      // Update GraphKit from IdealKit.
      kit->sync_kit(ideal);

      // Use the pre-barrier to record the value in the referent field
      satb_write_barrier_pre(kit, false /* do_load */,
                             NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                             pre_val /* pre_val */,
                             T_OBJECT);
      if (need_mem_bar) {
        // Add memory barrier to prevent commoning reads from this field
        // across safepoint since GC can change its value.
        kit->insert_mem_bar(Op_MemBarCPUOrder);
      }
      // Update IdealKit from GraphKit.
      __ sync_kit(kit);

    } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __
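
// Call signatures for the Shenandoah runtime entries invoked from compiled
// code. The domains below mirror the C entry points: the pre-barrier entry
// takes the previous oop value plus the current thread, and the clone barrier
// takes the source oop.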

const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type() {
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // original load address

  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  const TypePtr* adr_type = access.addr().type();
  Node* adr = access.addr().node();

  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;

  if (!access.is_oop() || (!on_heap && !anonymous)) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  GraphKit* kit = access.kit();

  uint adr_idx = kit->C->get_alias_index(adr_type);
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
  Node* value = val.node();
  value = shenandoah_storeval_barrier(kit, value);
  val.set_node(value);
  shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
                               static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
  return BarrierSetC2::store_at_resolved(access, val);
}
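
// For oop loads this wraps the loaded value in a ShenandoahLoadReferenceBarrierNode
// (so the caller always sees a to-space copy) and, for loads that may read
// Reference.referent, emits the SATB keep-alive pre-barrier on the loaded value.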
Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();
  Node* obj = access.base();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap;

  Node* top = Compile::current()->top();

  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
  Node* load = BarrierSetC2::load_at_resolved(access, val_type);

  if (access.is_oop()) {
    if (ShenandoahLoadRefBarrier) {
      load = new ShenandoahLoadReferenceBarrierNode(NULL, load);
      load = access.kit()->gvn().transform(load);
    }
  }

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if SATB is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // We also need to add a memory barrier to prevent commoning reads
  // from this field across safepoints, since the GC can change its value.
  bool need_read_barrier = ShenandoahKeepAliveBarrier &&
                           (on_heap && (on_weak || (unknown && offset != top && obj != top)));

  if (!access.is_oop() || !need_read_barrier) {
    return load;
  }

  GraphKit* kit = access.kit();

  if (on_weak) {
    // Use the pre-barrier to record the value in the referent field
    satb_write_barrier_pre(kit, false /* do_load */,
                           NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                           load /* pre_val */, T_OBJECT);
    // Add memory barrier to prevent commoning reads from this field
    // across safepoint since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}
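
// Called right after an atomic LoadStore (CAS/xchg) has been emitted: publish
// its SCMemProj into the current memory state so the node stays alive even if
// its result value goes unused.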
static void pin_atomic_op(C2AtomicAccess& access) {
  if (!access.needs_pinning()) {
    return;
  }
  // SCMemProjNodes represent the memory state of a LoadStore. Their
  // main role is to prevent LoadStore nodes from being optimized away
  // when their results aren't used.
  GraphKit* kit = access.kit();
  Node* load_store = access.raw_access();
  assert(load_store != NULL, "must pin atomic op");
  Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
  kit->set_memory(proj, access.alias_idx());
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                             Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_storeval_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);

    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
    Node* load_store = NULL;

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      }
    }

    access.set_raw_access(load_store);
    pin_atomic_op(access);

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
    }
#endif
    load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store));
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                              Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_storeval_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);
    DecoratorSet decorators = access.decorators();
    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
    Node* load_store = NULL;
    Node* adr = access.addr().node();
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      }
    }
    access.set_raw_access(load_store);
    pin_atomic_op(access);
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}
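
// For an oop swap, the new value goes through the storeval barrier before the
// exchange; the returned old value is routed through a load-reference barrier
// and also SATB-enqueued as pre_val, since the swap may have removed the last
// visible reference to that object.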
Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_storeval_barrier(kit, val);
  }
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
  if (access.is_oop()) {
    result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result));
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 result /* pre_val */, T_OBJECT);
  }
  return result;
}

// Support for GC barriers emitted during parsing
bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) return true;
  if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
    return false;
  }
  CallLeafNode* call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "shenandoah_clone_barrier") == 0 ||
         strcmp(call->_name, "shenandoah_cas_obj") == 0 ||
         strcmp(call->_name, "shenandoah_wb_pre") == 0;
}

Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
  }
  if (c->Opcode() == Op_ShenandoahEnqueueBarrier) {
    c = c->in(1);
  }
  return c;
}

bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  return !ShenandoahBarrierC2Support::expand(C, igvn);
}

bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {
  if (mode == LoopOptsShenandoahExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    ShenandoahBarrierC2Support::pin_and_expand(phase);
    return true;
  } else if (mode == LoopOptsShenandoahPostExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    visited.Clear();
    ShenandoahBarrierC2Support::optimize_after_expansion(visited, nstack, worklist, phase);
    return true;
  }
  return false;
}

bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(BasicType type) const {
  return false;
}

bool ShenandoahBarrierSetC2::clone_needs_barrier(Node* src, PhaseGVN& gvn) {
  const TypeOopPtr* src_type = gvn.type(src)->is_oopptr();
  if (src_type->isa_instptr() != NULL) {
    ciInstanceKlass* ik = src_type->klass()->as_instance_klass();
    if ((src_type->klass_is_exact() || (!ik->is_interface() && !ik->has_subklass())) && !ik->has_injected_fields()) {
      if (ik->has_object_fields()) {
        return true;
      } else {
        if (!src_type->klass_is_exact()) {
          Compile::current()->dependencies()->assert_leaf_type(ik);
        }
      }
    } else {
      return true;
    }
  } else if (src_type->isa_aryptr()) {
    BasicType src_elem = src_type->klass()->as_array_klass()->element_type()->basic_type();
    if (src_elem == T_OBJECT || src_elem == T_ARRAY) {
      return true;
    }
  } else {
    return true;
  }
  return false;
}
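
// clone_at_expansion lowers an Object.clone() ArrayCopyNode into, schematically:
//
//   if (thread->gc_state & HAS_FORWARDED) {
//     shenandoah_clone_barrier(src);     // fix up from-space references in src first
//   }
//   fast_arraycopy(src, dest, length);   // then do the word-wise copy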
void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* mem = ac->in(TypeFunc::Memory);
  Node* src = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);
  assert(src_offset == NULL && dest_offset == NULL, "for clone offsets should be null");
  assert(src->is_AddP(), "for clone the src should be the interior ptr");
  assert(dest->is_AddP(), "for clone the dst should be the interior ptr");

  if (ShenandoahCloneBarrier && clone_needs_barrier(src, phase->igvn())) {
    // Check if the heap has forwarded objects. If it does, we need to call into the
    // special routine that fixes up source references before we can continue.

    enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
    Node* region = new RegionNode(PATH_LIMIT);
    Node* mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);

    Node* thread = phase->transform_later(new ThreadLocalNode());
    Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset));

    uint gc_state_idx = Compile::AliasIdxRaw;
    const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
    debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));

    Node* gc_state = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
    Node* stable_and = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED)));
    Node* stable_cmp = phase->transform_later(new CmpINode(stable_and, phase->igvn().zerocon(T_INT)));
    Node* stable_test = phase->transform_later(new BoolNode(stable_cmp, BoolTest::ne));

    IfNode* stable_iff = phase->transform_later(new IfNode(ctrl, stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN))->as_If();
    Node* stable_ctrl = phase->transform_later(new IfFalseNode(stable_iff));
    Node* unstable_ctrl = phase->transform_later(new IfTrueNode(stable_iff));

    // Heap is stable, no need to do anything additional
    region->init_req(_heap_stable, stable_ctrl);
    mem_phi->init_req(_heap_stable, mem);

    // Heap is unstable, call into clone barrier stub
    Node* call = phase->make_leaf_call(unstable_ctrl, mem,
                                       ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(),
                                       CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
                                       "shenandoah_clone",
                                       TypeRawPtr::BOTTOM,
                                       src->in(AddPNode::Base));
    call = phase->transform_later(call);

    ctrl = phase->transform_later(new ProjNode(call, TypeFunc::Control));
    mem = phase->transform_later(new ProjNode(call, TypeFunc::Memory));
    region->init_req(_heap_unstable, ctrl);
    mem_phi->init_req(_heap_unstable, mem);

    // Wire up the actual arraycopy stub now
    ctrl = phase->transform_later(region);
    mem = phase->transform_later(mem_phi);

    const char* name = "arraycopy";
    call = phase->make_leaf_call(ctrl, mem,
                                 OptoRuntime::fast_arraycopy_Type(),
                                 phase->basictype2arraycopy(T_LONG, NULL, NULL, true, name, true),
                                 name, TypeRawPtr::BOTTOM,
                                 src, dest, length
                                 LP64_ONLY(COMMA phase->top()));
    call = phase->transform_later(call);

    // Hook up the whole thing into the graph
    phase->igvn().replace_node(ac, call);
  } else {
    BarrierSetC2::clone_at_expansion(phase, ac);
  }
}

// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
    state()->add_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
  }
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
    state()->remove_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
  }
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const {
  if (is_shenandoah_wb_pre_call(n)) {
    shenandoah_eliminate_wb_pre(n, &macro->igvn());
  }
}

void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
  assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
  Node* c = call->as_Call()->proj_out(TypeFunc::Control);
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
  assert(iff->is_If(), "expect test");
  if (!is_shenandoah_marking_if(igvn, iff)) {
    c = c->unique_ctrl_out();
    assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
    iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
    assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
  }
  Node* cmpx = iff->in(1)->in(1);
  igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
  igvn->rehash_node_delayed(call);
  call->del_req(call->req()-1);
}

void ShenandoahBarrierSetC2::enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const {
  if (node->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(node)) {
    worklist.push(node);
  }
}

void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful) const {
  for (uint i = 0; i < useful.size(); i++) {
    Node* n = useful.at(i);
    if (n->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(n)) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Compile::current()->record_for_igvn(n->fast_out(i));
      }
    }
  }
  for (int i = state()->enqueue_barriers_count() - 1; i >= 0; i--) {
    ShenandoahEnqueueBarrierNode* n = state()->enqueue_barrier(i);
    if (!useful.member(n)) {
      state()->remove_enqueue_barrier(n);
    }
  }
  for (int i = state()->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* n = state()->load_reference_barrier(i);
    if (!useful.member(n)) {
      state()->remove_load_reference_barrier(n);
    }
  }
}

void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}

void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new (comp_arena) ShenandoahBarrierSetC2State(comp_arena);
}

ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
// expanded later, then now is the time to do so.
bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const { return false; }

#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barriers(bool post_parse) const {
  if (ShenandoahVerifyOptoBarriers && !post_parse) {
    ShenandoahBarrierC2Support::verify(Compile::current()->root());
  }
}
#endif
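
// Barrier-aware idealizations: drop the trailing address argument from a
// wb_pre call once that AddP is used only by such calls, look through barrier
// nodes when a CmpP compares against NULL, and fold a heap-stable test that is
// dominated by an identical test.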
Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const {
  if (is_shenandoah_wb_pre_call(n)) {
    uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
    if (n->req() > cnt) {
      Node* addp = n->in(cnt);
      if (has_only_shenandoah_wb_pre_uses(addp)) {
        n->del_req(cnt);
        if (can_reshape) {
          phase->is_IterGVN()->_worklist.push(addp);
        }
        return n;
      }
    }
  }
  if (n->Opcode() == Op_CmpP) {
    Node* in1 = n->in(1);
    Node* in2 = n->in(2);
    if (in1->bottom_type() == TypePtr::NULL_PTR) {
      in2 = step_over_gc_barrier(in2);
    }
    if (in2->bottom_type() == TypePtr::NULL_PTR) {
      in1 = step_over_gc_barrier(in1);
    }
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (in1 != n->in(1)) {
      if (igvn != NULL) {
        n->set_req_X(1, in1, igvn);
      } else {
        n->set_req(1, in1);
      }
      assert(in2 == n->in(2), "only one change");
      return n;
    }
    if (in2 != n->in(2)) {
      if (igvn != NULL) {
        n->set_req_X(2, in2, igvn);
      } else {
        n->set_req(2, in2);
      }
      return n;
    }
  } else if (can_reshape &&
             n->Opcode() == Op_If &&
             ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
             n->in(0) != NULL) {
    Node* dom = n->in(0);
    Node* prev_dom = n;
    int op = n->Opcode();
    int dist = 16;
    // Search up the dominator tree for another heap stable test
    while (dom->Opcode() != op ||                                   // Not same opcode?
           !ShenandoahBarrierC2Support::is_heap_stable_test(dom) || // Not same input 1?
           prev_dom->in(0) != dom) {                                // One path of test does not dominate?
      if (dist < 0) return NULL;

      dist--;
      prev_dom = dom;
      dom = IfNode::up_one_dom(dom);
      if (!dom) return NULL;
    }

    // Check that we did not follow a loop back to ourselves
    if (n == dom) {
      return NULL;
    }

    return n->as_If()->dominated_by(prev_dom, phase->is_IterGVN());
  }
  return NULL;
}

bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!is_shenandoah_wb_pre_call(u)) {
      return false;
    }
  }
  return n->outcnt() > 0;
}

Node* ShenandoahBarrierSetC2::arraycopy_load_reference_barrier(PhaseGVN* phase, Node* v) {
  if (ShenandoahLoadRefBarrier) {
    return phase->transform(new ShenandoahLoadReferenceBarrierNode(NULL, v));
  }
  if (ShenandoahStoreValEnqueueBarrier) {
    return phase->transform(new ShenandoahEnqueueBarrierNode(v));
  }
  return v;
}