/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/rootnode.hpp"

ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _enqueue_barriers(new (comp_arena) GrowableArray<ShenandoahEnqueueBarrierNode*>(comp_arena, 8, 0, NULL)),
    _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, NULL)) {
}

int ShenandoahBarrierSetC2State::enqueue_barriers_count() const {
  return _enqueue_barriers->length();
}

ShenandoahEnqueueBarrierNode* ShenandoahBarrierSetC2State::enqueue_barrier(int idx) const {
  return _enqueue_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_enqueue_barrier(ShenandoahEnqueueBarrierNode* n) {
  assert(!_enqueue_barriers->contains(n), "duplicate entry in barrier list");
  _enqueue_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_enqueue_barrier(ShenandoahEnqueueBarrierNode* n) {
  if (_enqueue_barriers->contains(n)) {
    _enqueue_barriers->remove(n);
  }
}

int ShenandoahBarrierSetC2State::load_reference_barriers_count() const {
  return _load_reference_barriers->length();
}

ShenandoahLoadReferenceBarrierNode* ShenandoahBarrierSetC2State::load_reference_barrier(int idx) const {
  return _load_reference_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  assert(!_load_reference_barriers->contains(n), "duplicate entry in barrier list");
  _load_reference_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  if (_load_reference_barriers->contains(n)) {
    _load_reference_barriers->remove(n);
  }
}

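// Storeval barrier: with ShenandoahStoreValEnqueueBarrier enabled, an oop
// that is about to be written into the heap is first routed through a
// ShenandoahEnqueueBarrierNode so the concurrent collector gets to see it.
// Roughly (a sketch, not the exact IR shape):
//
//   store(adr, val)   ==>   val' = enqueue(val); store(adr, val')
//
// With the flag off, the value passes through unchanged.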
Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahStoreValEnqueueBarrier) {
    obj = shenandoah_enqueue_barrier(kit, obj);
  }
  return obj;
}

#define __ kit->

bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
                                                         BasicType bt, uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = __ memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success: The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects. We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success: The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to not be null here from earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL so that no previous store
        // has been moved up and directly writes a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

#undef __
#define __ ideal.
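
// SATB pre-barrier. The IdealKit code below expands to roughly the following
// pseudo-code (a sketch, not the exact emitted IR):
//
//   if (thread->gc_state & MARKING) {
//     pre_val = *adr;                                 // only if do_load
//     if (pre_val != NULL) {
//       index = thread->satb_index;
//       if (index != 0) {                             // room left in the buffer
//         index -= sizeof(intptr_t);
//         thread->satb_buffer[index] = pre_val;       // raw byte-offset addressing
//         thread->satb_index = index;
//       } else {
//         write_ref_field_pre_entry(pre_val, thread); // runtime slow path
//       }
//     }
//   }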
void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
                                                    bool do_load,
                                                    Node* obj,
                                                    Node* adr,
                                                    uint alias_idx,
                                                    Node* val,
                                                    const TypeOopPtr* val_type,
                                                    Node* pre_val,
                                                    BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (ReduceInitialCardMarks
        && satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  // Offsets into the thread
  const int index_offset  = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr  = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking;
  Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
  Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
  marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
  assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");

  // if (marking)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading SATBMarkQueue::_index with wrong size.");
    Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
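      // (index == 0 means the thread-local buffer has no room left: the fast
      // path below claims a slot by decrementing the index, the slow path
      // hands pre_val to the runtime instead.)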
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node* log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc* tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls);
      } __ end_if(); // (!index)
    } __ end_if(); // (pre_val != NULL)
  } __ end_if(); // (marking)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);

  if (ShenandoahSATBBarrier && adr != NULL) {
    Node* c = kit->control();
    Node* call = c->in(1)->in(1)->in(1)->in(0);
    assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
    call->add_req(adr);
  }
}

bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry);
}

bool ShenandoahBarrierSetC2::is_shenandoah_wb_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT);
}

bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform* phase, Node* n) {
  if (n->Opcode() != Op_If) {
    return false;
  }

  Node* bol = n->in(1);
  assert(bol->is_Bool(), "");
  Node* cmpx = bol->in(1);
  if (bol->as_Bool()->_test._test == BoolTest::ne &&
      cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
      is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
      cmpx->in(1)->in(2)->is_Con() &&
      cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) {
    return true;
  }

  return false;
}

bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
  if (!n->is_Load()) return false;
  const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
  return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
         && n->in(2)->in(3)->is_Con()
         && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
}

void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
                                                          bool do_load,
                                                          Node* obj,
                                                          Node* adr,
                                                          uint alias_idx,
                                                          Node* val,
                                                          const TypeOopPtr* val_type,
                                                          Node* pre_val,
                                                          BasicType bt) const {
  if (ShenandoahSATBBarrier) {
    IdealKit ideal(kit);
    kit->sync_kit(ideal);

    satb_write_barrier_pre(kit, do_load, obj, adr, alias_idx, val, val_type, pre_val, bt);

    ideal.sync_kit(kit);
    kit->final_sync(ideal);
  }
}

Node* ShenandoahBarrierSetC2::shenandoah_enqueue_barrier(GraphKit* kit, Node* pre_val) const {
  return kit->gvn().transform(new ShenandoahEnqueueBarrierNode(pre_val));
}

// Helper that guards and inserts a pre-barrier.
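// It is called from load_at_resolved() for loads carrying the
// ON_UNKNOWN_OOP_REF decorator (Unsafe- and reflection-style accesses),
// where we cannot tell statically whether the load reads the referent
// field of a java.lang.ref.Reference.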
void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                                Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when the
  // SATB barrier is enabled, we need to log the value in the referent field in an
  // SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for non-volatile loads from the referent field
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::referent_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset but not the referent_offset so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
    // Update graphKit memory and control from IdealKit.
    kit->sync_kit(ideal);

    Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
    Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

    // Update IdealKit memory and control from graphKit.
    __ sync_kit(kit);

    Node* one = __ ConI(1);
    // is_instof == 0 if base_oop == NULL
    __ if_then(is_instof, BoolTest::eq, one, unlikely); {

      // Update graphKit from IdealKit.
      kit->sync_kit(ideal);

      // Use the pre-barrier to record the value in the referent field
      satb_write_barrier_pre(kit, false /* do_load */,
                             NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                             pre_val /* pre_val */,
                             T_OBJECT);
      if (need_mem_bar) {
        // Add memory barrier to prevent commoning reads from this field
        // across safepoint since GC can change its value.
        kit->insert_mem_bar(Op_MemBarCPUOrder);
      }
      // Update IdealKit from graphKit.
      __ sync_kit(kit);

    } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // src oop
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_write_barrier_Type() {
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  const TypePtr* adr_type = access.addr().type();
  Node* adr = access.addr().node();

  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;

  if (!access.is_oop() || (!on_heap && !anonymous)) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();

    uint adr_idx = kit->C->get_alias_index(adr_type);
    assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
    Node* value = val.node();
    value = shenandoah_storeval_barrier(kit, value);
    val.set_node(value);
    shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
                                 static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
  } else {
    assert(access.is_opt_access(), "only for optimization passes");
    assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
    PhaseGVN& gvn = opt_access.gvn();
    MergeMemNode* mm = opt_access.mem();

    if (ShenandoahStoreValEnqueueBarrier) {
      Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(val.node()));
      val.set_node(enqueue);
    }
  }
  return BarrierSetC2::store_at_resolved(access, val);
}

Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();
  Node* obj = access.base();

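  // Decorator bits determine the extra work this load needs:
  //  - ON_WEAK_OOP_REF and ON_UNKNOWN_OOP_REF loads may need a keep-alive
  //    (SATB) barrier on the loaded referent, emitted further down;
  //  - !MO_UNORDERED, C2_MISMATCHED and !IN_HEAP set need_cpu_mem_bar, which
  //    adds a membar so the load cannot be commoned across a safepoint.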
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap;

  Node* top = Compile::current()->top();

  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
  Node* load = BarrierSetC2::load_at_resolved(access, val_type);

  if (access.is_oop()) {
    if (ShenandoahLoadRefBarrier) {
      load = new ShenandoahLoadReferenceBarrierNode(NULL, load);
      if (access.is_parse_access()) {
        load = static_cast<C2ParseAccess&>(access).kit()->gvn().transform(load);
      } else {
        load = static_cast<C2OptAccess&>(access).gvn().transform(load);
      }
    }
  }

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if SATB is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // Also we need to add a memory barrier to prevent commoning reads
  // from this field across safepoints, since GC can change its value.
  bool need_read_barrier = ShenandoahKeepAliveBarrier &&
                           (on_heap && (on_weak || (unknown && offset != top && obj != top)));

  if (!access.is_oop() || !need_read_barrier) {
    return load;
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
  GraphKit* kit = parse_access.kit();

  if (on_weak) {
    // Use the pre-barrier to record the value in the referent field
    satb_write_barrier_pre(kit, false /* do_load */,
                           NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                           load /* pre_val */, T_OBJECT);
    // Add memory barrier to prevent commoning reads from this field
    // across safepoint since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                             Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_storeval_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);

    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
    Node* load_store = NULL;

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      }
    }

    access.set_raw_access(load_store);
    pin_atomic_op(access);

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
    }
#endif
    load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store));
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                              Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_storeval_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);
    DecoratorSet decorators = access.decorators();
    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
    Node* load_store = NULL;
    Node* adr = access.addr().node();
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      }
    }
    access.set_raw_access(load_store);
    pin_atomic_op(access);
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_storeval_barrier(kit, val);
  }
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
  if (access.is_oop()) {
    result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result));
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 result /* pre_val */, T_OBJECT);
  }
  return result;
}

void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
  assert(!src->is_AddP(), "unexpected input");
  BarrierSetC2::clone(kit, src, dst, size, is_array);
}

Node* ShenandoahBarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
                                           Node*& i_o, Node*& needgc_ctrl,
                                           Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
                                           intx prefetch_lines) const {
  PhaseIterGVN& igvn = macro->igvn();

  // Allocate one extra word for the Shenandoah Brooks forwarding pointer.
  size_in_bytes = new AddXNode(size_in_bytes, igvn.MakeConX(ShenandoahBrooksPointer::byte_size()));
  macro->transform_later(size_in_bytes);

  Node* fast_oop = BarrierSetC2::obj_allocate(macro, ctrl, mem, toobig_false, size_in_bytes,
                                              i_o, needgc_ctrl, fast_oop_ctrl, fast_oop_rawmem,
                                              prefetch_lines);

  // Bump the object start past the Brooks pointer word.
  fast_oop = new AddPNode(macro->top(), fast_oop, igvn.MakeConX(ShenandoahBrooksPointer::byte_size()));
  macro->transform_later(fast_oop);

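  // Layout sketch: the Brooks forwarding pointer occupies the word
  // immediately before the object, so byte_offset() addresses the slot
  // just below the (already bumped) object start:
  //
  //   raw allocation:  [fwd ptr][mark][klass][fields...]
  //                             ^-- fast_oop points here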
  // Initialize the Brooks pointer to point to the object itself.
  fast_oop_rawmem = macro->make_store(fast_oop_ctrl, fast_oop_rawmem, fast_oop, ShenandoahBrooksPointer::byte_offset(), fast_oop, T_OBJECT);

  return fast_oop;
}

// Support for GC barriers emitted during parsing
bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) return true;
  if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
    return false;
  }
  CallLeafNode* call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "shenandoah_clone_barrier") == 0 ||
         strcmp(call->_name, "shenandoah_cas_obj") == 0 ||
         strcmp(call->_name, "shenandoah_wb_pre") == 0;
}

Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
  }
  if (c->Opcode() == Op_ShenandoahEnqueueBarrier) {
    c = c->in(1);
  }
  return c;
}

bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  return !ShenandoahBarrierC2Support::expand(C, igvn);
}

bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {
  if (mode == LoopOptsShenandoahExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    ShenandoahBarrierC2Support::pin_and_expand(phase);
    return true;
  } else if (mode == LoopOptsShenandoahPostExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    visited.Clear();
    ShenandoahBarrierC2Support::optimize_after_expansion(visited, nstack, worklist, phase);
    return true;
  }
  return false;
}

bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  if (!is_oop) {
    return false;
  }
  if (tightly_coupled_alloc) {
    if (phase == Optimization) {
      return false;
    }
    return !is_clone;
  }
  if (phase == Optimization) {
    return !ShenandoahStoreValEnqueueBarrier;
  }
  return true;
}

bool ShenandoahBarrierSetC2::clone_needs_postbarrier(ArrayCopyNode* ac, PhaseIterGVN& igvn) {
  Node* src = ac->in(ArrayCopyNode::Src);
  const TypeOopPtr* src_type = igvn.type(src)->is_oopptr();
  if (src_type->isa_instptr() != NULL) {
    ciInstanceKlass* ik = src_type->klass()->as_instance_klass();
    if ((src_type->klass_is_exact() || (!ik->is_interface() && !ik->has_subklass())) && !ik->has_injected_fields()) {
      if (ik->has_object_fields()) {
        return true;
      } else {
        if (!src_type->klass_is_exact()) {
          igvn.C->dependencies()->assert_leaf_type(ik);
        }
      }
    } else {
      return true;
    }
  } else if (src_type->isa_aryptr()) {
    BasicType src_elem = src_type->klass()->as_array_klass()->element_type()->basic_type();
    if (src_elem == T_OBJECT || src_elem == T_ARRAY) {
      return true;
    }
  } else {
    return true;
  }
  return false;
}

void ShenandoahBarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const {
  assert(ac->is_clonebasic(), "no other kind of arraycopy here");

  if (!clone_needs_postbarrier(ac, igvn)) {
    BarrierSetC2::clone_barrier_at_expansion(ac, call, igvn);
    return;
  }

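  // Splice a leaf call to ShenandoahRuntime::shenandoah_clone_barrier behind
  // the clone call, on its control and memory outputs. Sketch of the result:
  //
  //   clone call --ctrl/mem--> shenandoah_clone_barrier(dest base)
  //              --ctrl/mem--> former users of the ArrayCopy projections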
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  Node* c = new ProjNode(call, TypeFunc::Control);
  c = igvn.transform(c);
  Node* m = new ProjNode(call, TypeFunc::Memory);
  m = igvn.transform(m);

  Node* dest = ac->in(ArrayCopyNode::Dest);
  assert(dest->is_AddP(), "bad input");
  Node* barrier_call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(),
                                        CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
                                        "shenandoah_clone_barrier", raw_adr_type);
  barrier_call->init_req(TypeFunc::Control, c);
  barrier_call->init_req(TypeFunc::I_O, igvn.C->top());
  barrier_call->init_req(TypeFunc::Memory, m);
  barrier_call->init_req(TypeFunc::ReturnAdr, igvn.C->top());
  barrier_call->init_req(TypeFunc::FramePtr, igvn.C->top());
  barrier_call->init_req(TypeFunc::Parms+0, dest->in(AddPNode::Base));

  barrier_call = igvn.transform(barrier_call);
  c = new ProjNode(barrier_call, TypeFunc::Control);
  c = igvn.transform(c);
  m = new ProjNode(barrier_call, TypeFunc::Memory);
  m = igvn.transform(m);

  Node* out_c = ac->proj_out(TypeFunc::Control);
  Node* out_m = ac->proj_out(TypeFunc::Memory);
  igvn.replace_node(out_c, c);
  igvn.replace_node(out_m, m);
}


// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
    state()->add_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
  }
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
    state()->remove_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
  }
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const {
  if (is_shenandoah_wb_pre_call(n)) {
    shenandoah_eliminate_wb_pre(n, &macro->igvn());
  }
}

void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
  assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
  Node* c = call->as_Call()->proj_out(TypeFunc::Control);
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
  assert(iff->is_If(), "expect test");
  if (!is_shenandoah_marking_if(igvn, iff)) {
    c = c->unique_ctrl_out();
    assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
    iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
    assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
  }
  Node* cmpx = iff->in(1)->in(1);
  igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
  igvn->rehash_node_delayed(call);
  call->del_req(call->req()-1);
}

void ShenandoahBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  if (node->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(node)) {
    igvn->add_users_to_worklist(node);
  }
}

void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {
  for (uint i = 0; i < useful.size(); i++) {
    Node* n = useful.at(i);
    if (n->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(n)) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        C->record_for_igvn(n->fast_out(i));
      }
    }
  }
  for (int i = state()->enqueue_barriers_count() - 1; i >= 0; i--) {
    ShenandoahEnqueueBarrierNode* n = state()->enqueue_barrier(i);
    if (!useful.member(n)) {
      state()->remove_enqueue_barrier(n);
    }
  }
  for (int i = state()->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* n = state()->load_reference_barrier(i);
    if (!useful.member(n)) {
      state()->remove_load_reference_barrier(n);
    }
  }
}

void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}

void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new (comp_arena) ShenandoahBarrierSetC2State(comp_arena);
}

ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
// expanded later, then now is the time to do so.
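// Shenandoah registers no such macro nodes: its barriers are expanded through
// expand_barriers() and optimize_loops() above, so there is nothing to do here.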
bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const { return false; }

#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeExpand) {
    ShenandoahBarrierC2Support::verify(Compile::current()->root());
  } else if (phase == BarrierSetC2::BeforeCodeGen) {
    // Verify Shenandoah pre-barriers
    const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());

    ResourceArea* area = Thread::current()->resource_area();
    Unique_Node_List visited(area);
    Node_List worklist(area);
    // We're going to walk control flow backwards starting from the Root
    worklist.push(compile->root());
    while (worklist.size() > 0) {
      Node* x = worklist.pop();
      if (x == NULL || x == compile->top()) continue;
      if (visited.member(x)) {
        continue;
      } else {
        visited.push(x);
      }

      if (x->is_Region()) {
        for (uint i = 1; i < x->req(); i++) {
          worklist.push(x->in(i));
        }
      } else {
        worklist.push(x->in(0));
        // We are looking for the pattern:
        //                            /->ThreadLocal
        // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
        //              \->ConI(0)
        // We want to verify that the If and the LoadB have the same control
        // See ShenandoahBarrierSetC2::satb_write_barrier_pre()
        if (x->is_If()) {
          IfNode* iff = x->as_If();
          if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
            CmpNode* cmp = iff->in(1)->in(1)->as_Cmp();
            if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
                && cmp->in(1)->is_Load()) {
              LoadNode* load = cmp->in(1)->as_Load();
              if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                  && load->in(2)->in(3)->is_Con()
                  && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {

                Node* if_ctrl = iff->in(0);
                Node* load_ctrl = load->in(0);

                if (if_ctrl != load_ctrl) {
                  // Skip possible CProj->NeverBranch in infinite loops
                  if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                      && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
                    if_ctrl = if_ctrl->in(0)->in(0);
                  }
                }
                assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
              }
            }
          }
        }
      }
    }
  }
}
#endif

Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const {
  if (is_shenandoah_wb_pre_call(n)) {
    uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
    if (n->req() > cnt) {
      Node* addp = n->in(cnt);
      if (has_only_shenandoah_wb_pre_uses(addp)) {
        n->del_req(cnt);
        if (can_reshape) {
          phase->is_IterGVN()->_worklist.push(addp);
        }
        return n;
      }
    }
  }
  if (n->Opcode() == Op_CmpP) {
    Node* in1 = n->in(1);
    Node* in2 = n->in(2);
    if (in1->bottom_type() == TypePtr::NULL_PTR) {
      in2 = step_over_gc_barrier(in2);
    }
    if (in2->bottom_type() == TypePtr::NULL_PTR) {
      in1 = step_over_gc_barrier(in1);
    }
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (in1 != n->in(1)) {
      if (igvn != NULL) {
        n->set_req_X(1, in1, igvn);
      } else {
        n->set_req(1, in1);
      }
      assert(in2 == n->in(2), "only one change");
      return n;
    }
    if (in2 != n->in(2)) {
      if (igvn != NULL) {
        n->set_req_X(2, in2, igvn);
      } else {
        n->set_req(2, in2);
      }
      return n;
    }
  } else if (can_reshape &&
             n->Opcode() == Op_If &&
             ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
             n->in(0) != NULL) {
    Node* dom = n->in(0);
    Node* prev_dom = n;
    int op = n->Opcode();
    int dist = 16;
    // Search up the dominator tree for another heap stable test
    while (dom->Opcode() != op ||                                        // Not same opcode?
           !ShenandoahBarrierC2Support::is_heap_stable_test(dom) ||      // Not same input 1?
           prev_dom->in(0) != dom) {                                     // One path of test does not dominate?
      if (dist < 0) return NULL;

      dist--;
      prev_dom = dom;
      dom = IfNode::up_one_dom(dom);
      if (!dom) return NULL;
    }

    // Check that we did not follow a loop back to ourselves
    if (n == dom) {
      return NULL;
    }

    return n->as_If()->dominated_by(prev_dom, phase->is_IterGVN());
  }

  return NULL;
}

bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!is_shenandoah_wb_pre_call(u)) {
      return false;
    }
  }
  return n->outcnt() > 0;
}

bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_CallLeaf:
    case Op_CallLeafNoFP: {
      assert(n->is_Call(), "");
      CallNode* call = n->as_Call();
      if (ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
        uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
        if (call->req() > cnt) {
          assert(call->req() == cnt + 1, "only one extra input");
          Node* addp = call->in(cnt);
          assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
          call->del_req(cnt);
        }
      }
      return false;
    }
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
#ifdef ASSERT
      if (VerifyOptoOopOffsets) {
        MemNode* mem = n->as_Mem();
        // Check to see if address types have grounded out somehow.
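        // (isa_instptr() returns NULL when the address type is not a concrete
        // instance pointer, in which case there is no field offset to check.)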
        const TypeInstPtr* tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
        if (tp != NULL) {
          ciInstanceKlass* k = tp->klass()->as_instance_klass();
          bool oop_offset_is_sane = k->contains_field_offset(tp->offset());
          assert(oop_offset_is_sane, "");
        }
      }
#endif
      return true;
    case Op_ShenandoahLoadReferenceBarrier:
      assert(false, "should have been expanded already");
      return true;
    default:
      return false;
  }
}

bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      conn_graph->add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
      conn_graph->add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      return true;
    case Op_StoreP: {
      Node* adr = n->in(MemNode::Address);
      const Type* adr_type = gvn->type(adr);
      // Pointer stores in Shenandoah barriers look like unsafe accesses.
      // Ignore such stores so that non-escaping allocations can still be
      // scalar replaced.
      if (adr_type->isa_rawptr() && adr->is_AddP()) {
        Node* base = conn_graph->get_addp_base(adr);
        if (base->Opcode() == Op_LoadP &&
            base->in(MemNode::Address)->is_AddP()) {
          adr = base->in(MemNode::Address);
          Node* tls = conn_graph->get_addp_base(adr);
          if (tls->Opcode() == Op_ThreadLocal) {
            int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
            const int buf_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
            if (offs == buf_offset) {
              return true; // Pre barrier previous oop value store.
            }
          }
        }
      }
      return false;
    }
    case Op_ShenandoahEnqueueBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
      break;
    case Op_ShenandoahLoadReferenceBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);
      return true;
    default:
      // Nothing
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN: {
      Node* adr = n->in(MemNode::Address);
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
      // fallthrough
    }
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
      return conn_graph->add_final_edges_unsafe_access(n, opcode);
    case Op_ShenandoahEnqueueBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
      return true;
    case Op_ShenandoahLoadReferenceBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL);
      return true;
    default:
      // Nothing
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::escape_has_out_with_unsafe_object(Node* n) const {
  return n->has_out_with(Op_ShenandoahCompareAndExchangeP) || n->has_out_with(Op_ShenandoahCompareAndExchangeN) ||
         n->has_out_with(Op_ShenandoahCompareAndSwapP, Op_ShenandoahCompareAndSwapN, Op_ShenandoahWeakCompareAndSwapP, Op_ShenandoahWeakCompareAndSwapN);
}

bool ShenandoahBarrierSetC2::escape_is_barrier_node(Node* n) const {
  return n->Opcode() == Op_ShenandoahLoadReferenceBarrier;
}

bool ShenandoahBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN: { // Convert trinary to binary-tree
      Node* newval = n->in(MemNode::ValueIn);
      Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
      Node* pair = new BinaryNode(oldval, newval);
      n->set_req(MemNode::ValueIn, pair);
      n->del_req(LoadStoreConditionalNode::ExpectedIn);
      return true;
    }
    default:
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::matcher_is_store_load_barrier(Node* x, uint xop) const {
  return xop == Op_ShenandoahCompareAndExchangeP ||
         xop == Op_ShenandoahCompareAndExchangeN ||
         xop == Op_ShenandoahWeakCompareAndSwapP ||
         xop == Op_ShenandoahWeakCompareAndSwapN ||
         xop == Op_ShenandoahCompareAndSwapN ||
         xop == Op_ShenandoahCompareAndSwapP;
}