/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/rootnode.hpp"

ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _enqueue_barriers(new (comp_arena) GrowableArray<ShenandoahEnqueueBarrierNode*>(comp_arena, 8, 0, NULL)),
    _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, NULL)) {
}

int ShenandoahBarrierSetC2State::enqueue_barriers_count() const {
  return _enqueue_barriers->length();
}

ShenandoahEnqueueBarrierNode* ShenandoahBarrierSetC2State::enqueue_barrier(int idx) const {
  return _enqueue_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_enqueue_barrier(ShenandoahEnqueueBarrierNode* n) {
  assert(!_enqueue_barriers->contains(n), "duplicate entry in barrier list");
  _enqueue_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_enqueue_barrier(ShenandoahEnqueueBarrierNode* n) {
  if (_enqueue_barriers->contains(n)) {
    _enqueue_barriers->remove(n);
  }
}

int ShenandoahBarrierSetC2State::load_reference_barriers_count() const {
  return _load_reference_barriers->length();
}

ShenandoahLoadReferenceBarrierNode* ShenandoahBarrierSetC2State::load_reference_barrier(int idx) const {
  return _load_reference_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  assert(!_load_reference_barriers->contains(n), "duplicate entry in barrier list");
  _load_reference_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  if (_load_reference_barriers->contains(n)) {
    _load_reference_barriers->remove(n);
  }
}

Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahStoreValEnqueueBarrier) {
    obj = shenandoah_enqueue_barrier(kit, obj);
  }
  return obj;
}

#define __ kit->
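// Decide whether the SATB pre-barrier for a store at (base + offset) can be elided.
// If the field being overwritten provably still holds the initial zero value written
// by a tightly coupled allocation (no intervening store to the same slot is found
// while walking the memory graph), there is no previous value worth enqueueing and
// the pre-barrier can be skipped. This mirrors the logic G1 uses under
// ReduceInitialCardMarks.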
bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
                                                         BasicType bt, uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = __ memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success: The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects. We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success: The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to not be null here from earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL so that no previous store
        // has been moved up and directly written a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

#undef __
#define __ ideal.

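// Emit the SATB pre-barrier. The generated shape is roughly:
//
//   if ((tls->gc_state & MARKING) != 0) {
//     pre_val = *adr;                         // only when do_load is true
//     if (pre_val != NULL) {
//       if (index != 0) {                     // room left in the SATB buffer
//         buffer[--index] = pre_val;
//       } else {
//         shenandoah_wb_pre(pre_val, thread); // runtime call flushes the buffer
//       }
//     }
//   }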
void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
                                                    bool do_load,
                                                    Node* obj,
                                                    Node* adr,
                                                    uint alias_idx,
                                                    Node* val,
                                                    const TypeOopPtr* val_type,
                                                    Node* pre_val,
                                                    BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (ReduceInitialCardMarks
        && satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  // Offsets into the thread
  const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking;
  Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
  Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
  marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
  assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");

  // if (marking)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading SATBMarkQueue::_index with wrong size.");
    Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
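      // The index counts down from the buffer capacity, so index == 0 means
      // the SATB buffer is full and the runtime leaf call below must be used
      // to flush it.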
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node* log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc* tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (marking)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);

  if (ShenandoahSATBBarrier && adr != NULL) {
    Node* c = kit->control();
    Node* call = c->in(1)->in(1)->in(1)->in(0);
    assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
    call->add_req(adr);
  }
}

bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry);
}

bool ShenandoahBarrierSetC2::is_shenandoah_wb_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT);
}

bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform* phase, Node* n) {
  if (n->Opcode() != Op_If) {
    return false;
  }

  Node* bol = n->in(1);
  assert(bol->is_Bool(), "");
  Node* cmpx = bol->in(1);
  if (bol->as_Bool()->_test._test == BoolTest::ne &&
      cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
      is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
      cmpx->in(1)->in(2)->is_Con() &&
      cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) {
    return true;
  }

  return false;
}

bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
  if (!n->is_Load()) return false;
  const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
  return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
         && n->in(2)->in(3)->is_Con()
         && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
}

void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
                                                          bool do_load,
                                                          Node* obj,
                                                          Node* adr,
                                                          uint alias_idx,
                                                          Node* val,
                                                          const TypeOopPtr* val_type,
                                                          Node* pre_val,
                                                          BasicType bt) const {
  if (ShenandoahSATBBarrier) {
    IdealKit ideal(kit);
    kit->sync_kit(ideal);

    satb_write_barrier_pre(kit, do_load, obj, adr, alias_idx, val, val_type, pre_val, bt);

    ideal.sync_kit(kit);
    kit->final_sync(ideal);
  }
}

Node* ShenandoahBarrierSetC2::shenandoah_enqueue_barrier(GraphKit* kit, Node* pre_val) const {
  return kit->gvn().transform(new ShenandoahEnqueueBarrierNode(pre_val));
}

// Helper that guards and inserts a pre-barrier.
void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                                Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when SATB
  // barriers are enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add memory barrier for non-volatile load from the referent field
  // to prevent commoning of loads across safepoint.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset but not the reference_offset so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::_reference_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
    // Update graphKit memory and control from IdealKit.
    kit->sync_kit(ideal);

    Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
    Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

    // Update IdealKit memory and control from graphKit.
    __ sync_kit(kit);

    Node* one = __ ConI(1);
    // is_instof == 0 if base_oop == NULL
    __ if_then(is_instof, BoolTest::eq, one, unlikely); {

      // Update graphKit from IdealKit.
      kit->sync_kit(ideal);

      // Use the pre-barrier to record the value in the referent field
      satb_write_barrier_pre(kit, false /* do_load */,
                             NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                             pre_val /* pre_val */,
                             T_OBJECT);
      if (need_mem_bar) {
        // Add memory barrier to prevent commoning reads from this field
        // across safepoint since GC can change its value.
        kit->insert_mem_bar(Op_MemBarCPUOrder);
      }
      // Update IdealKit from graphKit.
      __ sync_kit(kit);

    } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // cloned object
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_write_barrier_Type() {
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

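// Oop stores need two extra pieces on top of the plain store: the new value goes
// through the storeval/enqueue barrier, and the SATB pre-barrier records the
// previous value of the field. Non-oop stores, and oop stores that are neither
// in-heap nor ON_UNKNOWN_OOP_REF, fall through to the generic BarrierSetC2 code.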
Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  const TypePtr* adr_type = access.addr().type();
  Node* adr = access.addr().node();

  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;

  if (!access.is_oop() || (!on_heap && !anonymous)) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();

    uint adr_idx = kit->C->get_alias_index(adr_type);
    assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
    Node* value = val.node();
    value = shenandoah_storeval_barrier(kit, value);
    val.set_node(value);
    shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
                                 static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
  } else {
    assert(access.is_opt_access(), "only for optimization passes");
    assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
    PhaseGVN& gvn = opt_access.gvn();
    MergeMemNode* mm = opt_access.mem();

    if (ShenandoahStoreValEnqueueBarrier) {
      Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(val.node()));
      val.set_node(enqueue);
    }
  }
  return BarrierSetC2::store_at_resolved(access, val);
}

Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();
  Node* obj = access.base();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap;

  Node* top = Compile::current()->top();

  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
  Node* load = BarrierSetC2::load_at_resolved(access, val_type);

  if (access.is_oop()) {
    if (ShenandoahLoadRefBarrier) {
      load = new ShenandoahLoadReferenceBarrierNode(NULL, load);
      if (access.is_parse_access()) {
        load = static_cast<C2ParseAccess&>(access).kit()->gvn().transform(load);
      } else {
        load = static_cast<C2OptAccess&>(access).gvn().transform(load);
      }
    }
  }

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if SATB is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // Also we need to add memory barrier to prevent commoning reads
  // from this field across safepoint since GC can change its value.
  bool need_read_barrier = ShenandoahKeepAliveBarrier &&
                           (on_heap && (on_weak || (unknown && offset != top && obj != top)));

  if (!access.is_oop() || !need_read_barrier) {
    return load;
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
  GraphKit* kit = parse_access.kit();

  if (on_weak) {
    // Use the pre-barrier to record the value in the referent field
    satb_write_barrier_pre(kit, false /* do_load */,
                           NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                           load /* pre_val */, T_OBJECT);
    // Add memory barrier to prevent commoning reads from this field
    // across safepoint since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
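    // The offset is not a compile-time constant (ON_UNKNOWN_OOP_REF), so let
    // insert_pre_barrier() emit the runtime filters that apply the SATB
    // pre-barrier only when this really is Reference.referent being loaded.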
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                             Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_storeval_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);

    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
    Node* load_store = NULL;

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      }
    }

    access.set_raw_access(load_store);
    pin_atomic_op(access);

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
    }
#endif
    load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store));
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                              Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_storeval_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);
    DecoratorSet decorators = access.decorators();
    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
    Node* load_store = NULL;
    Node* adr = access.addr().node();
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      }
    }
    access.set_raw_access(load_store);
    pin_atomic_op(access);
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_storeval_barrier(kit, val);
  }
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
  if (access.is_oop()) {
    result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result));
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 result /* pre_val */, T_OBJECT);
  }
  return result;
}

void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
  assert(!src->is_AddP(), "unexpected input");
  BarrierSetC2::clone(kit, src, dst, size, is_array);
}

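// Fast-path allocation reserves extra space in front of every object for the
// Brooks forwarding pointer: grow the allocation size, shift the returned oop
// past the reserved space, and initialize the forwarding pointer to point back
// at the object itself.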
Node* ShenandoahBarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
                                           Node*& i_o, Node*& needgc_ctrl,
                                           Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
                                           intx prefetch_lines) const {
  PhaseIterGVN& igvn = macro->igvn();

  // Allocate several words more for the Shenandoah Brooks pointer.
  size_in_bytes = new AddXNode(size_in_bytes, igvn.MakeConX(ShenandoahForwarding::byte_size()));
  macro->transform_later(size_in_bytes);

  Node* fast_oop = BarrierSetC2::obj_allocate(macro, ctrl, mem, toobig_false, size_in_bytes,
                                              i_o, needgc_ctrl, fast_oop_ctrl, fast_oop_rawmem,
                                              prefetch_lines);

  // Bump up the object for the Shenandoah Brooks pointer.
  fast_oop = new AddPNode(macro->top(), fast_oop, igvn.MakeConX(ShenandoahForwarding::byte_size()));
  macro->transform_later(fast_oop);

  // Initialize the Shenandoah Brooks pointer to point to the object itself.
  fast_oop_rawmem = macro->make_store(fast_oop_ctrl, fast_oop_rawmem, fast_oop, ShenandoahForwarding::byte_offset(), fast_oop, T_OBJECT);

  return fast_oop;
}

// Support for GC barriers emitted during parsing
bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) return true;
  if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
    return false;
  }
  CallLeafNode* call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "shenandoah_clone_barrier") == 0 ||
         strcmp(call->_name, "shenandoah_cas_obj") == 0 ||
         strcmp(call->_name, "shenandoah_wb_pre") == 0;
}

Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
  }
  if (c->Opcode() == Op_ShenandoahEnqueueBarrier) {
    c = c->in(1);
  }
  return c;
}

bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  return !ShenandoahBarrierC2Support::expand(C, igvn);
}

bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {
  if (mode == LoopOptsShenandoahExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    ShenandoahBarrierC2Support::pin_and_expand(phase);
    return true;
  } else if (mode == LoopOptsShenandoahPostExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    visited.Clear();
    ShenandoahBarrierC2Support::optimize_after_expansion(visited, nstack, worklist, phase);
    return true;
  }
  return false;
}

bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  if (!is_oop) {
    return false;
  }
  if (tightly_coupled_alloc) {
    if (phase == Optimization) {
      return false;
    }
    return !is_clone;
  }
  if (phase == Optimization) {
    return !ShenandoahStoreValEnqueueBarrier;
  }
  return true;
}

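// A cloned object needs the clone barrier only if it can contain oop fields:
// instances of a known leaf klass without oop fields can skip it (recording a
// leaf-type dependency when the klass is not exact), while object arrays and
// statically unknown types conservatively get the barrier.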
bool ShenandoahBarrierSetC2::clone_needs_postbarrier(ArrayCopyNode* ac, PhaseIterGVN& igvn) {
  Node* src = ac->in(ArrayCopyNode::Src);
  const TypeOopPtr* src_type = igvn.type(src)->is_oopptr();
  if (src_type->isa_instptr() != NULL) {
    ciInstanceKlass* ik = src_type->klass()->as_instance_klass();
    if ((src_type->klass_is_exact() || (!ik->is_interface() && !ik->has_subklass())) && !ik->has_injected_fields()) {
      if (ik->has_object_fields()) {
        return true;
      } else {
        if (!src_type->klass_is_exact()) {
          igvn.C->dependencies()->assert_leaf_type(ik);
        }
      }
    } else {
      return true;
    }
  } else if (src_type->isa_aryptr()) {
    BasicType src_elem = src_type->klass()->as_array_klass()->element_type()->basic_type();
    if (src_elem == T_OBJECT || src_elem == T_ARRAY) {
      return true;
    }
  } else {
    return true;
  }
  return false;
}

void ShenandoahBarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const {
  assert(ac->is_clonebasic(), "no other kind of arraycopy here");

  if (!clone_needs_postbarrier(ac, igvn)) {
    BarrierSetC2::clone_barrier_at_expansion(ac, call, igvn);
    return;
  }

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  Node* c = new ProjNode(call, TypeFunc::Control);
  c = igvn.transform(c);
  Node* m = new ProjNode(call, TypeFunc::Memory);
  m = igvn.transform(m);

  Node* dest = ac->in(ArrayCopyNode::Dest);
  assert(dest->is_AddP(), "bad input");
  Node* barrier_call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(),
                                        CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
                                        "shenandoah_clone_barrier", raw_adr_type);
  barrier_call->init_req(TypeFunc::Control, c);
  barrier_call->init_req(TypeFunc::I_O, igvn.C->top());
  barrier_call->init_req(TypeFunc::Memory, m);
  barrier_call->init_req(TypeFunc::ReturnAdr, igvn.C->top());
  barrier_call->init_req(TypeFunc::FramePtr, igvn.C->top());
  barrier_call->init_req(TypeFunc::Parms+0, dest->in(AddPNode::Base));

  barrier_call = igvn.transform(barrier_call);
  c = new ProjNode(barrier_call, TypeFunc::Control);
  c = igvn.transform(c);
  m = new ProjNode(barrier_call, TypeFunc::Memory);
  m = igvn.transform(m);

  Node* out_c = ac->proj_out(TypeFunc::Control);
  Node* out_m = ac->proj_out(TypeFunc::Memory);
  igvn.replace_node(out_c, c);
  igvn.replace_node(out_m, m);
}

// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
    state()->add_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
  }
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
    state()->remove_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
  }
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const {
  if (is_shenandoah_wb_pre_call(n)) {
    shenandoah_eliminate_wb_pre(n, &macro->igvn());
  }
}

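// A pre-barrier call whose associated allocation has been eliminated is dead:
// constant-fold its marking test so the slow-path call folds away, and drop the
// extra address input that was attached to the call when the barrier was emitted.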
void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
  assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
  Node* c = call->as_Call()->proj_out(TypeFunc::Control);
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
  assert(iff->is_If(), "expect test");
  if (!is_shenandoah_marking_if(igvn, iff)) {
    c = c->unique_ctrl_out();
    assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
    iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
    assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
  }
  Node* cmpx = iff->in(1)->in(1);
  igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
  igvn->rehash_node_delayed(call);
  call->del_req(call->req()-1);
}

void ShenandoahBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  if (node->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(node)) {
    igvn->add_users_to_worklist(node);
  }
}

void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List& useful, Compile* C) const {
  for (uint i = 0; i < useful.size(); i++) {
    Node* n = useful.at(i);
    if (n->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(n)) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        C->record_for_igvn(n->fast_out(i));
      }
    }
  }
  for (int i = state()->enqueue_barriers_count() - 1; i >= 0; i--) {
    ShenandoahEnqueueBarrierNode* n = state()->enqueue_barrier(i);
    if (!useful.member(n)) {
      state()->remove_enqueue_barrier(n);
    }
  }
  for (int i = state()->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* n = state()->load_reference_barrier(i);
    if (!useful.member(n)) {
      state()->remove_load_reference_barrier(n);
    }
  }
}

void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}

void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new (comp_arena) ShenandoahBarrierSetC2State(comp_arena);
}

ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
// expanded later, then now is the time to do so.
bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const { return false; }

#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeExpand) {
    ShenandoahBarrierC2Support::verify(Compile::current()->root());
  } else if (phase == BarrierSetC2::BeforeCodeGen) {
    // Verify Shenandoah pre-barriers
    const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());

    ResourceArea* area = Thread::current()->resource_area();
    Unique_Node_List visited(area);
    Node_List worklist(area);
    // We're going to walk control flow backwards starting from the Root
    worklist.push(compile->root());
    while (worklist.size() > 0) {
      Node* x = worklist.pop();
      if (x == NULL || x == compile->top()) continue;
      if (visited.member(x)) {
        continue;
      } else {
        visited.push(x);
      }

      if (x->is_Region()) {
        for (uint i = 1; i < x->req(); i++) {
          worklist.push(x->in(i));
        }
      } else {
        worklist.push(x->in(0));
        // We are looking for the pattern:
        //                            /->ThreadLocal
        // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
        //              \->ConI(0)
        // We want to verify that the If and the LoadB have the same control
        // See ShenandoahBarrierSetC2::satb_write_barrier_pre()
        if (x->is_If()) {
          IfNode* iff = x->as_If();
          if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
            CmpNode* cmp = iff->in(1)->in(1)->as_Cmp();
            if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
                && cmp->in(1)->is_Load()) {
              LoadNode* load = cmp->in(1)->as_Load();
              if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                  && load->in(2)->in(3)->is_Con()
                  && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {

                Node* if_ctrl = iff->in(0);
                Node* load_ctrl = load->in(0);

                if (if_ctrl != load_ctrl) {
                  // Skip possible CProj->NeverBranch in infinite loops
                  if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                      && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
                    if_ctrl = if_ctrl->in(0)->in(0);
                  }
                }
                assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
              }
            }
          }
        }
      }
    }
  }
}
#endif

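// IGVN hook for Shenandoah-specific idealizations: drop a useless address input
// from a wb_pre runtime call, look through barriers feeding a CmpP against NULL
// (a barrier never changes the null-ness of its input), and fold a heap-stable
// test that is dominated by an identical test further up the graph.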
Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const {
  if (is_shenandoah_wb_pre_call(n)) {
    uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
    if (n->req() > cnt) {
      Node* addp = n->in(cnt);
      if (has_only_shenandoah_wb_pre_uses(addp)) {
        n->del_req(cnt);
        if (can_reshape) {
          phase->is_IterGVN()->_worklist.push(addp);
        }
        return n;
      }
    }
  }
  if (n->Opcode() == Op_CmpP) {
    Node* in1 = n->in(1);
    Node* in2 = n->in(2);
    if (in1->bottom_type() == TypePtr::NULL_PTR) {
      in2 = step_over_gc_barrier(in2);
    }
    if (in2->bottom_type() == TypePtr::NULL_PTR) {
      in1 = step_over_gc_barrier(in1);
    }
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (in1 != n->in(1)) {
      if (igvn != NULL) {
        n->set_req_X(1, in1, igvn);
      } else {
        n->set_req(1, in1);
      }
      assert(in2 == n->in(2), "only one change");
      return n;
    }
    if (in2 != n->in(2)) {
      if (igvn != NULL) {
        n->set_req_X(2, in2, igvn);
      } else {
        n->set_req(2, in2);
      }
      return n;
    }
  } else if (can_reshape &&
             n->Opcode() == Op_If &&
             ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
             n->in(0) != NULL) {
    Node* dom = n->in(0);
    Node* prev_dom = n;
    int op = n->Opcode();
    int dist = 16;
    // Search up the dominator tree for another heap stable test
    while (dom->Opcode() != op ||                                    // Not same opcode?
           !ShenandoahBarrierC2Support::is_heap_stable_test(dom) ||  // Not same input 1?
           prev_dom->in(0) != dom) {                                 // One path of test does not dominate?
      if (dist < 0) return NULL;

      dist--;
      prev_dom = dom;
      dom = IfNode::up_one_dom(dom);
      if (!dom) return NULL;
    }

    // Check that we did not follow a loop back to ourselves
    if (n == dom) {
      return NULL;
    }

    return n->as_If()->dominated_by(prev_dom, phase->is_IterGVN());
  }

  return NULL;
}

bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!is_shenandoah_wb_pre_call(u)) {
      return false;
    }
  }
  return n->outcnt() > 0;
}

bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_CallLeaf:
    case Op_CallLeafNoFP: {
      assert(n->is_Call(), "");
      CallNode* call = n->as_Call();
      if (ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
        uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
        if (call->req() > cnt) {
          assert(call->req() == cnt + 1, "only one extra input");
          Node* addp = call->in(cnt);
          assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
          call->del_req(cnt);
        }
      }
      return false;
    }
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
#ifdef ASSERT
      if (VerifyOptoOopOffsets) {
        MemNode* mem = n->as_Mem();
        // Check to see if address types have grounded out somehow.
        const TypeInstPtr* tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
        ciInstanceKlass* k = tp->klass()->as_instance_klass();
        bool oop_offset_is_sane = k->contains_field_offset(tp->offset());
        assert(!tp || oop_offset_is_sane, "");
      }
#endif
      return true;
    case Op_ShenandoahLoadReferenceBarrier:
      assert(false, "should have been expanded already");
      return true;
    default:
      return false;
  }
}

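// Escape analysis support: Shenandoah's CAS/exchange nodes are registered with
// the connection graph like the standard unsafe accesses, and the barrier nodes
// simply forward their input value, so the barriers themselves do not make
// objects appear to escape.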
bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      conn_graph->add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
      conn_graph->add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      return true;
    case Op_StoreP: {
      Node* adr = n->in(MemNode::Address);
      const Type* adr_type = gvn->type(adr);
      // Pointer stores into the SATB buffer look like unsafe accesses.
      // Ignore such stores to be able to scalar-replace non-escaping
      // allocations.
      if (adr_type->isa_rawptr() && adr->is_AddP()) {
        Node* base = conn_graph->get_addp_base(adr);
        if (base->Opcode() == Op_LoadP &&
            base->in(MemNode::Address)->is_AddP()) {
          adr = base->in(MemNode::Address);
          Node* tls = conn_graph->get_addp_base(adr);
          if (tls->Opcode() == Op_ThreadLocal) {
            int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
            const int buf_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
            if (offs == buf_offset) {
              return true; // Pre barrier previous oop value store.
            }
          }
        }
      }
      return false;
    }
    case Op_ShenandoahEnqueueBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
      break;
    case Op_ShenandoahLoadReferenceBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);
      return true;
    default:
      // Nothing
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN: {
      Node* adr = n->in(MemNode::Address);
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
      // fallthrough
    }
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
      return conn_graph->add_final_edges_unsafe_access(n, opcode);
    case Op_ShenandoahEnqueueBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
      return true;
    case Op_ShenandoahLoadReferenceBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL);
      return true;
    default:
      // Nothing
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::escape_has_out_with_unsafe_object(Node* n) const {
  return n->has_out_with(Op_ShenandoahCompareAndExchangeP) || n->has_out_with(Op_ShenandoahCompareAndExchangeN) ||
         n->has_out_with(Op_ShenandoahCompareAndSwapP, Op_ShenandoahCompareAndSwapN, Op_ShenandoahWeakCompareAndSwapP, Op_ShenandoahWeakCompareAndSwapN);
}

bool ShenandoahBarrierSetC2::escape_is_barrier_node(Node* n) const {
  return n->Opcode() == Op_ShenandoahLoadReferenceBarrier;
}

bool ShenandoahBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN: { // Convert trinary to binary-tree
      Node* newval = n->in(MemNode::ValueIn);
      Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
      Node* pair = new BinaryNode(oldval, newval);
      n->set_req(MemNode::ValueIn, pair);
      n->del_req(LoadStoreConditionalNode::ExpectedIn);
      return true;
    }
    default:
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::matcher_is_store_load_barrier(Node* x, uint xop) const {
  return xop == Op_ShenandoahCompareAndExchangeP ||
         xop == Op_ShenandoahCompareAndExchangeN ||
         xop == Op_ShenandoahWeakCompareAndSwapP ||
         xop == Op_ShenandoahWeakCompareAndSwapN ||
         xop == Op_ShenandoahCompareAndSwapN ||
         xop == Op_ShenandoahCompareAndSwapP;
}