/*
 * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/rootnode.hpp"

ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _shenandoah_barriers(new (comp_arena) GrowableArray<ShenandoahWriteBarrierNode*>(comp_arena, 8, 0, NULL)) {
}

int ShenandoahBarrierSetC2State::shenandoah_barriers_count() const {
  return _shenandoah_barriers->length();
}

ShenandoahWriteBarrierNode* ShenandoahBarrierSetC2State::shenandoah_barrier(int idx) const {
  return _shenandoah_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_shenandoah_barrier(ShenandoahWriteBarrierNode* n) {
  assert(!_shenandoah_barriers->contains(n), "duplicate entry in barrier list");
  _shenandoah_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_shenandoah_barrier(ShenandoahWriteBarrierNode* n) {
  if (_shenandoah_barriers->contains(n)) {
    _shenandoah_barriers->remove(n);
  }
}

#define __ kit->

Node* ShenandoahBarrierSetC2::shenandoah_read_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahReadBarrier) {
    obj = shenandoah_read_barrier_impl(kit, obj, false, true, true);
  }
  return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahStoreValEnqueueBarrier) {
    obj = shenandoah_write_barrier(kit, obj);
    obj = shenandoah_enqueue_barrier(kit, obj);
  }
  if (ShenandoahStoreValReadBarrier) {
    obj = shenandoah_read_barrier_impl(kit, obj, true, false, false);
  }
  return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_read_barrier_acmp(GraphKit* kit, Node* obj) {
  return shenandoah_read_barrier_impl(kit, obj, true, true, false);
}

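// Resolve 'obj' to its to-space copy by reading the Brooks forwarding
// pointer stored in the word before the object. Two shapes can be emitted,
// sketched here in pseudocode (resolve() is illustrative, not a real helper):
//
//   obj may be null:   (obj == NULL) ? NULL : resolve(obj)
//   obj never null:    resolve(obj)
//
// The null-checked form is the RegionNode/PhiNode diamond built below.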
Node* ShenandoahBarrierSetC2::shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const {
  const Type* obj_type = obj->bottom_type();
  if (obj_type->higher_equal(TypePtr::NULL_PTR)) {
    return obj;
  }
  const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
  Node* mem = use_mem ? __ memory(adr_type) : __ immutable_memory();

  if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, mem, allow_fromspace)) {
    // We know no barrier is needed for this object.
    return obj;
  }

  if (obj_type->meet(TypePtr::NULL_PTR) == obj_type->remove_speculative()) {
    // We don't know if it's null or not. Need null-check.
    enum { _not_null_path = 1, _null_path, PATH_LIMIT };
    RegionNode* region = new RegionNode(PATH_LIMIT);
    Node* phi = new PhiNode(region, obj_type);
    Node* null_ctrl = __ top();
    Node* not_null_obj = __ null_check_oop(obj, &null_ctrl);

    region->init_req(_null_path, null_ctrl);
    phi   ->init_req(_null_path, __ zerocon(T_OBJECT));

    Node* ctrl = use_ctrl ? __ control() : NULL;
    ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, not_null_obj, allow_fromspace);
    Node* n = __ gvn().transform(rb);

    region->init_req(_not_null_path, __ control());
    phi   ->init_req(_not_null_path, n);

    __ set_control(__ gvn().transform(region));
    __ record_for_igvn(region);
    return __ gvn().transform(phi);
  } else {
    // We know it is not null. Simple barrier is sufficient.
    Node* ctrl = use_ctrl ? __ control() : NULL;
    ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, obj, allow_fromspace);
    Node* n = __ gvn().transform(rb);
    __ record_for_igvn(n);
    return n;
  }
}

Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const {
  ShenandoahWriteBarrierNode* wb = new ShenandoahWriteBarrierNode(kit->C, kit->control(), kit->memory(adr_type), obj);
  Node* n = __ gvn().transform(wb);
  if (n == wb) { // New barrier needs memory projection.
    Node* proj = __ gvn().transform(new ShenandoahWBMemProjNode(n));
    __ set_memory(proj, adr_type);
  }
  return n;
}

Node* ShenandoahBarrierSetC2::shenandoah_write_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahWriteBarrier) {
    obj = shenandoah_write_barrier_impl(kit, obj);
  }
  return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const {
  if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, NULL, true)) {
    return obj;
  }
  const Type* obj_type = obj->bottom_type();
  const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
  Node* n = shenandoah_write_barrier_helper(kit, obj, adr_type);
  __ record_for_igvn(n);
  return n;
}

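// Decide whether the SATB pre-barrier can be elided for a store at 'adr'.
// If the store targets a field of a freshly allocated object, and no earlier
// captured store could have put a reference there, the previous value is
// known to be NULL, and logging NULL in the SATB queue is a no-op. The loop
// below walks memory backwards (bounded to 50 steps) past provably
// independent stores, looking for the allocation's InitializeNode.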
bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
                                                         BasicType bt, uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = __ memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {
    if (mem->is_Store()) {
      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so stop searching.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success: The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success: The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to not be null here from earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL, so that no previous
        // store has been moved up to directly write a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

#undef __
#define __ ideal.

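// Emit the SATB pre-barrier. Roughly, in pseudocode (field names are
// illustrative shorthand for the thread-local SATB mark queue fields whose
// offsets are loaded below):
//
//   if (thread->gc_state & MARKING) {
//     pre_val = *adr;                                  // only if do_load
//     if (pre_val != NULL) {
//       index = thread->satb_index;
//       if (index != 0) {                              // queue not full?
//         index -= sizeof(intptr_t);                   // index is in bytes
//         *(thread->satb_buffer + index) = pre_val;
//         thread->satb_index = index;
//       } else {
//         write_ref_field_pre_entry(pre_val, thread);  // runtime slow path
//       }
//     }
//   }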
void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
                                                    bool do_load,
                                                    Node* obj,
                                                    Node* adr,
                                                    uint alias_idx,
                                                    Node* val,
                                                    const TypeOopPtr* val_type,
                                                    Node* pre_val,
                                                    BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (ReduceInitialCardMarks
        && satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }
  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  // Offsets into the thread
  const int index_offset  = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr  = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking;
  Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
  Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
  marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
  assert(ShenandoahWriteBarrierNode::is_gc_state_load(ld), "Should match the shape");

  // if (marking)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading SATBMarkQueue::_index with wrong size.");
    Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node* log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc* tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (marking)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
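
  // Remember the address this pre-barrier belongs to: 'adr' is appended as an
  // extra argument on the runtime call emitted above (found by walking back up
  // the control chain IdealKit just built). The extra input is not a real call
  // argument; it is stripped again in ideal_node() and final_graph_reshaping()
  // below once it is no longer needed.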
  if (ShenandoahSATBBarrier && adr != NULL) {
    Node* c = kit->control();
    Node* call = c->in(1)->in(1)->in(1)->in(0);
    assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
    call->add_req(adr);
  }
}

bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry);
}

bool ShenandoahBarrierSetC2::is_shenandoah_wb_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT);
}

bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform* phase, Node* n) {
  if (n->Opcode() != Op_If) {
    return false;
  }

  Node* bol = n->in(1);
  assert(bol->is_Bool(), "");
  Node* cmpx = bol->in(1);
  if (bol->as_Bool()->_test._test == BoolTest::ne &&
      cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
      is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
      cmpx->in(1)->in(2)->is_Con() &&
      cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) {
    return true;
  }

  return false;
}

bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
  if (!n->is_Load()) return false;
  const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
  return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
         && n->in(2)->in(3)->is_Con()
         && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
}

void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
                                                          bool do_load,
                                                          Node* obj,
                                                          Node* adr,
                                                          uint alias_idx,
                                                          Node* val,
                                                          const TypeOopPtr* val_type,
                                                          Node* pre_val,
                                                          BasicType bt) const {
  if (ShenandoahSATBBarrier) {
    IdealKit ideal(kit);
    kit->sync_kit(ideal);

    satb_write_barrier_pre(kit, do_load, obj, adr, alias_idx, val, val_type, pre_val, bt);

    ideal.sync_kit(kit);
    kit->final_sync(ideal);
  }
}

Node* ShenandoahBarrierSetC2::shenandoah_enqueue_barrier(GraphKit* kit, Node* pre_val) const {
  return kit->gvn().transform(new ShenandoahEnqueueBarrierNode(pre_val));
}

// Helper that guards and inserts a pre-barrier.
void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                                Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when SATB
  // marking is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add memory barrier for non volatile load from the referent field
  // to prevent commoning of loads across safepoint.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset but not the reference_offset so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::_reference_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
    // Update GraphKit memory and control from IdealKit.
    kit->sync_kit(ideal);

    Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
    Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

    // Update IdealKit memory and control from GraphKit.
    __ sync_kit(kit);

    Node* one = __ ConI(1);
    // is_instof == 0 if base_oop == NULL
    __ if_then(is_instof, BoolTest::eq, one, unlikely); {

      // Update GraphKit from IdealKit.
      kit->sync_kit(ideal);

      // Use the pre-barrier to record the value in the referent field
      satb_write_barrier_pre(kit, false /* do_load */,
                             NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                             pre_val /* pre_val */,
                             T_OBJECT);
      if (need_mem_bar) {
        // Add memory barrier to prevent commoning reads from this field
        // across safepoint since GC can change its value.
        kit->insert_mem_bar(Op_MemBarCPUOrder);
      }
      // Update IdealKit from GraphKit.
      __ sync_kit(kit);

    } __ end_if(); // (is_instof == 1)
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // cloned oop
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_write_barrier_Type() {
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original oop
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

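// Redirect an access to go through a barrier'd base. Writes send the base
// through the write barrier (the object must be in to-space before it is
// mutated); reads send it through the read barrier, except for final and
// stable fields that the ShenandoahOptimize*Finals flags allow us to skip.
// Accesses at offsets below the first field or array element (i.e. in the
// header) are left untouched. If the base changes, the AddP address chain
// is cloned below to hang off the new base.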
void ShenandoahBarrierSetC2::resolve_address(C2Access& access) const {
  const TypePtr* adr_type = access.addr().type();

  if ((access.decorators() & IN_NATIVE) == 0 && (adr_type->isa_instptr() || adr_type->isa_aryptr())) {
    int off = adr_type->is_ptr()->offset();
    int base_off = adr_type->isa_instptr() ? instanceOopDesc::base_offset_in_bytes() :
                   arrayOopDesc::base_offset_in_bytes(adr_type->is_aryptr()->elem()->array_element_basic_type());
    assert(off != Type::OffsetTop, "unexpected offset");
    if (off == Type::OffsetBot || off >= base_off) {
      DecoratorSet decorators = access.decorators();
      bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
      GraphKit* kit = NULL;
      if (access.is_parse_access()) {
        C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
        kit = parse_access.kit();
      }
      Node* adr = access.addr().node();
      assert(adr->is_AddP(), "unexpected address shape");
      Node* base = adr->in(AddPNode::Base);

      if (is_write) {
        if (kit != NULL) {
          base = shenandoah_write_barrier(kit, base);
        } else {
          assert(access.is_opt_access(), "either parse or opt access");
          assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for clone");
        }
      } else {
        if (adr_type->isa_instptr()) {
          Compile* C = access.gvn().C;
          ciField* field = C->alias_type(adr_type)->field();

          // Insert read barrier for Shenandoah.
          if (field != NULL &&
              ((ShenandoahOptimizeStaticFinals   && field->is_static()  && field->is_final()) ||
               (ShenandoahOptimizeInstanceFinals && !field->is_static() && field->is_final()) ||
               (ShenandoahOptimizeStableFinals   && field->is_stable()))) {
            // Skip the barrier for special fields
          } else {
            if (kit != NULL) {
              base = shenandoah_read_barrier(kit, base);
            } else {
              assert(access.is_opt_access(), "either parse or opt access");
              assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for arraycopy");
            }
          }
        } else {
          if (kit != NULL) {
            base = shenandoah_read_barrier(kit, base);
          } else {
            assert(access.is_opt_access(), "either parse or opt access");
            assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for arraycopy");
          }
        }
      }
      if (base != adr->in(AddPNode::Base)) {
        assert(kit != NULL, "no barrier should have been added");

        Node* address = adr->in(AddPNode::Address);

        if (address->is_AddP()) {
          assert(address->in(AddPNode::Base) == adr->in(AddPNode::Base), "unexpected address shape");
          assert(!address->in(AddPNode::Address)->is_AddP(), "unexpected address shape");
          assert(address->in(AddPNode::Address) == adr->in(AddPNode::Base), "unexpected address shape");
          address = address->clone();
          address->set_req(AddPNode::Base, base);
          address->set_req(AddPNode::Address, base);
          address = kit->gvn().transform(address);
        } else {
          assert(address == adr->in(AddPNode::Base), "unexpected address shape");
          address = base;
        }
        adr = adr->clone();
        adr->set_req(AddPNode::Base, base);
        adr->set_req(AddPNode::Address, address);
        adr = kit->gvn().transform(adr);
        access.addr().set_node(adr);
      }
    }
  }
}

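// Oop stores need two extras around the plain store: a storeval barrier on
// the stored value (so that only to-space references are written into the
// heap) and the SATB pre-barrier logging the previous field value during
// concurrent marking. Schematically, for the parse-time path:
//
//   satb_pre_barrier(*adr);            // loads and logs the previous value
//   *adr = storeval_barrier(val);
//
// The opt-access path below (arraycopy/clone expansion) has no GraphKit and
// builds the equivalent storeval-barrier shapes by hand.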
Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  const TypePtr* adr_type = access.addr().type();
  Node* adr = access.addr().node();

  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;

  if (!access.is_oop() || (!on_heap && !anonymous)) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();

    uint adr_idx = kit->C->get_alias_index(adr_type);
    assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
    Node* value = val.node();
    value = shenandoah_storeval_barrier(kit, value);
    val.set_node(value);
    shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
                                 static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
  } else {
    assert(access.is_opt_access(), "only for optimization passes");
    assert(((decorators & C2_TIGHLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
    PhaseGVN& gvn = opt_access.gvn();
    MergeMemNode* mm = opt_access.mem();

    if (ShenandoahStoreValReadBarrier) {
      RegionNode* region = new RegionNode(3);
      const Type* v_t = gvn.type(val.node());
      Node* phi = new PhiNode(region, v_t->isa_oopptr() ? v_t->is_oopptr()->cast_to_nonconst() : v_t);
      Node* cmp = gvn.transform(new CmpPNode(val.node(), gvn.zerocon(T_OBJECT)));
      Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::ne));
      IfNode* iff = new IfNode(opt_access.ctl(), bol, PROB_LIKELY_MAG(3), COUNT_UNKNOWN);

      gvn.transform(iff);
      if (gvn.is_IterGVN()) {
        gvn.is_IterGVN()->_worklist.push(iff);
      } else {
        gvn.record_for_igvn(iff);
      }

      Node* null_true  = gvn.transform(new IfFalseNode(iff));
      Node* null_false = gvn.transform(new IfTrueNode(iff));
      region->init_req(1, null_true);
      region->init_req(2, null_false);
      phi->init_req(1, gvn.zerocon(T_OBJECT));
      Node* cast = new CastPPNode(val.node(), gvn.type(val.node())->join_speculative(TypePtr::NOTNULL));
      cast->set_req(0, null_false);
      cast = gvn.transform(cast);
      Node* rb = gvn.transform(new ShenandoahReadBarrierNode(null_false, gvn.C->immutable_memory(), cast, false));
      phi->init_req(2, rb);
      opt_access.set_ctl(gvn.transform(region));
      val.set_node(gvn.transform(phi));
    }
    if (ShenandoahStoreValEnqueueBarrier) {
      const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(gvn.type(val.node()));
      int alias = gvn.C->get_alias_index(adr_type);
      Node* wb = new ShenandoahWriteBarrierNode(gvn.C, opt_access.ctl(), mm->memory_at(alias), val.node());
      Node* wb_transformed = gvn.transform(wb);
      Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(wb_transformed));
      if (wb_transformed == wb) {
        Node* proj = gvn.transform(new ShenandoahWBMemProjNode(wb));
        mm->set_memory_at(alias, proj);
      }
      val.set_node(enqueue);
    }
  }
  return BarrierSetC2::store_at_resolved(access, val);
}

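// Plain oop loads need no extra code here: resolve_address() has already
// routed the load through a read-barrier'd base where required. What is
// handled below is the keep-alive side: loads of Reference.referent, whether
// typed, unsafe or reflective, must log the loaded referent in the SATB
// queue so that concurrent marking does not lose it.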
Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();
  Node* obj = access.base();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap;

  Node* top = Compile::current()->top();

  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
  Node* load = BarrierSetC2::load_at_resolved(access, val_type);

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if SATB is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // Also we need to add memory barrier to prevent commoning reads
  // from this field across safepoint since GC can change its value.
  bool need_read_barrier = ShenandoahKeepAliveBarrier &&
                           (on_heap && (on_weak || (unknown && offset != top && obj != top)));

  if (!access.is_oop() || !need_read_barrier) {
    return load;
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
  GraphKit* kit = parse_access.kit();

  if (on_weak) {
    // Use the pre-barrier to record the value in the referent field
    satb_write_barrier_pre(kit, false /* do_load */,
                           NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                           load /* pre_val */, T_OBJECT);
    // Add memory barrier to prevent commoning reads from this field
    // across safepoint since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}

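// Atomic oop updates need both barrier kinds as well: the new value goes
// through the storeval barrier before the hardware operation, and the SATB
// pre-barrier is emitted with expected_val as the previous value, since the
// update can only succeed when the field currently holds expected_val. The
// dedicated Shenandoah CAS node variants exist so that a from-space copy of
// expected_val sitting in memory is not treated as a spurious mismatch.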
Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                             Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_storeval_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);

    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
    Node* load_store = NULL;

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
    } else
#endif
    {
      load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
    }

    access.set_raw_access(load_store);
    pin_atomic_op(access);

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
    }
#endif
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                              Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_storeval_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);
    DecoratorSet decorators = access.decorators();
    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
    Node* load_store = NULL;
    Node* adr = access.addr().node();
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      } else {
        load_store = kit->gvn().transform(new ShenandoahCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      }
    } else
#endif
    {
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      } else {
        load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      }
    }
    access.set_raw_access(load_store);
    pin_atomic_op(access);
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_storeval_barrier(kit, val);
  }
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
  if (access.is_oop()) {
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 result /* pre_val */, T_OBJECT);
  }
  return result;
}

void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
  assert(!src->is_AddP(), "unexpected input");
  src = shenandoah_read_barrier(kit, src);
  BarrierSetC2::clone(kit, src, dst, size, is_array);
}

Node* ShenandoahBarrierSetC2::resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const {
  bool is_write = decorators & ACCESS_WRITE;
  if (is_write) {
    return shenandoah_write_barrier(kit, n);
  } else {
    return shenandoah_read_barrier(kit, n);
  }
}

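// Allocation with a Brooks forwarding pointer. The resulting heap layout is,
// schematically:
//
//   allocated base:  [ fwd pointer ][ mark | klass | fields ... ]
//                                    ^ fast_oop points here
//
// The allocation size is grown by ShenandoahBrooksPointer::byte_size(), the
// returned oop is bumped past the forwarding word, and that word is then
// initialized to point back at the object itself (not yet forwarded).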
Node* ShenandoahBarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
                                           Node*& i_o, Node*& needgc_ctrl,
                                           Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
                                           intx prefetch_lines) const {
  PhaseIterGVN& igvn = macro->igvn();

  // Allocate an extra word for the Shenandoah Brooks pointer.
  size_in_bytes = new AddXNode(size_in_bytes, igvn.MakeConX(ShenandoahBrooksPointer::byte_size()));
  macro->transform_later(size_in_bytes);

  Node* fast_oop = BarrierSetC2::obj_allocate(macro, ctrl, mem, toobig_false, size_in_bytes,
                                              i_o, needgc_ctrl, fast_oop_ctrl, fast_oop_rawmem,
                                              prefetch_lines);

  // Bump up object for Shenandoah Brooks pointer.
  fast_oop = new AddPNode(macro->top(), fast_oop, igvn.MakeConX(ShenandoahBrooksPointer::byte_size()));
  macro->transform_later(fast_oop);

  // Initialize Shenandoah Brooks pointer to point to the object itself.
  fast_oop_rawmem = macro->make_store(fast_oop_ctrl, fast_oop_rawmem, fast_oop, ShenandoahBrooksPointer::byte_offset(), fast_oop, T_OBJECT);

  return fast_oop;
}

// Support for GC barriers emitted during parsing
bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
    return false;
  }
  CallLeafNode* call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "shenandoah_clone_barrier") == 0 ||
         strcmp(call->_name, "shenandoah_cas_obj") == 0 ||
         strcmp(call->_name, "shenandoah_wb_pre") == 0;
}

Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
  return ShenandoahBarrierNode::skip_through_barrier(c);
}

bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  if (!is_oop) {
    return false;
  }

  if (tightly_coupled_alloc) {
    if (phase == Optimization) {
      return false;
    }
    return !is_clone;
  }
  if (phase == Optimization) {
    return !ShenandoahStoreValEnqueueBarrier;
  }
  return true;
}

bool ShenandoahBarrierSetC2::clone_needs_postbarrier(ArrayCopyNode* ac, PhaseIterGVN& igvn) {
  Node* src = ac->in(ArrayCopyNode::Src);
  const TypeOopPtr* src_type = igvn.type(src)->is_oopptr();
  if (src_type->isa_instptr() != NULL) {
    ciInstanceKlass* ik = src_type->klass()->as_instance_klass();
    if ((src_type->klass_is_exact() || (!ik->is_interface() && !ik->has_subklass())) && !ik->has_injected_fields()) {
      if (ik->has_object_fields()) {
        return true;
      } else {
        if (!src_type->klass_is_exact()) {
          igvn.C->dependencies()->assert_leaf_type(ik);
        }
      }
    } else {
      return true;
    }
  } else if (src_type->isa_aryptr()) {
    BasicType src_elem = src_type->klass()->as_array_klass()->element_type()->basic_type();
    if (src_elem == T_OBJECT || src_elem == T_ARRAY) {
      return true;
    }
  } else {
    return true;
  }
  return false;
}

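// When a clone needs a post-barrier (the cloned object may contain oop
// fields, see clone_needs_postbarrier above), splice a leaf call to
// ShenandoahRuntime::shenandoah_clone_barrier into the control and memory
// chains behind the clone's arraycopy call: the arraycopy's original
// control/memory projections are replaced by those of the barrier call.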
void ShenandoahBarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const {
  assert(ac->is_clonebasic(), "no other kind of arraycopy here");

  if (!clone_needs_postbarrier(ac, igvn)) {
    BarrierSetC2::clone_barrier_at_expansion(ac, call, igvn);
    return;
  }

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  Node* c = new ProjNode(call, TypeFunc::Control);
  c = igvn.transform(c);
  Node* m = new ProjNode(call, TypeFunc::Memory);
  m = igvn.transform(m);

  Node* dest = ac->in(ArrayCopyNode::Dest);
  assert(dest->is_AddP(), "bad input");
  Node* barrier_call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(),
                                        CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
                                        "shenandoah_clone_barrier", raw_adr_type);
  barrier_call->init_req(TypeFunc::Control,   c);
  barrier_call->init_req(TypeFunc::I_O,       igvn.C->top());
  barrier_call->init_req(TypeFunc::Memory,    m);
  barrier_call->init_req(TypeFunc::ReturnAdr, igvn.C->top());
  barrier_call->init_req(TypeFunc::FramePtr,  igvn.C->top());
  barrier_call->init_req(TypeFunc::Parms+0, dest->in(AddPNode::Base));

  barrier_call = igvn.transform(barrier_call);
  c = new ProjNode(barrier_call, TypeFunc::Control);
  c = igvn.transform(c);
  m = new ProjNode(barrier_call, TypeFunc::Memory);
  m = igvn.transform(m);

  Node* out_c = ac->proj_out(TypeFunc::Control);
  Node* out_m = ac->proj_out(TypeFunc::Memory);
  igvn.replace_node(out_c, c);
  igvn.replace_node(out_m, m);
}

// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahWriteBarrier) {
    state()->add_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahWriteBarrier) {
    state()->remove_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const {
  if (is_shenandoah_wb_pre_call(n)) {
    shenandoah_eliminate_wb_pre(n, &macro->igvn());
  }
}

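// Fold away a no-longer-needed pre-barrier: locate the marking test that
// guards the runtime call and constant-fold its compare, so that IGVN
// removes the whole barrier diamond. The extra address argument appended in
// satb_write_barrier_pre() is dropped from the call as well.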
void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
  assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
  Node* c = call->as_Call()->proj_out(TypeFunc::Control);
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
  assert(iff->is_If(), "expect test");
  if (!is_shenandoah_marking_if(igvn, iff)) {
    c = c->unique_ctrl_out();
    assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
    iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
    assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
  }
  Node* cmpx = iff->in(1)->in(1);
  igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
  igvn->rehash_node_delayed(call);
  call->del_req(call->req()-1);
}

void ShenandoahBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  if (node->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(node)) {
    igvn->add_users_to_worklist(node);
  }
}

void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List& useful, Compile* C) const {
  for (uint i = 0; i < useful.size(); i++) {
    Node* n = useful.at(i);
    if (n->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(n)) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        C->record_for_igvn(n->fast_out(i));
      }
    }
  }
  for (int i = state()->shenandoah_barriers_count()-1; i >= 0; i--) {
    ShenandoahWriteBarrierNode* n = state()->shenandoah_barrier(i);
    if (!useful.member(n)) {
      state()->remove_shenandoah_barrier(n);
    }
  }
}

void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}

void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new (comp_arena) ShenandoahBarrierSetC2State(comp_arena);
}

ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
// expanded later, then now is the time to do so.
bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const { return false; }

#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeExpand) {
    ShenandoahBarrierNode::verify(Compile::current()->root());
  } else if (phase == BarrierSetC2::BeforeCodeGen) {
    // Verify Shenandoah SATB pre-barriers
    const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());

    ResourceArea* area = Thread::current()->resource_area();
    Unique_Node_List visited(area);
    Node_List worklist(area);
    // We're going to walk control flow backwards starting from the Root
    worklist.push(compile->root());
    while (worklist.size() > 0) {
      Node* x = worklist.pop();
      if (x == NULL || x == compile->top()) continue;
      if (visited.member(x)) {
        continue;
      } else {
        visited.push(x);
      }

      if (x->is_Region()) {
        for (uint i = 1; i < x->req(); i++) {
          worklist.push(x->in(i));
        }
      } else {
        worklist.push(x->in(0));
        // We are looking for the pattern:
        //                            /->ThreadLocal
        // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
        //              \->ConI(0)
        // We want to verify that the If and the LoadB have the same control
        // See satb_write_barrier_pre()
        if (x->is_If()) {
          IfNode* iff = x->as_If();
          if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
            CmpNode* cmp = iff->in(1)->in(1)->as_Cmp();
            if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
                && cmp->in(1)->is_Load()) {
              LoadNode* load = cmp->in(1)->as_Load();
              if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                  && load->in(2)->in(3)->is_Con()
                  && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {

                Node* if_ctrl = iff->in(0);
                Node* load_ctrl = load->in(0);

                if (if_ctrl != load_ctrl) {
                  // Skip possible CProj->NeverBranch in infinite loops
                  if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                      && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
                    if_ctrl = if_ctrl->in(0)->in(0);
                  }
                }
                assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
              }
            }
          }
        }
      }
    }
  }
}
#endif

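// GVN hook. Two simplifications are done here: a CmpP against NULL may look
// through a barrier, because barriers never change the null-ness of their
// input (a barrier on a non-null object yields a non-null object); and a
// heap-stable test dominated by an identical heap-stable test can be folded
// away, the same way IfNode::Ideal folds dominating identical tests.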
Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const {
  if (is_shenandoah_wb_pre_call(n)) {
    uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
    if (n->req() > cnt) {
      Node* addp = n->in(cnt);
      if (has_only_shenandoah_wb_pre_uses(addp)) {
        n->del_req(cnt);
        if (can_reshape) {
          phase->is_IterGVN()->_worklist.push(addp);
        }
        return n;
      }
    }
  }
  if (n->Opcode() == Op_CmpP) {
    Node* in1 = n->in(1);
    Node* in2 = n->in(2);
    if (in1->bottom_type() == TypePtr::NULL_PTR) {
      in2 = step_over_gc_barrier(in2);
    }
    if (in2->bottom_type() == TypePtr::NULL_PTR) {
      in1 = step_over_gc_barrier(in1);
    }
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (in1 != n->in(1)) {
      if (igvn != NULL) {
        n->set_req_X(1, in1, igvn);
      } else {
        n->set_req(1, in1);
      }
      assert(in2 == n->in(2), "only one change");
      return n;
    }
    if (in2 != n->in(2)) {
      if (igvn != NULL) {
        n->set_req_X(2, in2, igvn);
      } else {
        n->set_req(2, in2);
      }
      return n;
    }
  } else if (can_reshape &&
             n->Opcode() == Op_If &&
             ShenandoahWriteBarrierNode::is_heap_stable_test(n) &&
             n->in(0) != NULL) {
    Node* dom = n->in(0);
    Node* prev_dom = n;
    int op = n->Opcode();
    int dist = 16;
    // Search up the dominator tree for another heap stable test
    while (dom->Opcode() != op ||  // Not same opcode?
           !ShenandoahWriteBarrierNode::is_heap_stable_test(dom) ||  // Not same input 1?
           prev_dom->in(0) != dom) {  // One path of test does not dominate?
      if (dist < 0) return NULL;

      dist--;
      prev_dom = dom;
      dom = IfNode::up_one_dom(dom);
      if (!dom) return NULL;
    }

    // Check that we did not follow a loop back to ourselves
    if (n == dom) {
      return NULL;
    }

    return n->as_If()->dominated_by(prev_dom, phase->is_IterGVN());
  }

  return NULL;
}

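// Identity hook for loads: let a load see a value that was stored through a
// barrier. If the stored value is still being reshaped by IGVN (a phi
// merging barrier outputs whose inputs sit on the worklist), the load is
// pushed back on the worklist and kept as-is for now; otherwise the barrier
// is stepped over so the load can common up with the stored value.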
Node* ShenandoahBarrierSetC2::identity_node(PhaseGVN* phase, Node* n) const {
  if (n->is_Load()) {
    Node* mem = n->in(MemNode::Memory);
    Node* value = n->as_Load()->can_see_stored_value(mem, phase);
    if (value) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      if (igvn != NULL &&
          value->is_Phi() &&
          value->req() > 2 &&
          value->in(1) != NULL &&
          value->in(1)->is_ShenandoahBarrier()) {
        if (igvn->_worklist.member(value) ||
            igvn->_worklist.member(value->in(0)) ||
            (value->in(0)->in(1) != NULL &&
             value->in(0)->in(1)->is_IfProj() &&
             (igvn->_worklist.member(value->in(0)->in(1)) ||
              (value->in(0)->in(1)->in(0) != NULL &&
               igvn->_worklist.member(value->in(0)->in(1)->in(0)))))) {
          igvn->_worklist.push(n);
          return n;
        }
      }
      // (This works even when value is a Con, but LoadNode::Value
      // usually runs first, producing the singleton type of the Con.)
      Node* value_no_barrier = step_over_gc_barrier(value->Opcode() == Op_EncodeP ? value->in(1) : value);
      if (value->Opcode() == Op_EncodeP) {
        if (value_no_barrier != value->in(1)) {
          Node* encode = value->clone();
          encode->set_req(1, value_no_barrier);
          encode = phase->transform(encode);
          return encode;
        }
      } else {
        return value_no_barrier;
      }
    }
  }
  return n;
}

bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!is_shenandoah_wb_pre_call(u)) {
      return false;
    }
  }
  return n->outcnt() > 0;
}

bool ShenandoahBarrierSetC2::flatten_gc_alias_type(const TypePtr*& adr_type) const {
  int offset = adr_type->offset();
  if (offset == ShenandoahBrooksPointer::byte_offset()) {
    if (adr_type->isa_aryptr()) {
      adr_type = TypeAryPtr::make(adr_type->ptr(), adr_type->isa_aryptr()->ary(), adr_type->isa_aryptr()->klass(), false, offset);
    } else if (adr_type->isa_instptr()) {
      adr_type = TypeInstPtr::make(adr_type->ptr(), ciEnv::current()->Object_klass(), false, NULL, offset);
    }
    return true;
  } else {
    return false;
  }
}

bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_CallLeaf:
    case Op_CallLeafNoFP: {
      assert(n->is_Call(), "");
      CallNode* call = n->as_Call();
      if (ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
        uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
        if (call->req() > cnt) {
          assert(call->req() == cnt + 1, "only one extra input");
          Node* addp = call->in(cnt);
          assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
          call->del_req(cnt);
        }
      }
      return false;
    }
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
#ifdef ASSERT
      if (VerifyOptoOopOffsets) {
        MemNode* mem = n->as_Mem();
        // Check to see if address types have grounded out somehow.
        const TypeInstPtr* tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
        ciInstanceKlass* k = tp->klass()->as_instance_klass();
        bool oop_offset_is_sane = k->contains_field_offset(tp->offset());
        assert(!tp || oop_offset_is_sane, "");
      }
#endif
      return true;
    case Op_ShenandoahReadBarrier:
      return true;
    case Op_ShenandoahWriteBarrier:
      assert(false, "should have been expanded already");
      return true;
    default:
      return false;
  }
}

#ifdef ASSERT
bool ShenandoahBarrierSetC2::verify_gc_alias_type(const TypePtr* adr_type, int offset) const {
  if (offset == ShenandoahBrooksPointer::byte_offset() &&
      (adr_type->base() == Type::AryPtr || adr_type->base() == Type::OopPtr)) {
    return true;
  } else {
    return false;
  }
}
#endif