/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/compile.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/macros.hpp"

const TypeFunc *G1BarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Card addr
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

#define __ ideal.
/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of the
 * marking are kept alive: every reference update must log the previous
 * reference value before the write happens.
 *
 * If the previous value is NULL there is no need to save the old value.
 * References that are NULL are filtered during runtime by the barrier
 * code to avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove that the reference about to be overwritten is NULL at compile
 * time and avoid adding the barrier code completely.
 *
 * The compiler needs to determine that the object in which a field is about
 * to be written is newly allocated, and that no prior store to the same field
 * has happened since the allocation.
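 *
 * For example (an illustrative Java snippet, not taken from this file;
 * MyObject is a hypothetical class):
 *
 *   MyObject o = new MyObject(); // fresh allocation, all fields are NULL
 *   o.field = x;                 // previous value is provably NULL, so the
 *                                // SATB pre-barrier for this store can be
 *                                // elided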
 *
 * Returns true if the pre-barrier can be removed.
 */
bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
                                               PhaseTransform* phase,
                                               Node* adr,
                                               BasicType bt,
                                               uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = kit->memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success: The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects. We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success: The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to not be null here from the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL so that no previous store
        // has been moved up to directly write a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

// G1 pre/post barriers
void G1BarrierSetC2::pre_barrier(GraphKit* kit,
                                 bool do_load,
                                 Node* ctl,
                                 Node* obj,
                                 Node* adr,
                                 uint alias_idx,
                                 Node* val,
                                 const TypeOopPtr* val_type,
                                 Node* pre_val,
                                 BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.
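
  // A sketch in pseudo-code of the barrier emitted below (the actual IR is
  // built with IdealKit; buffer/index name the SATB queue fields in the
  // thread-local data):
  //
  //   if (marking is active) {
  //     pre_val = *adr;                                // only if do_load
  //     if (pre_val != NULL) {
  //       if (index != 0) {                            // buffer has room
  //         index -= sizeof(intptr_t);
  //         buffer[index] = pre_val;                   // log the old value
  //       } else {
  //         write_ref_field_pre_entry(pre_val, thread); // runtime call
  //       }
  //     }
  //   }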

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (use_ReduceInitialCardMarks()
        && g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");

  // Offsets into the thread
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  const int index_offset   = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);

  // if (marking is active)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
    Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // Is the SATB queue for this thread full?
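      // (The buffer is filled from the end towards index 0, so index == 0
      //  means the buffer is full and the runtime must be called to flush it.)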
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node *log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), "write_ref_field_pre_entry", pre_val, tls);
      } __ end_if(); // (index != 0)
    } __ end_if(); // (pre_val != NULL)
  } __ end_if(); // (marking is active)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

/*
 * G1, like any GC with a young generation, requires a way to keep track of
 * references from the old generation to the young generation to make sure
 * all live objects are found. G1 also needs to keep track of object
 * references between different regions to enable evacuation of old regions,
 * which is done as part of mixed collections. References are tracked in
 * remembered sets, which are continuously updated as references are written,
 * with the help of the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates to fields in objects located in the young generation,
 * updates where the field and the new reference are in the same region,
 * writes of NULL, and writes to cards already marked dirty by an earlier
 * write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, if it can be proven at compile time that the
 * object is newly allocated and that no safepoint exists between the
 * allocation and the store.
 *
 * In the case of a slow-path allocation, the allocation code must handle
 * the barrier as part of the allocation if the allocated object is not
 * located in the nursery; this happens for humongous objects. This is
 * similar to how CMS is required to handle this case, see the comments for
 * the methods CollectedHeap::new_deferred_store_barrier and
 * OptoRuntime::new_deferred_store_barrier. A deferred card mark is required
 * for these objects and handled in the above mentioned methods.
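 *
 * For example (an illustrative Java snippet, not taken from this file;
 * MyObject is a hypothetical class):
 *
 *   MyObject o = new MyObject(); // no safepoint between this allocation...
 *   o.field = x;                 // ...and this store, so with
 *                                // ReduceInitialCardMarks the post-barrier
 *                                // can be elided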
 *
 * Returns true if the post barrier can be removed.
 */
bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
                                                PhaseTransform* phase, Node* store,
                                                Node* adr) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  // Start search from Store node
  Node* mem = store->in(MemNode::Control);
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode* st_alloc = st_init->allocation();

    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}

//
// Update the card table and add the card address to the queue
//
void G1BarrierSetC2::g1_mark_card(GraphKit* kit,
                                  IdealKit& ideal,
                                  Node* card_adr,
                                  Node* oop_store,
                                  uint oop_alias_idx,
                                  Node* index,
                                  Node* index_adr,
                                  Node* buffer,
                                  const TypeFunc* tf) const {
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE.
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

  // Now do the queue work
  __ if_then(index, BoolTest::ne, zeroX); {

    Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
    Node* log_addr = __ AddP(no_base, buffer, next_index);

    // Order, see storeCM.
    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);

  } __ else_(); {
    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), "write_ref_field_post_entry", card_adr, __ thread());
  } __ end_if();

}

void G1BarrierSetC2::post_barrier(GraphKit* kit,
                                  Node* ctl,
                                  Node* oop_store,
                                  Node* obj,
                                  Node* adr,
                                  uint alias_idx,
                                  Node* val,
                                  BasicType bt,
                                  bool use_precise) const {
  // If we are writing a NULL then we need no post barrier

  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    // Must be NULL
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    // No post barrier if writing NULL
    return;
  }

  if (use_ReduceInitialCardMarks() && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (use_ReduceInitialCardMarks()
      && g1_can_remove_post_barrier(kit, &kit->gvn(), oop_store, adr)) {
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
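
  // A sketch in pseudo-code of the barrier emitted below for the common case
  // where the stored value is known (val != NULL); the actual IR is built
  // with IdealKit:
  //
  //   if (((adr ^ val) >> LogOfHRGrainBytes) != 0) { // crosses regions?
  //     if (val != NULL) {                           // not storing NULL?
  //       card_val = *card_adr;
  //       if (card_val != young_card) {
  //         membar;                                  // MemBarVolatile, see below
  //         if (*card_adr != dirty_card) {
  //           *card_adr = dirty_card;                // g1_mark_card
  //           enqueue card_adr in the dirty card queue, or call
  //           write_ref_field_post_entry if the queue is full;
  //         }
  //       }
  //     }
  //   }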
  assert(adr != NULL, "");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  float unlikely = PROB_UNLIKELY(0.999);
  Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)G1CardTable::dirty_card_val());
  Node* zeroX = __ ConX(0);

  const TypeFunc *tf = write_ref_field_post_entry_Type();

  // Offsets into the thread
  const int index_offset  = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  const int buffer_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

  // Pointers into the thread

  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr  = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some values
  // Use ctrl to avoid hoisting these values past a safepoint, which could
  // potentially reset these fields in the JavaThread.
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

  // Convert the store obj pointer to an int prior to doing math on it
  // Must use ctrl to prevent "integerized oop" existing across safepoint
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide pointer by card size
  Node* card_offset = __ URShiftX(cast, __ ConI(CardTable::card_shift));

  // Combine card table base and card offset
  Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset);

  // If we know the value being stored, check whether the store crosses regions.

  if (val != NULL) {
    // Does the store cause us to cross regions?

    // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare??
    // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
    Node* xor_res = __ URShiftX(__ XorX(cast, __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));

    // if (xor_res == 0) same region so skip
    __ if_then(xor_res, BoolTest::ne, zeroX); {

      // No barrier if we are storing a NULL
      __ if_then(val, BoolTest::ne, kit->null(), unlikely); {

        // OK, we must mark the card if it is not already dirty

        // load the original value of the card
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

        __ if_then(card_val, BoolTest::ne, young_card); {
          kit->sync_kit(ideal);
          kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
          __ sync_kit(kit);

          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
            g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
          } __ end_if();
        } __ end_if();
      } __ end_if();
    } __ end_if();
  } else {
    // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
    // We don't need a barrier here if the destination is a newly allocated object
    // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
    // are set to 'g1_young_gen' (see G1CardTable::verify_g1_young_region()).
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
    __ if_then(card_val, BoolTest::ne, young_card); {
      g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
    } __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

// Helper that guards and inserts a pre-barrier.
void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                        Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when G1
  // is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for non-volatile loads from the referent field
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset but not the reference_offset so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters:
  //
  // if (offset == java_lang_ref_Reference::_reference_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
    // Update graphKit memory and control from IdealKit.
    kit->sync_kit(ideal);

    Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
    Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

    // Update IdealKit memory and control from graphKit.
    __ sync_kit(kit);

    Node* one = __ ConI(1);
    // is_instof == 0 if base_oop == NULL
    __ if_then(is_instof, BoolTest::eq, one, unlikely); {

      // Update graphKit from IdealKit.
      kit->sync_kit(ideal);

      // Use the pre-barrier to record the value in the referent field
      pre_barrier(kit, false /* do_load */,
                  __ ctrl(),
                  NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                  pre_val /* pre_val */,
                  T_OBJECT);
      if (need_mem_bar) {
        // Add a memory barrier to prevent commoning reads from this field
        // across safepoints, since GC can change its value.
        kit->insert_mem_bar(Op_MemBarCPUOrder);
      }
      // Update IdealKit from graphKit.
      __ sync_kit(kit);

    } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  GraphKit* kit = access.kit();

  Node* adr = access.addr().node();
  Node* obj = access.base();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool need_cpu_mem_bar = !is_unordered || mismatched || !in_heap;

  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : kit->top();
  Node* load = CardTableBarrierSetC2::load_at_resolved(access, val_type);

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if G1 is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // We also need to add a memory barrier to prevent commoning reads
  // from this field across safepoints, since GC can change its value.
  bool need_read_barrier = in_heap && (on_weak ||
                                       (unknown && offset != kit->top() && obj != kit->top()));

  if (!access.is_oop() || !need_read_barrier) {
    return load;
  }

  if (on_weak) {
    // Use the pre-barrier to record the value in the referent field
    pre_barrier(kit, false /* do_load */,
                kit->control(),
                NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                load /* pre_val */, T_OBJECT);
    // Add a memory barrier to prevent commoning reads from this field
    // across safepoints, since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}

bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (CardTableBarrierSetC2::is_gc_barrier_node(node)) {
    return true;
  }
  if (node->Opcode() != Op_CallLeaf) {
    return false;
  }
  CallLeafNode *call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "write_ref_field_pre_entry") == 0 || strcmp(call->_name, "write_ref_field_post_entry") == 0;
}

void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  assert(node->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
  // There could be only one user, the URShift node, in the Object.clone()
  // intrinsic, but then the new allocation is passed to the arraycopy stub
  // and cannot be scalar replaced, so we don't check that case.

  // Another case of only one user (Xor) is when the value check for NULL
  // in the G1 post barrier is folded after CCP, so the code which used
  // URShift is removed.

  // Take the Region node before eliminating the post barrier since it also
  // eliminates the CastP2X node when it has only one user.
  Node* this_region = node->in(0);
  assert(this_region != NULL, "");

  // Remove G1 post barrier.

  // Search for the CastP2X->Xor->URShift->Cmp path which checks if the store
  // was done to a region different from the value's region.
  // Then replace the Cmp with #0 (false) to collapse the G1 post barrier.
  Node* xorx = node->find_out_with(Op_XorX);
  if (xorx != NULL) {
    Node* shift = xorx->unique_out();
    Node* cmpx = shift->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing region check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));

    // Remove G1 pre barrier.

    // Search for the "if (marking != 0)" check and set it to "false".
    // There is no G1 pre barrier if the previously stored value is NULL
    // (for example, after initialization).
    if (this_region->is_Region() && this_region->req() == 3) {
      int ind = 1;
      if (!this_region->in(ind)->is_IfFalse()) {
        ind = 2;
      }
      if (this_region->in(ind)->is_IfFalse() &&
          this_region->in(ind)->in(0)->Opcode() == Op_If) {
        Node* bol = this_region->in(ind)->in(0)->in(1);
        assert(bol->is_Bool(), "");
        cmpx = bol->in(1);
        if (bol->as_Bool()->_test._test == BoolTest::ne &&
            cmpx->is_Cmp() && cmpx->in(2) == macro->intcon(0) &&
            cmpx->in(1)->is_Load()) {
          Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
          const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
          if (adr->is_AddP() && adr->in(AddPNode::Base) == macro->top() &&
              adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
              adr->in(AddPNode::Offset) == macro->MakeConX(marking_offset)) {
            macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
          }
        }
      }
    }
  } else {
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    // This is a G1 post barrier emitted by the Object.clone() intrinsic.
    // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if
    // the card is marked as young_gen, and replace the Cmp with 0 (false) to
    // collapse the barrier.
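    //
    // A sketch of the node shape being matched (each arrow is a use):
    //
    //   CastP2X -> URShiftX -> AddP -> LoadB -> Cmp(card_val, young_card)
    //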
    Node* shift = node->find_out_with(Op_URShiftX);
    assert(shift != NULL, "missing G1 post barrier");
    Node* addp = shift->unique_out();
    Node* load = addp->find_out_with(Op_LoadB);
    assert(load != NULL, "missing G1 post barrier");
    Node* cmpx = load->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing card value check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
    // There is no G1 pre barrier in this case
  }
  // Now CastP2X can be removed since it is used only on a dead path,
  // which is currently still alive until IGVN optimizes it.
  assert(node->outcnt() == 0 || node->unique_out()->Opcode() == Op_URShiftX, "");
  macro->replace_node(node, macro->top());
}

Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (!use_ReduceInitialCardMarks() &&
      c != NULL && c->is_Region() && c->req() == 3) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != NULL && c->in(i)->is_Region() &&
          c->in(i)->req() == 3) {
        Node* r = c->in(i);
        for (uint j = 1; j < r->req(); j++) {
          if (r->in(j) != NULL && r->in(j)->is_Proj() &&
              r->in(j)->in(0) != NULL &&
              r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
              r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry)) {
            Node* call = r->in(j)->in(0);
            c = c->in(i == 1 ? 2 : 1);
            if (c != NULL) {
              c = c->in(0);
              if (c != NULL) {
                c = c->in(0);
                assert(call->in(0) == NULL ||
                       call->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0)->in(0) == NULL ||
                       c == call->in(0)->in(0)->in(0)->in(0)->in(0), "bad barrier shape");
                return c;
              }
            }
          }
        }
      }
    }
  }
  return c;
}

#ifdef ASSERT
void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  // Verify G1 pre-barriers
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());

  ResourceArea *area = Thread::current()->resource_area();
  Unique_Node_List visited(area);
  Node_List worklist(area);
  // We're going to walk control flow backwards starting from the Root
  worklist.push(compile->root());
  while (worklist.size() > 0) {
    Node* x = worklist.pop();
    if (x == NULL || x == compile->top()) continue;
    if (visited.member(x)) {
      continue;
    } else {
      visited.push(x);
    }

    if (x->is_Region()) {
      for (uint i = 1; i < x->req(); i++) {
        worklist.push(x->in(i));
      }
    } else {
      worklist.push(x->in(0));
      // We are looking for the pattern:
      //                            /->ThreadLocal
      // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
      //              \->ConI(0)
      // We want to verify that the If and the LoadB have the same control
      // See G1BarrierSetC2::pre_barrier()
      if (x->is_If()) {
        IfNode *iff = x->as_If();
        if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
          CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
          if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
              && cmp->in(1)->is_Load()) {
            LoadNode* load = cmp->in(1)->as_Load();
            if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                && load->in(2)->in(3)->is_Con()
                && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {

              Node* if_ctrl = iff->in(0);
              Node* load_ctrl = load->in(0);

              if (if_ctrl != load_ctrl) {
                // Skip possible CProj->NeverBranch in infinite loops
                if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                    && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
                  if_ctrl = if_ctrl->in(0)->in(0);
                }
              }
              assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
            }
          }
        }
      }
    }
  }
}
#endif