/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/type.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"

const TypeFunc *G1BarrierSetC2::g1_wb_pre_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *G1BarrierSetC2::g1_wb_post_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;  // Card addr
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

#define __ ideal.
/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of the
 * marking are kept alive; therefore all reference updates need to record
 * the previous value of the reference before writing.
 *
 * If the previous value is NULL there is no need to save the old value.
 * References that are NULL are filtered during runtime by the barrier
 * code to avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove that the reference about to be overwritten is NULL at compile
 * time and avoid adding the barrier code completely.
 *
 * The compiler needs to determine that the object in which a field is about
 * to be written is newly allocated, and that no prior store to the same field
 * has happened since the allocation.
 *
 * Returns true if the pre-barrier can be removed.
 */
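// A rough Java-level sketch of the removable case (MyObject and its field are
// hypothetical names; the actual check below works on the C2 memory graph):
//
//   MyObject o = new MyObject();  // newly allocated, fields start out NULL
//   o.field = x;                  // previous value provably NULL -> no pre-barrier needed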
bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
                                               PhaseTransform* phase,
                                               Node* adr,
                                               BasicType bt,
                                               uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = kit->memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed not to be null here from the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL so that no previous store
        // has been moved up to directly write a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

// G1 pre/post barriers
void G1BarrierSetC2::pre_barrier(GraphKit* kit,
                                 bool do_load,
                                 Node* ctl,
                                 Node* obj,
                                 Node* adr,
                                 uint alias_idx,
                                 Node* val,
                                 const TypeOopPtr* val_type,
                                 Node* pre_val,
                                 BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (use_ReduceInitialCardMarks()
        && g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

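  // In rough pseudocode, the IdealKit graph built below corresponds to the
  // following (a sketch only, using the SATB queue fields loaded from the
  // thread further down):
  //
  //   if (thread->satb_mark_queue_active() != 0) {
  //     pre_val = *adr;                        // only when do_load
  //     if (pre_val != NULL) {
  //       if (index != 0) {                    // queue has room
  //         index -= sizeof(intptr_t);
  //         buffer[index] = pre_val;
  //       } else {
  //         g1_wb_pre(pre_val, thread);        // runtime leaf call
  //       }
  //     }
  //   }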
  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);

  BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");

  // Offsets into the thread
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  const int index_offset   = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);

  // if (marking != 0), i.e. only when concurrent marking is active
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node *log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = g1_wb_pre_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (marking != 0)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

/*
 * G1, like any GC with a young generation, requires a way to keep track of
 * references from the old generation to the young generation to make sure all
 * live objects are found. G1 also needs to keep track of object references
 * between different regions to enable evacuation of old regions, which is
 * done as part of mixed collections. References are tracked in remembered
 * sets, which are continuously updated as references are written, with the
 * help of the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates to fields in objects located in the young generation,
 * updates within the same region as the reference, stores of NULL, and
 * updates to cards already marked as dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, if it is possible at compile time to prove that
 * the object is newly allocated and that no safepoint exists between the
 * allocation and the store.
 *
 * In the case of a slow allocation, the allocation code must handle the
 * barrier as part of the allocation if the allocated object is not located
 * in the nursery; this happens for humongous objects. This is similar to
 * how CMS handles this case; see the comments for
 * CollectedHeap::new_deferred_store_barrier and OptoRuntime::new_deferred_store_barrier.
 * A deferred card mark is required for these objects and is handled in the
 * above mentioned methods.
 *
 * Returns true if the post barrier can be removed.
 */
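// A rough Java-level sketch of the removable case (MyObject is a hypothetical
// name; the actual test below is that the store's control comes from the
// InitializeNode of the same allocation, so no safepoint can intervene):
//
//   MyObject o = new MyObject();  // allocation, still being initialized
//   o.field = x;                  // store directly under the allocation's Initialize
//                                 //   -> post-barrier removable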
bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
                                                PhaseTransform* phase, Node* store,
                                                Node* adr) const {
  intptr_t      offset = 0;
  Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc  = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  // Start search from Store node
  Node* mem = store->in(MemNode::Control);
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode*  st_alloc = st_init->allocation();

    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}


//
// Update the card table and add card address to the queue
//
void G1BarrierSetC2::g1_mark_card(GraphKit* kit,
                                  IdealKit& ideal,
                                  Node* card_adr,
                                  Node* oop_store,
                                  uint oop_alias_idx,
                                  Node* index,
                                  Node* index_adr,
                                  Node* buffer,
                                  const TypeFunc* tf) const {
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT STORE
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

  // Now do the queue work
  __ if_then(index, BoolTest::ne, zeroX); {

    Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
    Node* log_addr = __ AddP(no_base, buffer, next_index);

    // Order, see storeCM.
    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);

  } __ else_(); {
    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
  } __ end_if();

}

void G1BarrierSetC2::post_barrier(GraphKit* kit,
                                  Node* ctl,
                                  Node* oop_store,
                                  Node* obj,
                                  Node* adr,
                                  uint alias_idx,
                                  Node* val,
                                  BasicType bt,
                                  bool use_precise) const {
  // If we are writing a NULL then we need no post barrier

  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    // Must be NULL
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    // No post barrier if writing NULL
    return;
  }

  if (use_ReduceInitialCardMarks() && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (use_ReduceInitialCardMarks()
      && g1_can_remove_post_barrier(kit, &kit->gvn(), oop_store, adr)) {
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

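  // In rough pseudocode, the filtering barrier built below corresponds to the
  // following (a sketch only; card_adr is byte_map_base + (adr >> card_shift)):
  //
  //   if (((adr ^ val) >> LogOfHRGrainBytes) != 0) {   // store crosses regions?
  //     if (val != NULL) {                             // not storing NULL?
  //       if (*card_adr != g1_young_card_val()) {      // not a young-gen card?
  //         membar;                                    // Op_MemBarVolatile
  //         if (*card_adr != dirty_card_val()) {       // not already dirty?
  //           g1_mark_card(...);                       // dirty the card, then enqueue
  //         }                                          // card_adr or call g1_wb_post()
  //       }                                            // when the queue is full
  //     }
  //   }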
  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  float unlikely  = PROB_UNLIKELY(0.999);
  Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)G1CardTable::dirty_card_val());
  Node* zeroX = __ ConX(0);

  const TypeFunc *tf = g1_wb_post_Type();

  // Offsets into the thread
  const int index_offset  = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  const int buffer_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

  // Pointers into the thread

  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some values
  // Use ctrl to avoid hoisting these values past a safepoint, which could
  // potentially reset these fields in the JavaThread.
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

  // Convert the store obj pointer to an int prior to doing math on it
  // Must use ctrl to prevent "integerized oop" existing across safepoint
  Node* cast =  __ CastPX(__ ctrl(), adr);

  // Divide pointer by card size
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );

  // Combine card table base and card offset
  Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset );

  // If we know the value being stored, check whether the store crosses regions.

  if (val != NULL) {
    // Does the store cause us to cross regions?

    // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare??
    // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
    Node* xor_res =  __ URShiftX ( __ XorX( cast,  __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));

    // if (xor_res == 0) same region so skip
    __ if_then(xor_res, BoolTest::ne, zeroX); {

      // No barrier if we are storing a NULL
      __ if_then(val, BoolTest::ne, kit->null(), unlikely); {

        // Ok must mark the card if not already dirty

        // load the original value of the card
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

        __ if_then(card_val, BoolTest::ne, young_card); {
          kit->sync_kit(ideal);
          kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
          __ sync_kit(kit);

          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
            g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
          } __ end_if();
        } __ end_if();
      } __ end_if();
    } __ end_if();
  } else {
    // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
    // We don't need a barrier here if the destination is a newly allocated object
    // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
    // are set to 'g1_young_gen' (see G1CardTable::verify_g1_young_region()).
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
    __ if_then(card_val, BoolTest::ne, young_card); {
      g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
    } __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

// Helper that guards and inserts a pre-barrier.
void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                        Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when G1
  // is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for a non-volatile load from the referent field
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::referent_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset, but not referent_offset, so just return.
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if ( klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
      // Update graphKit memory and control from IdealKit.
      kit->sync_kit(ideal);

      Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
      Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

      // Update IdealKit memory and control from graphKit.
      __ sync_kit(kit);

      Node* one = __ ConI(1);
      // is_instof == 0 if base_oop == NULL
      __ if_then(is_instof, BoolTest::eq, one, unlikely); {

        // Update graphKit from IdealKit.
        kit->sync_kit(ideal);

        // Use the pre-barrier to record the value in the referent field
        pre_barrier(kit, false /* do_load */,
                    __ ctrl(),
                    NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                    pre_val /* pre_val */,
                    T_OBJECT);
        if (need_mem_bar) {
          // Add a memory barrier to prevent commoning reads from this field
          // across safepoints, since the GC can change its value.
          kit->insert_mem_bar(Op_MemBarCPUOrder);
        }
        // Update IdealKit from graphKit.
        __ sync_kit(kit);

      } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  GraphKit* kit = access.kit();

  Node* adr = access.addr().node();
  Node* obj = access.base();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap;

  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : kit->top();
  Node* load = CardTableBarrierSetC2::load_at_resolved(access, val_type);

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if G1 is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // Also we need to add a memory barrier to prevent commoning reads
  // from this field across safepoints, since the GC can change its value.
  bool need_read_barrier = on_heap && (on_weak ||
                                       (unknown && offset != kit->top() && obj != kit->top()));

  if (!access.is_oop() || !need_read_barrier) {
    return load;
  }

  if (on_weak) {
    // Use the pre-barrier to record the value in the referent field
    pre_barrier(kit, false /* do_load */,
                kit->control(),
                NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                load /* pre_val */, T_OBJECT);
    // Add a memory barrier to prevent commoning reads from this field
    // across safepoints, since the GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}

bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (CardTableBarrierSetC2::is_gc_barrier_node(node)) {
    return true;
  }
  if (node->Opcode() != Op_CallLeaf) {
    return false;
  }
  CallLeafNode *call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "g1_wb_pre") == 0 || strcmp(call->_name, "g1_wb_post") == 0;
}

void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  assert(node->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
  // It could be only one user, URShift node, in Object.clone() intrinsic
  // but the new allocation is passed to arraycopy stub and it could not
  // be scalar replaced. So we don't check the case.

  // Another case of only one user (Xor) is when the value check for NULL
  // in the G1 post barrier is folded after CCP, so the code which used URShift
  // is removed.

  // Take the Region node before eliminating the post barrier since it also
  // eliminates the CastP2X node when it has only one user.
  Node* this_region = node->in(0);
  assert(this_region != NULL, "");

  // Remove G1 post barrier.

  // Search for the CastP2X->Xor->URShift->Cmp path which checks whether the
  // store is done to a region different from the value's region, and replace
  // the Cmp with #0 (false) to collapse the G1 post barrier.
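  //
  // A minimal sketch of the node shape being matched here (assuming the graph
  // emitted by post_barrier() above):
  //
  //   CastP2X -> XorX -> URShiftX -> CmpX -> Bool(ne) -> If
  //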
  Node* xorx = node->find_out_with(Op_XorX);
  if (xorx != NULL) {
    Node* shift = xorx->unique_out();
    Node* cmpx = shift->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing region check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));

    // Remove G1 pre barrier.

    // Search for the "if (marking != 0)" check and set it to "false".
    // There is no G1 pre barrier if the previously stored value is NULL
    // (for example, after initialization).
    if (this_region->is_Region() && this_region->req() == 3) {
      int ind = 1;
      if (!this_region->in(ind)->is_IfFalse()) {
        ind = 2;
      }
      if (this_region->in(ind)->is_IfFalse()) {
        Node* bol = this_region->in(ind)->in(0)->in(1);
        assert(bol->is_Bool(), "");
        cmpx = bol->in(1);
        if (bol->as_Bool()->_test._test == BoolTest::ne &&
            cmpx->is_Cmp() && cmpx->in(2) == macro->intcon(0) &&
            cmpx->in(1)->is_Load()) {
          Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
          const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
          if (adr->is_AddP() && adr->in(AddPNode::Base) == macro->top() &&
              adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
              adr->in(AddPNode::Offset) == macro->MakeConX(marking_offset)) {
            macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
          }
        }
      }
    }
  } else {
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    // This is a G1 post barrier emitted by the Object.clone() intrinsic.
    // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if the card
    // is marked as young_gen and replace the Cmp with 0 (false) to collapse the barrier.
    Node* shift = node->find_out_with(Op_URShiftX);
    assert(shift != NULL, "missing G1 post barrier");
    Node* addp = shift->unique_out();
    Node* load = addp->find_out_with(Op_LoadB);
    assert(load != NULL, "missing G1 post barrier");
    Node* cmpx = load->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing card value check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
    // There is no G1 pre barrier in this case.
  }
  // Now the CastP2X can be removed since it is used only on a dead path
  // which currently is still alive until igvn optimizes it.
  assert(node->outcnt() == 0 || node->unique_out()->Opcode() == Op_URShiftX, "");
  macro->replace_node(node, macro->top());
}

Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (!use_ReduceInitialCardMarks() &&
      c != NULL && c->is_Region() && c->req() == 3) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != NULL && c->in(i)->is_Region() &&
          c->in(i)->req() == 3) {
        Node* r = c->in(i);
        for (uint j = 1; j < r->req(); j++) {
          if (r->in(j) != NULL && r->in(j)->is_Proj() &&
              r->in(j)->in(0) != NULL &&
              r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
              r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post)) {
            Node* call = r->in(j)->in(0);
            c = c->in(i == 1 ? 2 : 1);
            if (c != NULL) {
              c = c->in(0);
              if (c != NULL) {
                c = c->in(0);
                assert(call->in(0) == NULL ||
                       call->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0)->in(0) == NULL ||
                       c == call->in(0)->in(0)->in(0)->in(0)->in(0), "bad barrier shape");
                return c;
              }
            }
          }
        }
      }
    }
  }
  return c;
}