1 /*
  2  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/g1/c2/g1BarrierSetC2.hpp"
 27 #include "gc/g1/g1BarrierSet.hpp"
 28 #include "gc/g1/g1BarrierSetRuntime.hpp"
 29 #include "gc/g1/g1CardTable.hpp"
 30 #include "gc/g1/g1ThreadLocalData.hpp"
 31 #include "gc/g1/heapRegion.hpp"
 32 #include "opto/arraycopynode.hpp"
 33 #include "opto/compile.hpp"
 34 #include "opto/escape.hpp"
 35 #include "opto/graphKit.hpp"
 36 #include "opto/idealKit.hpp"
 37 #include "opto/macro.hpp"
 38 #include "opto/rootnode.hpp"
 39 #include "opto/type.hpp"
 40 #include "utilities/macros.hpp"
 41 
 42 const TypeFunc *G1BarrierSetC2::write_ref_field_pre_entry_Type() {
 43   const Type **fields = TypeTuple::fields(2);
 44   fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
 45   fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
 46   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
 47 
 48   // create result type (range)
 49   fields = TypeTuple::fields(0);
 50   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
 51 
 52   return TypeFunc::make(domain, range);
 53 }
 54 
 55 const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() {
 56   const Type **fields = TypeTuple::fields(2);
 57   fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;  // Card addr
 58   fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
 59   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
 60 
 61   // create result type (range)
 62   fields = TypeTuple::fields(0);
 63   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
 64 
 65   return TypeFunc::make(domain, range);
 66 }
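// The two functions above only describe the C2 calling convention for the G1
// barrier slow-path leaf calls. As a rough sketch (types are approximate; see
// G1BarrierSetRuntime for the authoritative declarations), the runtime entry
// points they correspond to look like:
//
//   void write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread);              // (old oop, thread) -> void
//   void write_ref_field_post_entry(volatile jbyte* card_addr, JavaThread* thread); // (card address, thread) -> void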
 67 
 68 #define __ ideal.
 69 /*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of the
 * marking are kept alive; therefore all reference updates need to record
 * any previous reference stored in the field before it is overwritten.
 74  *
 75  * If the previous value is NULL there is no need to save the old value.
 76  * References that are NULL are filtered during runtime by the barrier
 77  * code to avoid unnecessary queuing.
 78  *
 79  * However in the case of newly allocated objects it might be possible to
 80  * prove that the reference about to be overwritten is NULL during compile
 81  * time and avoid adding the barrier code completely.
 82  *
 83  * The compiler needs to determine that the object in which a field is about
 84  * to be written is newly allocated, and that no prior store to the same field
 85  * has happened since the allocation.
 86  *
 87  * Returns true if the pre-barrier can be removed
 88  */
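/*
 * Illustrative sketch (not part of the original comment) of a Java-level shape
 * where the pre-barrier is removable, assuming the allocation and the store end
 * up in the same compiled method with no intervening store to the same field:
 *
 *   Foo obj = new Foo();   // freshly allocated; reference fields start out NULL
 *   obj.field = x;         // previous value provably NULL -> no SATB pre-barrier needed
 *
 * If an unrelated store to the same field, or an inscrutable memory state such
 * as a call, is found between the allocation and the store, the walk below
 * bails out and the pre-barrier is kept.
 */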
 89 bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
 90                                                PhaseTransform* phase,
 91                                                Node* adr,
 92                                                BasicType bt,
 93                                                uint adr_idx) const {
 94   intptr_t offset = 0;
 95   Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
 96   AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
 97 
 98   if (offset == Type::OffsetBot) {
 99     return false; // cannot unalias unless there are precise offsets
100   }
101 
102   if (alloc == NULL) {
103     return false; // No allocation found
104   }
105 
106   intptr_t size_in_bytes = type2aelembytes(bt);
107 
108   Node* mem = kit->memory(adr_idx); // start searching here...
109 
110   for (int cnt = 0; cnt < 50; cnt++) {
111 
112     if (mem->is_Store()) {
113 
114       Node* st_adr = mem->in(MemNode::Address);
115       intptr_t st_offset = 0;
116       Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
117 
118       if (st_base == NULL) {
119         break; // inscrutable pointer
120       }
121 
      // We have found a store with the same base and offset as ours, so break out.
123       if (st_base == base && st_offset == offset) {
124         break;
125       }
126 
127       if (st_offset != offset && st_offset != Type::OffsetBot) {
128         const int MAX_STORE = BytesPerLong;
129         if (st_offset >= offset + size_in_bytes ||
130             st_offset <= offset - MAX_STORE ||
131             st_offset <= offset - mem->as_Store()->memory_size()) {
132           // Success:  The offsets are provably independent.
133           // (You may ask, why not just test st_offset != offset and be done?
134           // The answer is that stores of different sizes can co-exist
135           // in the same sequence of RawMem effects.  We sometimes initialize
136           // a whole 'tile' of array elements with a single jint or jlong.)
137           mem = mem->in(MemNode::Memory);
138           continue; // advance through independent store memory
139         }
140       }
141 
142       if (st_base != base
143           && MemNode::detect_ptr_independence(base, alloc, st_base,
144                                               AllocateNode::Ideal_allocation(st_base, phase),
145                                               phase)) {
146         // Success:  The bases are provably independent.
147         mem = mem->in(MemNode::Memory);
148         continue; // advance through independent store memory
149       }
150     } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
151 
152       InitializeNode* st_init = mem->in(0)->as_Initialize();
153       AllocateNode* st_alloc = st_init->allocation();
154 
155       // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed not to be null here because of the earlier check.
157       if (alloc == st_alloc) {
        // Check that the initialization is storing NULL, i.e. that no previous store
        // has been captured by the initialization to directly write a reference.
160         Node* captured_store = st_init->find_captured_store(offset,
161                                                             type2aelembytes(T_OBJECT),
162                                                             phase);
163         if (captured_store == NULL || captured_store == st_init->zero_memory()) {
164           return true;
165         }
166       }
167     }
168 
169     // Unless there is an explicit 'continue', we must bail out here,
170     // because 'mem' is an inscrutable memory state (e.g., a call).
171     break;
172   }
173 
174   return false;
175 }
176 
177 // G1 pre/post barriers
178 void G1BarrierSetC2::pre_barrier(GraphKit* kit,
179                                  bool do_load,
180                                  Node* ctl,
181                                  Node* obj,
182                                  Node* adr,
183                                  uint alias_idx,
184                                  Node* val,
185                                  const TypeOopPtr* val_type,
186                                  Node* pre_val,
187                                  BasicType bt) const {
188   // Some sanity checks
189   // Note: val is unused in this routine.
190 
191   if (do_load) {
192     // We need to generate the load of the previous value
193     assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
195     assert(pre_val == NULL, "loaded already?");
196     assert(val_type != NULL, "need a type");
197 
198     if (use_ReduceInitialCardMarks()
199         && g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
200       return;
201     }
202 
203   } else {
204     // In this case both val_type and alias_idx are unused.
205     assert(pre_val != NULL, "must be loaded already");
206     // Nothing to be done if pre_val is null.
207     if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
208     assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
209   }
210   assert(bt == T_OBJECT, "or we shouldn't be here");
211 
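  // The IR emitted below corresponds, roughly, to the following pseudo code
  // (an illustrative sketch of the SATB queue fast path; the field names are
  // descriptive only):
  //
  //   if (thread->satb_mark_queue_active() != 0) {        // is SATB marking active?
  //     pre_val = *adr;                                    // only if do_load
  //     if (pre_val != NULL) {
  //       if (index != 0) {                                // room left in the thread-local buffer?
  //         index -= sizeof(intptr_t);                     // index is a byte offset
  //         *(buffer + index) = pre_val;                   // log the previous value
  //         thread->satb_mark_queue_index() = index;
  //       } else {
  //         write_ref_field_pre_entry(pre_val, thread);    // slow path: runtime call
  //       }
  //     }
  //   }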
212   IdealKit ideal(kit, true);
213 
214   Node* tls = __ thread(); // ThreadLocalStorage
215 
216   Node* no_base = __ top();
217   Node* zero  = __ ConI(0);
218   Node* zeroX = __ ConX(0);
219 
220   float likely  = PROB_LIKELY(0.999);
221   float unlikely  = PROB_UNLIKELY(0.999);
222 
223   BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
224   assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");
225 
226   // Offsets into the thread
227   const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
228   const int index_offset   = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
229   const int buffer_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
230 
231   // Now the actual pointers into the thread
232   Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
233   Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
234   Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));
235 
236   // Now some of the values
237   Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
238 
  // if (marking != 0), i.e. SATB marking is active
240   __ if_then(marking, BoolTest::ne, zero, unlikely); {
241     BasicType index_bt = TypeX_X->basic_type();
242     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
243     Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
244 
245     if (do_load) {
246       // load original value
247       // alias_idx correct??
248       pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
249     }
250 
251     // if (pre_val != NULL)
252     __ if_then(pre_val, BoolTest::ne, kit->null()); {
253       Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
254 
255       // is the queue for this thread full?
256       __ if_then(index, BoolTest::ne, zeroX, likely); {
257 
258         // decrement the index
259         Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
260 
261         // Now get the buffer location we will log the previous value into and store it
262         Node *log_addr = __ AddP(no_base, buffer, next_index);
263         __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
264         // update the index
265         __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
266 
267       } __ else_(); {
268 
269         // logging buffer is full, call the runtime
270         const TypeFunc *tf = write_ref_field_pre_entry_Type();
271         __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), "write_ref_field_pre_entry", pre_val, tls);
272       } __ end_if();  // (!index)
273     } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (marking != 0)
275 
276   // Final sync IdealKit and GraphKit.
277   kit->final_sync(ideal);
278 }
279 
/*
 * G1, similarly to any GC with a Young Generation, requires a way to keep track
 * of references from the Old Generation to the Young Generation to make sure
 * all live objects are found. G1 also needs to keep track of object references
 * between different regions to enable evacuation of old regions, which is done
 * as part of mixed collections. References are tracked in remembered sets,
 * which are continuously updated as references are written, with the help of
 * the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates to fields in objects located in the Young Generation,
 * updates where the field and the new value are in the same region, updates
 * where NULL is being written, and updates where the card is already marked
 * as dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, if it can be proven at compile time that the
 * object is newly allocated and that no safepoint exists between the
 * allocation and the store.
 *
 * In the case of a slow-path allocation, the allocation code must handle the
 * barrier as part of the allocation if the allocated object is not located
 * in the nursery; this would, for example, happen for humongous objects.
 *
 * Returns true if the post barrier can be removed
 */
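/*
 * Illustrative sketch (an assumption-based reading of the check below): the
 * post barrier is only removed when the store is control-dependent on the
 * InitializeNode of the very allocation that produced the base, i.e. when the
 * store is part of the object initialization and no safepoint can separate the
 * allocation from the store:
 *
 *   alloc = Allocate(...)
 *   init  = alloc->initialization()
 *   store = StoreP(ctrl: Proj(init), adr: AddP(base_of(alloc), offset), val)
 */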
305 bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
306                                                 PhaseTransform* phase, Node* store,
307                                                 Node* adr) const {
308   intptr_t      offset = 0;
309   Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
310   AllocateNode* alloc  = AllocateNode::Ideal_allocation(base, phase);
311 
312   if (offset == Type::OffsetBot) {
313     return false; // cannot unalias unless there are precise offsets
314   }
315 
316   if (alloc == NULL) {
317      return false; // No allocation found
318   }
319 
320   // Start search from Store node
321   Node* mem = store->in(MemNode::Control);
322   if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
323 
324     InitializeNode* st_init = mem->in(0)->as_Initialize();
325     AllocateNode*  st_alloc = st_init->allocation();
326 
327     // Make sure we are looking at the same allocation
328     if (alloc == st_alloc) {
329       return true;
330     }
331   }
332 
333   return false;
334 }
335 
336 //
337 // Update the card table and add card address to the queue
338 //
339 void G1BarrierSetC2::g1_mark_card(GraphKit* kit,
340                                   IdealKit& ideal,
341                                   Node* card_adr,
342                                   Node* oop_store,
343                                   uint oop_alias_idx,
344                                   Node* index,
345                                   Node* index_adr,
346                                   Node* buffer,
347                                   const TypeFunc* tf) const {
348   Node* zero  = __ ConI(0);
349   Node* zeroX = __ ConX(0);
350   Node* no_base = __ top();
351   BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE.
353   __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
354 
355   //  Now do the queue work
356   __ if_then(index, BoolTest::ne, zeroX); {
357 
358     Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
359     Node* log_addr = __ AddP(no_base, buffer, next_index);
360 
361     // Order, see storeCM.
362     __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
363     __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);
364 
365   } __ else_(); {
366     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), "write_ref_field_post_entry", card_adr, __ thread());
367   } __ end_if();
368 
369 }
370 
371 void G1BarrierSetC2::post_barrier(GraphKit* kit,
372                                   Node* ctl,
373                                   Node* oop_store,
374                                   Node* obj,
375                                   Node* adr,
376                                   uint alias_idx,
377                                   Node* val,
378                                   BasicType bt,
379                                   bool use_precise) const {
380   // If we are writing a NULL then we need no post barrier
381 
382   if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
383     // Must be NULL
384     const Type* t = val->bottom_type();
385     assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    // No post barrier if writing NULL
387     return;
388   }
389 
390   if (use_ReduceInitialCardMarks() && obj == kit->just_allocated_object(kit->control())) {
391     // We can skip marks on a freshly-allocated object in Eden.
392     // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
393     // That routine informs GC to take appropriate compensating steps,
394     // upon a slow-path allocation, so as to make this card-mark
395     // elision safe.
396     return;
397   }
398 
399   if (use_ReduceInitialCardMarks()
400       && g1_can_remove_post_barrier(kit, &kit->gvn(), oop_store, adr)) {
401     return;
402   }
403 
404   if (!use_precise) {
405     // All card marks for a (non-array) instance are in one place:
406     adr = obj;
407   }
408   // (Else it's an array (or unknown), and we want more precise card marks.)
409   assert(adr != NULL, "");
410 
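  // The IR emitted below corresponds, roughly, to the following pseudo code
  // (an illustrative sketch of the dirty card queue fast path; the field names
  // are descriptive only):
  //
  //   card_adr = byte_map_base + (uintptr_t(adr) >> card_shift);
  //   if ((uintptr_t(adr) ^ uintptr_t(val)) >> LogOfHRGrainBytes != 0    // crosses regions?
  //       && val != NULL) {                                              // storing non-NULL?
  //     if (*card_adr != g1_young_card_val) {                            // not a young region?
  //       membar(StoreLoad);                                             // order oop store and card load
  //       if (*card_adr != dirty_card_val) {                             // not already dirtied?
  //         *card_adr = dirty_card_val;                                  // g1_mark_card()
  //         if (index != 0) {                                            // room left in the buffer?
  //           index -= sizeof(intptr_t);
  //           *(buffer + index) = card_adr;                              // enqueue the card
  //           thread->dirty_card_queue_index() = index;
  //         } else {
  //           write_ref_field_post_entry(card_adr, thread);              // slow path: runtime call
  //         }
  //       }
  //     }
  //   }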
411   IdealKit ideal(kit, true);
412 
413   Node* tls = __ thread(); // ThreadLocalStorage
414 
415   Node* no_base = __ top();
416   float likely = PROB_LIKELY_MAG(3);
417   float unlikely = PROB_UNLIKELY_MAG(3);
418   Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
419   Node* dirty_card = __ ConI((jint)G1CardTable::dirty_card_val());
420   Node* zeroX = __ ConX(0);
421 
422   const TypeFunc *tf = write_ref_field_post_entry_Type();
423 
424   // Offsets into the thread
425   const int index_offset  = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
426   const int buffer_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
427 
428   // Pointers into the thread
429 
430   Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
431   Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));
432 
433   // Now some values
434   // Use ctrl to avoid hoisting these values past a safepoint, which could
435   // potentially reset these fields in the JavaThread.
436   Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
437   Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
438 
439   // Convert the store obj pointer to an int prior to doing math on it
440   // Must use ctrl to prevent "integerized oop" existing across safepoint
441   Node* cast =  __ CastPX(__ ctrl(), adr);
442 
443   // Divide pointer by card size
444   Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
445 
446   // Combine card table base and card offset
447   Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset );
448 
  // If we know the value being stored, check whether the store crosses regions.
450 
451   if (val != NULL) {
452     // Does the store cause us to cross regions?
453 
    // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare??
456     // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
457     Node* xor_res =  __ URShiftX ( __ XorX( cast,  __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));
458 
459     // if (xor_res == 0) same region so skip
460     __ if_then(xor_res, BoolTest::ne, zeroX, likely); {
461 
462       // No barrier if we are storing a NULL
463       __ if_then(val, BoolTest::ne, kit->null(), likely); {
464 
        // OK, we must mark the card if it is not already dirty.
466 
467         // load the original value of the card
468         Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
469 
470         __ if_then(card_val, BoolTest::ne, young_card, unlikely); {
471           kit->sync_kit(ideal);
472           kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
473           __ sync_kit(kit);
474 
475           Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
476           __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
477             g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
478           } __ end_if();
479         } __ end_if();
480       } __ end_if();
481     } __ end_if();
482   } else {
483     // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
484     // We don't need a barrier here if the destination is a newly allocated object
485     // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
486     // are set to 'g1_young_gen' (see G1CardTable::verify_g1_young_region()).
487     assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
488     Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
489     __ if_then(card_val, BoolTest::ne, young_card); {
490       g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
491     } __ end_if();
492   }
493 
494   // Final sync IdealKit and GraphKit.
495   kit->final_sync(ideal);
496 }
497 
498 // Helper that guards and inserts a pre-barrier.
499 void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
500                                         Node* pre_val, bool need_mem_bar) const {
501   // We could be accessing the referent field of a reference object. If so, when G1
502   // is enabled, we need to log the value in the referent field in an SATB buffer.
503   // This routine performs some compile time filters and generates suitable
504   // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for a non-volatile load from the referent field
  // to prevent commoning of loads across a safepoint.
507 
508   // Some compile time checks.
509 
  // If offset is a constant, is it java_lang_ref_Reference::referent_offset?
511   const TypeX* otype = offset->find_intptr_t_type();
512   if (otype != NULL && otype->is_con() &&
513       otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset, but not the referent offset, so just return
515     return;
516   }
517 
518   // We only need to generate the runtime guards for instances.
519   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
520   if (btype != NULL) {
521     if (btype->isa_aryptr()) {
522       // Array type so nothing to do
523       return;
524     }
525 
526     const TypeInstPtr* itype = btype->isa_instptr();
527     if (itype != NULL) {
528       // Can the klass of base_oop be statically determined to be
529       // _not_ a sub-class of Reference and _not_ Object?
530       ciKlass* klass = itype->klass();
531       if ( klass->is_loaded() &&
532           !klass->is_subtype_of(kit->env()->Reference_klass()) &&
533           !kit->env()->Object_klass()->is_subtype_of(klass)) {
534         return;
535       }
536     }
537   }
538 
539   // The compile time filters did not reject base_oop/offset so
540   // we need to generate the following runtime filters
541   //
  // if (offset == java_lang_ref_Reference::referent_offset) {
543   //   if (instance_of(base, java.lang.ref.Reference)) {
544   //     pre_barrier(_, pre_val, ...);
545   //   }
546   // }
547 
548   float likely   = PROB_LIKELY(  0.999);
549   float unlikely = PROB_UNLIKELY(0.999);
550 
551   IdealKit ideal(kit);
552 
553   Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
554 
555   __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
556       // Update graphKit memory and control from IdealKit.
557       kit->sync_kit(ideal);
558 
559       Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
560       Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);
561 
562       // Update IdealKit memory and control from graphKit.
563       __ sync_kit(kit);
564 
565       Node* one = __ ConI(1);
566       // is_instof == 0 if base_oop == NULL
567       __ if_then(is_instof, BoolTest::eq, one, unlikely); {
568 
        // Update GraphKit from IdealKit.
570         kit->sync_kit(ideal);
571 
572         // Use the pre-barrier to record the value in the referent field
573         pre_barrier(kit, false /* do_load */,
574                     __ ctrl(),
575                     NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
576                     pre_val /* pre_val */,
577                     T_OBJECT);
578         if (need_mem_bar) {
579           // Add memory barrier to prevent commoning reads from this field
580           // across safepoint since GC can change its value.
581           kit->insert_mem_bar(Op_MemBarCPUOrder);
582         }
583         // Update IdealKit from graphKit.
584         __ sync_kit(kit);
585 
      } __ end_if(); // (is_instof == 1)
587   } __ end_if(); // offset == referent_offset
588 
589   // Final sync IdealKit and GraphKit.
590   kit->final_sync(ideal);
591 }
592 
593 #undef __
594 
595 Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
596   DecoratorSet decorators = access.decorators();
597   Node* adr = access.addr().node();
598   Node* obj = access.base();
599 
600   bool mismatched = (decorators & C2_MISMATCHED) != 0;
601   bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
602   bool in_heap = (decorators & IN_HEAP) != 0;
603   bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
604   bool is_unordered = (decorators & MO_UNORDERED) != 0;
605   bool need_cpu_mem_bar = !is_unordered || mismatched || !in_heap;
606 
607   Node* top = Compile::current()->top();
608   Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
609   Node* load = CardTableBarrierSetC2::load_at_resolved(access, val_type);
610 
611   // If we are reading the value of the referent field of a Reference
612   // object (either by using Unsafe directly or through reflection)
613   // then, if G1 is enabled, we need to record the referent in an
614   // SATB log buffer using the pre-barrier mechanism.
615   // Also we need to add memory barrier to prevent commoning reads
616   // from this field across safepoint since GC can change its value.
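  // Illustrative examples (assumed typical shapes, not an exhaustive list) of
  // loads that need this treatment:
  //
  //   Reference<T> r = ...;
  //   T referent = r.get();                     // statically known referent load (ON_WEAK_OOP_REF)
  //   Object o = unsafe.getObject(r, offset);   // offset only known at runtime (ON_UNKNOWN_OOP_REF)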
617   bool need_read_barrier = in_heap && (on_weak ||
618                                        (unknown && offset != top && obj != top));
619 
620   if (!access.is_oop() || !need_read_barrier) {
621     return load;
622   }
623 
624   assert(access.is_parse_access(), "entry not supported at optimization time");
625   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
626   GraphKit* kit = parse_access.kit();
627 
628   if (on_weak) {
629     // Use the pre-barrier to record the value in the referent field
630     pre_barrier(kit, false /* do_load */,
631                 kit->control(),
632                 NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
633                 load /* pre_val */, T_OBJECT);
634     // Add memory barrier to prevent commoning reads from this field
635     // across safepoint since GC can change its value.
636     kit->insert_mem_bar(Op_MemBarCPUOrder);
637   } else if (unknown) {
    // Do not ask insert_pre_barrier to add a memory barrier if need_cpu_mem_bar
    // is set: in that case the required barriers are emitted by the access itself.
640     insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
641   }
642 
643   return load;
644 }
645 
646 bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const {
647   if (CardTableBarrierSetC2::is_gc_barrier_node(node)) {
648     return true;
649   }
650   if (node->Opcode() != Op_CallLeaf) {
651     return false;
652   }
653   CallLeafNode *call = node->as_CallLeaf();
654   if (call->_name == NULL) {
655     return false;
656   }
657 
658   return strcmp(call->_name, "write_ref_field_pre_entry") == 0 || strcmp(call->_name, "write_ref_field_post_entry") == 0;
659 }
660 
661 void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
662   assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
663   assert(node->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
  // There could be a single user, the URShift node, in the Object.clone()
  // intrinsic, but then the new allocation is passed to the arraycopy stub
  // and cannot be scalar replaced, so we do not check that case.

  // Another case of only one user (the Xor node) is when the NULL check of
  // the value in the G1 post barrier is folded away after CCP, so the code
  // which used the URShift is removed.
671 
  // Grab the Region node before eliminating the post barrier, since the
  // elimination also removes the CastP2X node when it has only one user.
674   Node* this_region = node->in(0);
675   assert(this_region != NULL, "");
676 
677   // Remove G1 post barrier.
678 
  // Search for the CastP2X->Xor->URShift->Cmp path which checks whether the
  // store is done to a region different from the value's region, and replace
  // the Cmp with #0 (false) to collapse the G1 post barrier.
682   Node* xorx = node->find_out_with(Op_XorX);
683   if (xorx != NULL) {
684     Node* shift = xorx->unique_out();
685     Node* cmpx = shift->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing region check in G1 post barrier");
689     macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
690 
691     // Remove G1 pre barrier.
692 
    // Search for the "if (marking != 0)" check and set it to "false".
    // There is no G1 pre barrier if the previously stored value is NULL
    // (for example, after initialization).
696     if (this_region->is_Region() && this_region->req() == 3) {
697       int ind = 1;
698       if (!this_region->in(ind)->is_IfFalse()) {
699         ind = 2;
700       }
701       if (this_region->in(ind)->is_IfFalse() &&
702           this_region->in(ind)->in(0)->Opcode() == Op_If) {
703         Node* bol = this_region->in(ind)->in(0)->in(1);
704         assert(bol->is_Bool(), "");
705         cmpx = bol->in(1);
706         if (bol->as_Bool()->_test._test == BoolTest::ne &&
707             cmpx->is_Cmp() && cmpx->in(2) == macro->intcon(0) &&
708             cmpx->in(1)->is_Load()) {
709           Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
710           const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
711           if (adr->is_AddP() && adr->in(AddPNode::Base) == macro->top() &&
712               adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
713               adr->in(AddPNode::Offset) == macro->MakeConX(marking_offset)) {
714             macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
715           }
716         }
717       }
718     }
719   } else {
720     assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
721     // This is a G1 post barrier emitted by the Object.clone() intrinsic.
722     // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if the card
723     // is marked as young_gen and replace the Cmp with 0 (false) to collapse the barrier.
724     Node* shift = node->find_out_with(Op_URShiftX);
725     assert(shift != NULL, "missing G1 post barrier");
726     Node* addp = shift->unique_out();
727     Node* load = addp->find_out_with(Op_LoadB);
728     assert(load != NULL, "missing G1 post barrier");
729     Node* cmpx = load->unique_out();
730     assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
731            cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
732            "missing card value check in G1 post barrier");
733     macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
734     // There is no G1 pre barrier in this case
735   }
  // Now the CastP2X can be removed, since it is used only on a dead path
  // which is currently still alive until IGVN optimizes it away.
738   assert(node->outcnt() == 0 || node->unique_out()->Opcode() == Op_URShiftX, "");
739   macro->replace_node(node, macro->top());
740 }
741 
742 Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const {
743   if (!use_ReduceInitialCardMarks() &&
744       c != NULL && c->is_Region() && c->req() == 3) {
745     for (uint i = 1; i < c->req(); i++) {
746       if (c->in(i) != NULL && c->in(i)->is_Region() &&
747           c->in(i)->req() == 3) {
748         Node* r = c->in(i);
749         for (uint j = 1; j < r->req(); j++) {
750           if (r->in(j) != NULL && r->in(j)->is_Proj() &&
751               r->in(j)->in(0) != NULL &&
752               r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
753               r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry)) {
754             Node* call = r->in(j)->in(0);
755             c = c->in(i == 1 ? 2 : 1);
756             if (c != NULL) {
757               c = c->in(0);
758               if (c != NULL) {
759                 c = c->in(0);
760                 assert(call->in(0) == NULL ||
761                        call->in(0)->in(0) == NULL ||
762                        call->in(0)->in(0)->in(0) == NULL ||
763                        call->in(0)->in(0)->in(0)->in(0) == NULL ||
764                        call->in(0)->in(0)->in(0)->in(0)->in(0) == NULL ||
765                        c == call->in(0)->in(0)->in(0)->in(0)->in(0), "bad barrier shape");
766                 return c;
767               }
768             }
769           }
770         }
771       }
772     }
773   }
774   return c;
775 }
776 
777 #ifdef ASSERT
778 void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
779   if (phase != BarrierSetC2::BeforeCodeGen) {
780     return;
781   }
782   // Verify G1 pre-barriers
783   const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
784 
785   ResourceArea *area = Thread::current()->resource_area();
786   Unique_Node_List visited(area);
787   Node_List worklist(area);
788   // We're going to walk control flow backwards starting from the Root
789   worklist.push(compile->root());
790   while (worklist.size() > 0) {
791     Node* x = worklist.pop();
792     if (x == NULL || x == compile->top()) continue;
793     if (visited.member(x)) {
794       continue;
795     } else {
796       visited.push(x);
797     }
798 
799     if (x->is_Region()) {
800       for (uint i = 1; i < x->req(); i++) {
801         worklist.push(x->in(i));
802       }
803     } else {
804       worklist.push(x->in(0));
805       // We are looking for the pattern:
806       //                            /->ThreadLocal
807       // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
808       //              \->ConI(0)
809       // We want to verify that the If and the LoadB have the same control
810       // See GraphKit::g1_write_barrier_pre()
811       if (x->is_If()) {
812         IfNode *iff = x->as_If();
813         if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
814           CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
815           if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
816               && cmp->in(1)->is_Load()) {
817             LoadNode* load = cmp->in(1)->as_Load();
818             if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
819                 && load->in(2)->in(3)->is_Con()
820                 && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {
821 
822               Node* if_ctrl = iff->in(0);
823               Node* load_ctrl = load->in(0);
824 
825               if (if_ctrl != load_ctrl) {
826                 // Skip possible CProj->NeverBranch in infinite loops
827                 if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
828                     && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
829                   if_ctrl = if_ctrl->in(0)->in(0);
830                 }
831               }
832               assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
833             }
834           }
835         }
836       }
837     }
838   }
839 }
840 #endif
841 
842 bool G1BarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
843   if (opcode == Op_StoreP) {
844     Node* adr = n->in(MemNode::Address);
845     const Type* adr_type = gvn->type(adr);
    // Pointer stores in G1 barriers look like unsafe accesses.
    // Ignore such stores so that non-escaping allocations can still be
    // scalar replaced.
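    // The IR shape being matched below is, as a rough sketch (offsets abbreviated):
    //
    //   StoreP(adr: AddP(base: LoadP(AddP(ThreadLocal, +queue_buffer_offset)),
    //                    offset: index),
    //          val: pre_val or card_adr)
    //
    // i.e. a raw-memory store into the thread-local SATB or dirty card queue buffer.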
849     if (adr_type->isa_rawptr() && adr->is_AddP()) {
850       Node* base = conn_graph->get_addp_base(adr);
851       if (base->Opcode() == Op_LoadP &&
852           base->in(MemNode::Address)->is_AddP()) {
853         adr = base->in(MemNode::Address);
854         Node* tls = conn_graph->get_addp_base(adr);
855         if (tls->Opcode() == Op_ThreadLocal) {
856           int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
857           const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
858           if (offs == buf_offset) {
859             return true; // G1 pre barrier previous oop value store.
860           }
861           if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
862             return true; // G1 post barrier card address store.
863           }
864         }
865       }
866     }
867   }
868   return false;
869 }