src/share/vm/opto/graphKit.cpp


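// (Tail of GraphKit::write_barrier_post.) With UseCondCardMark the card value
// is loaded first and the card is only dirtied if it is not already dirty,
// which avoids redundant writes (and cache-line contention) on the card table.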
    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
    __ if_then(card_val, BoolTest::ne, zero);
  }

  // Smash zero into card
  if( !UseConcMarkSweepGC ) {
    __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::release);
  } else {
    // Specialized path for CM store barrier
    __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
  }

  if (UseCondCardMark) {
    __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  final_sync(ideal);
}

/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of the
 * marking are kept alive; therefore all reference updates need to record
 * the previous reference before it is overwritten.
 *
 * If the previous value is NULL there is no need to save the old value.
 * References that are NULL are filtered during runtime by the barrier
 * code to avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove during compile time that the reference about to be overwritten is
 * NULL and avoid adding the barrier code completely.
 *
 * For that, the compiler needs to determine that the object in which the
 * field is about to be written is newly allocated, and that no prior store
 * to the same field has happened since the allocation.
 *
 * Returns true iff the pre-barrier can be removed.
 */
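/*
 * Illustration (a hypothetical Java shape; the names are made up):
 *
 *   MyObject o = new MyObject();
 *   o.field = x;   // first write to 'field' since the allocation
 *
 * Here the previous value of 'field' is provably the NULL written by the
 * object initialization, so there is nothing for the SATB pre-barrier to
 * log and the barrier can be elided.
 */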
bool GraphKit::g1_can_remove_pre_barrier(PhaseTransform* phase, Node* adr,
                                         BasicType bt, uint adr_idx) {
  intptr_t      offset = 0;
  Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc  = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }
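      // A concrete instance of the offsets check above (illustrative numbers,
      // not from the original sources): for an oop store at offset == 24 with
      // size_in_bytes == 8, a preceding jlong store at st_offset == 16 with
      // memory_size() == 8 satisfies st_offset <= offset - memory_size(), so
      // the two stores are provably independent.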

      if (st_base != base &&
          MemNode::detect_ptr_independence(base, alloc, st_base,
                                  AllocateNode::Ideal_allocation(st_base, phase),
                                  phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode*  st_alloc = st_init->allocation();

      if (st_alloc == NULL) {
        break; // something degenerated
      }

      // Make sure that we are looking at the same allocation site
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL so that no previous
        // store has been moved up to directly write a reference
        Node* captured_store = st_init->find_captured_store(offset, type2aelembytes(T_OBJECT), phase);
        if (captured_store != NULL && captured_store != st_init->zero_memory()) {
          return false;
        }

        return true;
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

// G1 pre/post barriers
void GraphKit::g1_write_barrier_pre(bool do_load,
                                    Node* obj,
                                    Node* adr,
                                    uint alias_idx,
                                    Node* val,
                                    const TypeOopPtr* val_type,
                                    Node* pre_val,
                                    BasicType bt) {

  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (use_ReduceInitialCardMarks() &&
        g1_can_remove_pre_barrier(&_gvn, adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(this, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_ctrl = NULL;
  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);



        // Now get the buffer location we will log the previous value into and store it
        Node *log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
      } __ end_if();  // (!index)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (!marking)

  // Final sync IdealKit and GraphKit.
  final_sync(ideal);
}

/*
 * G1, like any GC with a Young Generation, requires a way to keep track of
 * references from the Old Generation to the Young Generation to make sure
 * all live objects are found. G1 also needs to track object references
 * between different regions to enable evacuation of old regions, which is
 * done as part of mixed collections. References are tracked in remembered
 * sets, which are continuously updated as references are written, with the
 * help of the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates to fields in objects located in the Young Generation,
 * updates to fields in the same region as the reference, writes of NULL, and
 * writes to cards already marked as dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, namely when it can be proved during compile time
 * that the object is newly allocated and that no safepoint exists between
 * the allocation and the store.
 *
 * In the case of a slow allocation, the allocation code must handle the
 * barrier as part of the allocation if the allocated object is not located
 * in the nursery; this happens for humongous objects. This is similar to
 * how CMS is required to handle this case; see the comments for
 * CollectedHeap::new_store_pre_barrier and OptoRuntime::new_store_pre_barrier.
 * A deferred card mark is required for these objects and is handled in the
 * above mentioned methods.
 *
 * Returns true iff the post barrier can be removed.
 */
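/*
 * Illustration (a hypothetical Java shape; the names are made up):
 *
 *   MyObject o = new MyObject();
 *   o.field = x;   // no safepoint between the allocation and this store
 *
 * The freshly allocated object cannot have been promoted, so the store
 * cannot create an old-to-young reference, and the post-barrier can be
 * elided (humongous allocations are covered by the deferred card mark
 * mentioned above).
 */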
bool GraphKit::g1_can_remove_post_barrier(PhaseTransform* phase, Node* store,
                                          Node* adr) {
  intptr_t      offset = 0;
  Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc  = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  // Start search from Store node
  Node* mem = store;
  for (int cnt = 0; cnt < 50; cnt++) { // While we can dance past unrelated stores...

    if (mem->is_Store()) {
      // We can walk through all stores as we don't care about the previous value;
      // walk through the Control instead of the Memory to make sure we detect
      // another allocation between this store and the allocation we are looking
      // for, as that would imply a safepoint.
      mem = mem->in(MemNode::Control);
      continue;
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode*  st_alloc = st_init->allocation();
      if (st_alloc == NULL) {
        break; // something degenerated
      }
      // Make sure we are looking at the same allocation
      if (alloc == st_alloc) {
        return true;
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

//
// Update the card table and add card address to the queue
//
void GraphKit::g1_mark_card(IdealKit& ideal,
                            Node* card_adr,
                            Node* oop_store,
                            uint oop_alias_idx,
                            Node* index,
                            Node* index_adr,
                            Node* buffer,
                            const TypeFunc* tf) {

  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

  // Now do the queue work



}

void GraphKit::g1_write_barrier_post(Node* oop_store,
                                     Node* obj,
                                     Node* adr,
                                     uint alias_idx,
                                     Node* val,
                                     BasicType bt,
                                     bool use_precise) {
  // If we are writing a NULL then we need no post barrier

  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    // Must be NULL
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    // No post barrier if writing NULL
    return;
  }

  if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (use_ReduceInitialCardMarks() &&
      g1_can_remove_post_barrier(&_gvn, oop_store, adr)) {
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

  IdealKit ideal(this, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);
  Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
  Node* zeroX = __ ConX(0);

  // Get the alias_index for raw card-mark memory
  const TypePtr* card_type = TypeRawPtr::BOTTOM;