src/share/vm/opto/graphKit.cpp

rev 8303 : 8079315: UseCondCardMark broken in conjunction with CMS precleaning
Summary: Insert StoreLoad barriers in CondCardMark sequence
Reviewed-by: kvn
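The race this change addresses involves two actors: a mutator thread that performs an unordered oop store and then the conditional card mark (load the card, re-dirty it only if it is not already dirty), and the CMS precleaning thread, which cleans a dirty card and then rescans the memory it covers. The standalone C++11 sketch below models that interleaving; it is illustrative only, not HotSpot code, and the names field, card, mutator and precleaner are assumptions of the model. The seq_cst fence in mutator() plays the role of the Op_MemBarVolatile (StoreLoad) barrier inserted in the new version: without it, the card load may be reordered ahead of the oop store, so the mutator can observe the still-dirty card and skip re-dirtying it while the precleaner cleans the card and rescans before the store is visible, and the update is lost.

    #include <atomic>
    #include <cstdio>
    #include <thread>

    // Model only: `field` stands in for the oop field being written, `card`
    // for its card table entry. As in the code below, 0 is the dirty value.
    std::atomic<int>  field{0};
    std::atomic<char> card{0};    // card starts dirty

    void mutator() {
      field.store(42, std::memory_order_relaxed);           // oop store
      std::atomic_thread_fence(std::memory_order_seq_cst);  // StoreLoad barrier
                                                            // added by this fix
      if (card.load(std::memory_order_relaxed) != 0) {      // conditional mark:
        card.store(0, std::memory_order_relaxed);           // dirty only if clean
      }
    }

    void precleaner() {
      card.store(1, std::memory_order_relaxed);             // clean the card,
      std::atomic_thread_fence(std::memory_order_seq_cst);  // then rescan the
      int seen = field.load(std::memory_order_relaxed);     // memory it covers
      std::printf("precleaner rescanned field=%d\n", seen);
    }

    int main() {
      std::thread a(mutator), b(precleaner);
      a.join();
      b.join();
      // The broken schedule (mutator's card load hoisted above its oop store):
      // mutator reads card==0 (dirty) and skips the mark; precleaner then sets
      // card=1 and rescans while field is still 0; field=42 becomes visible
      // afterwards under a clean card, so the reference is never rescanned.
      std::printf("final: card=%d field=%d\n", int(card.load()), field.load());
      return 0;
    }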

Old:
3786   assert(adr != NULL, "");
3787 
3788   IdealKit ideal(this, true);
3789 
3790   // Convert the pointer to an int prior to doing math on it
3791   Node* cast = __ CastPX(__ ctrl(), adr);
3792 
3793   // Divide by card size
3794   assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
3795          "Only one we handle so far.");
3796   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
3797 
3798   // Combine card table base and card offset
3799   Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
3800 
3801   // Get the alias_index for raw card-mark memory
3802   int adr_type = Compile::AliasIdxRaw;
3803   Node*   zero = __ ConI(0); // Dirty card value
3804   BasicType bt = T_BYTE;
3805 
3806   if (UseCondCardMark) {
3807     // The classic GC reference write barrier is typically implemented
3808     // as a store into the global card mark table.  Unfortunately
3809     // unconditional stores can result in false sharing and excessive
3810     // coherence traffic as well as false transactional aborts.
3811     // UseCondCardMark enables MP "polite" conditional card mark
3812     // stores.  In theory we could relax the load from ctrl() to
3813     // no_ctrl, but that doesn't buy much latitude.
3814     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
3815     __ if_then(card_val, BoolTest::ne, zero);
3816   }
3817 
3818   // Smash zero into card
3819   if( !UseConcMarkSweepGC ) {
3820     __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::unordered);
3821   } else {
3822     // Specialized path for CM store barrier
3823     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3824   }
3825 

New:

3786   assert(adr != NULL, "");
3787 
3788   IdealKit ideal(this, true);
3789 
3790   // Convert the pointer to an int prior to doing math on it
3791   Node* cast = __ CastPX(__ ctrl(), adr);
3792 
3793   // Divide by card size
3794   assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
3795          "Only one we handle so far.");
3796   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
3797 
3798   // Combine card table base and card offset
3799   Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
3800 
3801   // Get the alias_index for raw card-mark memory
3802   int adr_type = Compile::AliasIdxRaw;
3803   Node*   zero = __ ConI(0); // Dirty card value
3804   BasicType bt = T_BYTE;
3805 
3806   if (UseConcMarkSweepGC && UseCondCardMark) {
3807     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
3808     __ sync_kit(this);
3809   }
3810 
3811   if (UseCondCardMark) {
3812     // The classic GC reference write barrier is typically implemented
3813     // as a store into the global card mark table.  Unfortunately
3814     // unconditional stores can result in false sharing and excessive
3815     // coherence traffic as well as false transactional aborts.
3816     // UseCondCardMark enables MP "polite" conditional card mark
3817     // stores.  In theory we could relax the load from ctrl() to
3818     // no_ctrl, but that doesn't buy much latitude.
3819     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
3820     __ if_then(card_val, BoolTest::ne, zero);
3821   }
3822 
3823   // Smash zero into card
3824   if( !UseConcMarkSweepGC ) {
3825     __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::unordered);
3826   } else {
3827     // Specialized path for CM store barrier
3828     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3829   }
3830 
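For reference, the address arithmetic emitted above (CastPX to an integer, URShiftX by card_shift, AddP onto byte_map_base) reduces to byte_map_base + (addr >> card_shift), where byte_map_base is pre-biased by the shifted heap base. The standalone sketch below models this with an assumed 512-byte card size (card_shift = 9, the usual HotSpot value) and a toy card-aligned heap; all names and sizes are illustrative.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      const int card_shift = 9;                  // assumed: 2^9 = 512-byte cards
      const size_t card_size = size_t(1) << card_shift;

      // Toy "heap" of 8 cards, aligned to a card boundary as a real heap is.
      std::vector<char> backing(9 * card_size);
      uintptr_t raw = reinterpret_cast<uintptr_t>(backing.data());
      char* heap = backing.data() + ((card_size - raw % card_size) % card_size);
      uintptr_t heap_start = reinterpret_cast<uintptr_t>(heap);

      std::vector<char> table(8, 1);             // card table; 0 = dirty, as above

      // byte_map_base is pre-biased by -(heap_start >> card_shift), so adding
      // the shifted oop address (URShiftX + AddP) indexes the table directly.
      uintptr_t byte_map_base =
          reinterpret_cast<uintptr_t>(table.data()) - (heap_start >> card_shift);

      char* field_addr = heap + 3 * card_size + 40;   // a field inside card 3
      char* card_adr = reinterpret_cast<char*>(
          byte_map_base + (reinterpret_cast<uintptr_t>(field_addr) >> card_shift));

      *card_adr = 0;                                  // "smash zero into card"
      std::printf("dirtied card %ld\n", long(card_adr - table.data()));  // 3
    }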

