
src/share/vm/opto/graphKit.cpp





3772   assert(adr != NULL, "");
3773 
3774   IdealKit ideal(this, true);
3775 
3776   // Convert the pointer to an int prior to doing math on it
3777   Node* cast = __ CastPX(__ ctrl(), adr);
3778 
3779   // Divide by card size
3780   assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
3781          "Only one we handle so far.");
3782   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
3783 
3784   // Combine card table base and card offset
3785   Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
3786 
3787   // Get the alias_index for raw card-mark memory
3788   int adr_type = Compile::AliasIdxRaw;
3789   Node*   zero = __ ConI(0); // Dirty card value
3790   BasicType bt = T_BYTE;
3791 



3792   if (UseCondCardMark) {
3793     // The classic GC reference write barrier is typically implemented
3794     // as a store into the global card mark table.  Unfortunately
3795     // unconditional stores can result in false sharing and excessive
3796     // coherence traffic as well as false transactional aborts.
3797     // UseCondCardMark enables MP "polite" conditional card mark
3798     // stores.  In theory we could relax the load from ctrl() to
3799     // no_ctrl, but that doesn't buy much latitude.
3800     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
3801     __ if_then(card_val, BoolTest::ne, zero);
3802   }
3803 
3804   // Smash zero into card
3805   if( !UseConcMarkSweepGC ) {
3806     __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::release);
3807   } else {
3808     // Specialized path for CM store barrier
3809     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3810   }
3811 
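
The hunk above is the code as it stood before the change; the copy that follows shows it after the patch, which adds a StoreLoad barrier ahead of the conditional card-mark load. For orientation, here is a rough C-level sketch of what the IdealKit graph above amounts to. It is illustrative only: post_write_barrier, the stand-in typedefs, and the constant 9 for card_shift are assumptions, not HotSpot API; byte_map_base and card_shift mirror the CardTableModRefBS fields used in the hunk.

    #include <stdint.h>
    typedef int8_t jbyte;                     // stand-in for HotSpot's jbyte
    static bool UseCondCardMark = true;       // stand-in for the VM flag
    static jbyte* byte_map_base;              // biased card table base (assumed set up elsewhere)
    static const int card_shift = 9;          // log2 of card size (assuming 512-byte cards)

    // Hypothetical sketch of the card-marking post barrier built above;
    // not the actual HotSpot implementation.
    inline void post_write_barrier(void* field_addr) {
      jbyte* card = byte_map_base + ((uintptr_t)field_addr >> card_shift);
      if (UseCondCardMark && *card == 0) {
        return;                               // card already dirty (0), skip the store
      }
      *card = 0;                              // dirty the card (0 is the dirty value here)
    }

The CMS branch in the hunk goes through storeCM instead of a plain store, keeping the card mark tied to the oop store it covers; the sketch only shows the generic release-store path.
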




3772   assert(adr != NULL, "");
3773 
3774   IdealKit ideal(this, true);
3775 
3776   // Convert the pointer to an int prior to doing math on it
3777   Node* cast = __ CastPX(__ ctrl(), adr);
3778 
3779   // Divide by card size
3780   assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
3781          "Only one we handle so far.");
3782   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
3783 
3784   // Combine card table base and card offset
3785   Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
3786 
3787   // Get the alias_index for raw card-mark memory
3788   int adr_type = Compile::AliasIdxRaw;
3789   Node*   zero = __ ConI(0); // Dirty card value
3790   BasicType bt = T_BYTE;
3791 
3792   insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
3793   __ sync_kit(this);
3794   
3795   if (UseCondCardMark) {
3796     // The classic GC reference write barrier is typically implemented
3797     // as a store into the global card mark table.  Unfortunately
3798     // unconditional stores can result in false sharing and excessive
3799     // coherence traffic as well as false transactional aborts.
3800     // UseCondCardMark enables MP "polite" conditional card mark
3801     // stores.  In theory we could relax the load from ctrl() to
3802     // no_ctrl, but that doesn't buy much latitude.
3803     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
3804     __ if_then(card_val, BoolTest::ne, zero);
3805   }
3806 
3807   // Smash zero into card
3808   if( !UseConcMarkSweepGC ) {
3809     __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::release);
3810   } else {
3811     // Specialized path for CM store barrier
3812     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3813   }
3814 
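
The only difference from the first hunk is new lines 3792-3794: a MemBarVolatile (labelled a StoreLoad barrier) is inserted and the IdealKit is re-synced before the card value is loaded, ahead of the UseCondCardMark check. A plausible motivation, not stated in the hunk itself: without the barrier the card load could be ordered ahead of the preceding oop store, so a collector that cleans cards concurrently could clean the card and scan the object before the new reference is visible, and the elided card mark would hide the update. Below is a minimal sketch of the ordering the barrier enforces, reusing the hypothetical helpers from the previous sketch; std::atomic_thread_fence stands in for the MemBarVolatile the compiler emits and the function name is invented for illustration.

    #include <atomic>

    // Illustrative ordering only; assumes the sketch above.
    inline void write_ref_with_post_barrier(void** field, void* new_ref) {
      *field = new_ref;                                      // the oop store the barrier covers
      std::atomic_thread_fence(std::memory_order_seq_cst);   // StoreLoad: store visible before the card load
      jbyte* card = byte_map_base + ((uintptr_t)field >> card_shift);
      if (!UseCondCardMark || *card != 0) {
        *card = 0;                                           // dirty the card
      }
    }
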

