src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp

  88   // (Else it's an array (or unknown), and we want more precise card marks.)
  89   assert(adr != NULL, "");
  90 
  91   IdealKit ideal(kit, true);
  92 
  93   // Convert the pointer to an int prior to doing math on it
  94   Node* cast = __ CastPX(__ ctrl(), adr);
  95 
  96   // Divide by card size
  97   Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
  98 
  99   // Combine card table base and card offset
 100   Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset );
 101 
 102   // Get the alias_index for raw card-mark memory
 103   int adr_type = Compile::AliasIdxRaw;
 104   Node*   zero = __ ConI(0); // Dirty card value
 105 
 106   if (UseCondCardMark) {
 107     if (ct->scanned_concurrently()) {
 108       kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
 109       __ sync_kit(kit);
 110     }
 111     // The classic GC reference write barrier is typically implemented
 112     // as a store into the global card mark table.  Unfortunately
 113     // unconditional stores can result in false sharing and excessive
 114     // coherence traffic as well as false transactional aborts.
 115     // UseCondCardMark enables MP "polite" conditional card mark
 116     // stores.  In theory we could relax the load from ctrl() to
 117     // no_ctrl, but that doesn't buy much latitude.
 118     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
 119     __ if_then(card_val, BoolTest::ne, zero);
 120   }
 121 
 122   // Smash zero into card
 123   if(!ct->scanned_concurrently()) {
 124     __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);
 125   } else {
 126     // Specialized path for CM store barrier
 127     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, T_BYTE, adr_type);
 128   }
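Both versions shown on this page emit the same address arithmetic: cast the field address to an integer, shift right by CardTable::card_shift to get the card offset, add that to the biased card-table base, and store zero (the dirty value) into the resulting byte. Below is a minimal stand-alone sketch of that arithmetic, assuming a 512-byte card (the usual value behind card_shift) and a byte_map_base that is initialized elsewhere; the function and constant names here are illustrative, not the VM's.

  #include <cstdint>

  static const int CARD_SHIFT = 9;                       // log2 of a 512-byte card, the typical card_shift
  static volatile std::int8_t* byte_map_base = nullptr;  // biased card-table base, assumed set up elsewhere

  inline void post_write_barrier(void* field_addr) {
    // Convert the pointer to an integer before doing math on it (CastPX above).
    std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(field_addr);
    // Divide by the card size to get the card offset (URShiftX above).
    std::uintptr_t card_offset = addr >> CARD_SHIFT;
    // Combine the card-table base with the card offset (AddP above) and dirty the card.
    byte_map_base[card_offset] = 0;                       // 0 is the dirty-card value
  }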




  88   // (Else it's an array (or unknown), and we want more precise card marks.)
  89   assert(adr != NULL, "");
  90 
  91   IdealKit ideal(kit, true);
  92 
  93   // Convert the pointer to an int prior to doing math on it
  94   Node* cast = __ CastPX(__ ctrl(), adr);
  95 
  96   // Divide by card size
  97   Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
  98 
  99   // Combine card table base and card offset
 100   Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset );
 101 
 102   // Get the alias_index for raw card-mark memory
 103   int adr_type = Compile::AliasIdxRaw;
 104   Node*   zero = __ ConI(0); // Dirty card value
 105 
 106   if (UseCondCardMark) {
 107     if (ct->scanned_concurrently()) {
 108       kit->insert_store_load_for_barrier();
 109       __ sync_kit(kit);
 110     }
 111     // The classic GC reference write barrier is typically implemented
 112     // as a store into the global card mark table.  Unfortunately
 113     // unconditional stores can result in false sharing and excessive
 114     // coherence traffic as well as false transactional aborts.
 115     // UseCondCardMark enables MP "polite" conditional card mark
 116     // stores.  In theory we could relax the load from ctrl() to
 117     // no_ctrl, but that doesn't buy much latitude.
 118     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
 119     __ if_then(card_val, BoolTest::ne, zero);
 120   }
 121 
 122   // Smash zero into card
 123   if(!ct->scanned_concurrently()) {
 124     __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);
 125   } else {
 126     // Specialized path for CM store barrier
 127     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, T_BYTE, adr_type);
 128   }
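The one line that differs between the two versions is the barrier planted ahead of the conditional card-mark load when the card table is scanned concurrently: one version inserts a MemBarVolatile tied to the oop store, the other calls kit->insert_store_load_for_barrier(). Either way the intent is StoreLoad ordering: the load of the current card value must not be satisfied ahead of the preceding reference store, or the conditional mark could see a stale "already dirty" value and skip a card that a concurrent scanner still needs to see. The sketch below illustrates that conditional path, using a sequentially consistent fence as a stand-in for the StoreLoad barrier; cond_card_mark is a hypothetical name, not a VM function.

  #include <atomic>
  #include <cstdint>

  inline void cond_card_mark(volatile std::int8_t* card_adr) {
    // Stand-in for the StoreLoad barrier (MemBarVolatile / insert_store_load_for_barrier):
    // keep the earlier reference store ordered before the card-value load below.
    std::atomic_thread_fence(std::memory_order_seq_cst);
    if (*card_adr != 0) {   // card not already dirty?
      *card_adr = 0;        // smash zero into the card
    }
  }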

