
src/share/vm/gc/g1/heapRegionRemSet.cpp

rev 9982 : 8147087: Race when reusing PerRegionTable bitmaps may result in dropped remembered set entries
Summary: Do not make reused PRTs available to other threads before the bitmap of the PRT has been cleared.
Contributed-by: Poonam Bajaj <poonam.bajaj@oracle.com>, Thomas Schatzl <thomas.schatzl@oracle.com>
rev 9983 : [mq]: 8147087-comments
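
The race, restated from the summary above: when a free PerRegionTable is reused for a new region, init() clears the card bitmap and then publishes the table by storing the new HeapRegion into _hr. If another thread can observe the new _hr value before the bitmap clear has completed, it may set a card bit that the still-running clear then wipes out, which is exactly a dropped remembered set entry. The sketch below shows the release/acquire publication idiom the patch relies on, expressed with std::atomic rather than HotSpot's OrderAccess; the type and member names (Region, Table, init_for_reuse, add_card) are placeholders for illustration, not the real PerRegionTable API.

  // Minimal sketch of the publication pattern, using std::atomic; not HotSpot code.
  #include <atomic>
  #include <bitset>
  #include <cstddef>

  struct Region { };                        // stand-in for HeapRegion

  struct Table {                            // stand-in for PerRegionTable
    std::bitset<512>     _bm;               // per-region card bitmap (size arbitrary)
    std::atomic<Region*> _hr{nullptr};      // owning region; also the publication point

    // Writer side: reuse the table for a new region. The bitmap must be
    // cleared *before* the table is published through _hr; the release
    // store prevents the clear from being reordered after the publication.
    void init_for_reuse(Region* r) {
      _bm.reset();
      _hr.store(r, std::memory_order_release);    // analogue of release_store_ptr
    }

    // Reader side: the acquire load pairs with the release store, so a
    // thread that sees the new region pointer also sees the cleared bitmap.
    Region* hr() const {
      return _hr.load(std::memory_order_acquire); // analogue of load_ptr_acquire
    }

    // A refinement thread records a card with a reference into this
    // region (non-atomic here; this is only a sketch).
    void add_card(std::size_t card_index) {
      _bm.set(card_index);
    }
  };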


--- old/src/share/vm/gc/g1/heapRegionRemSet.cpp

  93     // If the test below fails, then this table was reused concurrently
  94     // with this operation.  This is OK, since the old table was coarsened,
  95     // and adding a bit to the new table is never incorrect.
  96     // If the table used to belong to a continues humongous region and is
  97     // now reused for the corresponding start humongous region, we need to
  98     // make sure that we detect this. Thus, the is_in_reserved() check
  99     // below is needed.
 100     if (loc_hr->is_in_reserved(from)) {
 101       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
 102       CardIdx_t from_card = (CardIdx_t)
 103           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 104 
 105       assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
 106              "Must be in range.");
 107       add_card_work(from_card, par);
 108     }
 109   }
 110 
 111 public:
 112 
 113   HeapRegion* hr() const { return _hr; }


 114 
 115   jint occupied() const {
 116     // Overkill, but if we ever need it...
 117     // guarantee(_occupied == _bm.count_one_bits(), "Check");
 118     return _occupied;
 119   }
 120 
 121   void init(HeapRegion* hr, bool clear_links_to_all_list) {
 122     if (clear_links_to_all_list) {
 123       set_next(NULL);
 124       set_prev(NULL);
 125     }
 126     _collision_list_next = NULL;
 127     _occupied = 0;
 128     _bm.clear();
 129     // Make sure that the bitmap clearing above has been finished before publishing
 130     // this PRT to concurrent threads.
 131     OrderAccess::release_store_ptr(&_hr, hr);
 132   }
 133 




+++ new/src/share/vm/gc/g1/heapRegionRemSet.cpp

  93     // If the test below fails, then this table was reused concurrently
  94     // with this operation.  This is OK, since the old table was coarsened,
  95     // and adding a bit to the new table is never incorrect.
  96     // If the table used to belong to a continues humongous region and is
  97     // now reused for the corresponding start humongous region, we need to
  98     // make sure that we detect this. Thus, the is_in_reserved() check
  99     // below is needed.
 100     if (loc_hr->is_in_reserved(from)) {
 101       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
 102       CardIdx_t from_card = (CardIdx_t)
 103           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 104 
 105       assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
 106              "Must be in range.");
 107       add_card_work(from_card, par);
 108     }
 109   }
 110 
 111 public:
 112 
 113   HeapRegion* hr() const { 
 114     return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
 115   }
 116 
 117   jint occupied() const {
 118     // Overkill, but if we ever need it...
 119     // guarantee(_occupied == _bm.count_one_bits(), "Check");
 120     return _occupied;
 121   }
 122 
 123   void init(HeapRegion* hr, bool clear_links_to_all_list) {
 124     if (clear_links_to_all_list) {
 125       set_next(NULL);
 126       set_prev(NULL);
 127     }
 128     _collision_list_next = NULL;
 129     _occupied = 0;
 130     _bm.clear();
 131     // Make sure that the bitmap clearing above has been finished before publishing
 132     // this PRT to concurrent threads.
 133     OrderAccess::release_store_ptr(&_hr, hr);
 134   }
 135 
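The two sides pair up as a standard release/acquire publication: the release_store_ptr in init() orders the bitmap clear before the store to _hr (as the in-code comment above states), and the load_ptr_acquire now used in hr() orders a reader's subsequent accesses to the table after its load of _hr. With both halves in place, a thread that starts using a reused PRT should observe a fully cleared bitmap first, so the card bits it adds afterwards cannot be silently wiped out.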

