< prev index next >

src/hotspot/share/gc/g1/heapRegionRemSet.cpp

Print this page




  96     // If the test below fails, then this table was reused concurrently
  97     // with this operation.  This is OK, since the old table was coarsened,
  98     // and adding a bit to the new table is never incorrect.
  99     // If the table used to belong to a continues humongous region and is
 100     // now reused for the corresponding start humongous region, we need to
  101     // make sure that we detect this. Thus, the is_in_reserved() check
  102     // below must cover the region's entire reserved space.
 103     if (loc_hr->is_in_reserved(from)) {
 104       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
 105       CardIdx_t from_card = (CardIdx_t)
 106           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 107 
 108       assert((size_t)from_card < HeapRegion::CardsPerRegion,
 109              "Must be in range.");
 110       add_card_work(from_card, par);
 111     }
 112   }
 113 
 114 public:
 115 
  // Acquire-load of the owning HeapRegion. Pairs with the release_store_ptr
  // in init() so that once a reader observes the new _hr it also observes
  // the cleared bitmap and zeroed _occupied published before it.
  116   HeapRegion* hr() const {
  117     return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
  118   }
 119 
  // Cached count of set card bits in _bm (maintained incrementally by the
  // add_card path rather than recounted on each call).
  120   jint occupied() const {
  121     // Overkill, but if we ever need it...
  122     // guarantee(_occupied == _bm.count_one_bits(), "Check");
  123     return _occupied;
  124   }
 125 
  // Reset this PRT for (re)use with region 'hr'. When the PRT is being
  // reused in place on the 'all' list (clear_links_to_all_list == false)
  // its _next/_prev links are deliberately left intact. The release-store
  // of _hr must be the last write: it is the publication point for
  // concurrent readers, which acquire-load _hr (see hr()).
  126   void init(HeapRegion* hr, bool clear_links_to_all_list) {
  127     if (clear_links_to_all_list) {
  128       set_next(NULL);
  129       set_prev(NULL);
  130     }
  131     _collision_list_next = NULL;
  132     _occupied = 0;
  133     _bm.clear();
  134     // Make sure that the bitmap clearing above has been finished before publishing
  135     // this PRT to concurrent threads.
  136     OrderAccess::release_store_ptr(&_hr, hr);
  137   }
 138 
  // Record a reference originating at 'from': parallel (atomic) variant,
  // and a sequential variant for use when no concurrent updaters exist.
  139   void add_reference(OopOrNarrowOopStar from) {
  140     add_reference_work(from, /*parallel*/ true);
  141   }
  142 
  143   void seq_add_reference(OopOrNarrowOopStar from) {
  144     add_reference_work(from, /*parallel*/ false);
  145   }
 146 
  // Prune this PRT's bitmap against the card liveness info (presumably
  // clearing bits for cards with no live data — see G1CardLiveData), then
  // rebuild the cached _occupied count to match the pruned bitmap.
  147   void scrub(G1CardLiveData* live_data) {
  148     live_data->remove_nonlive_cards(hr()->hrm_index(), &_bm);
  149     recount_occupied();
  150   }
 151 
  // Set the bit for the given card index using the parallel (atomic) path.
  152   void add_card(CardIdx_t from_card_index) {
  153     add_card_work(from_card_index, /*parallel*/ true);
  154   }
 155 
 156   void seq_add_card(CardIdx_t from_card_index) {


 165 
  // Memory footprint of this PRT: the object itself plus the out-of-line
  // bitmap storage.
  166   // Mem size in bytes.
  167   size_t mem_size() const {
  168     return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  169   }
 170 
  // Test whether the card covering address 'from' is recorded in this PRT.
  // The card index is the card-size-granular offset of 'from' within the
  // owning region.
  171   // Requires "from" to be in "hr()".
  172   bool contains_reference(OopOrNarrowOopStar from) const {
  173     assert(hr()->is_in_reserved(from), "Precondition.");
  174     size_t card_ind = pointer_delta(from, hr()->bottom(),
  175                                     CardTableModRefBS::card_size);
  176     return _bm.at(card_ind);
  177   }
 178 
  // Lock-free push of the whole [prt, last] chain onto the global
  // _free_list: splice the current head behind 'last', then CAS 'prt' in
  // as the new head, retrying on contention. The ShouldNotReachHere() is
  // defensive — the loop can only exit via the return above.
  179   // Bulk-free the PRTs from prt to last, assumes that they are
  180   // linked together using their _next field.
  181   static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
  182     while (true) {
  183       PerRegionTable* fl = _free_list;
  184       last->set_next(fl);
  185       PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
  186       if (res == fl) {
  187         return;
  188       }
  189     }
  190     ShouldNotReachHere();
  191   }
 192 
  // Free a single PRT: a one-element bulk_free.
  193   static void free(PerRegionTable* prt) {
  194     bulk_free(prt, prt);
  195   }
 196 
  // Pop a PRT off the global lock-free _free_list (CAS the head to its
  // successor, re-reading the head on contention); fall back to heap
  // allocation when the list is empty. The popped PRT is re-init'ed with
  // links to the 'all' list cleared.
  // NOTE(review): a bare CAS pop like this is ABA-prone in general;
  // presumably frees cannot race with this pop in a harmful way here
  // (e.g. freeing only at safepoints) — confirm against the callers.
  197   // Returns an initialized PerRegionTable instance.
  198   static PerRegionTable* alloc(HeapRegion* hr) {
  199     PerRegionTable* fl = _free_list;
  200     while (fl != NULL) {
  201       PerRegionTable* nxt = fl->next();
  202       PerRegionTable* res =
  203         (PerRegionTable*)
  204         Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
  205       if (res == fl) {
  206         fl->init(hr, true);
  207         return fl;
  208       } else {
  209         fl = _free_list;
  210       }
  211     }
  212     assert(fl == NULL, "Loop condition.");
  213     return new PerRegionTable(hr);
  214   }
 215 
  // Links for the doubly-linked 'all' list of PRTs (also reused by the
  // free list, which links through _next only).
  216   PerRegionTable* next() const { return _next; }
  217   void set_next(PerRegionTable* next) { _next = next; }
  218   PerRegionTable* prev() const { return _prev; }
  219   void set_prev(PerRegionTable* prev) { _prev = prev; }
 220 
 221   // Accessor and Modification routines for the pointer for the
 222   // singly linked collision list that links the PRTs within the
 223   // OtherRegionsTable::_fine_grain_regions hash table.
 224   //


 399       if (_n_fine_entries == _max_fine_entries) {
 400         prt = delete_region_table();
 401         // There is no need to clear the links to the 'all' list here:
 402         // prt will be reused immediately, i.e. remain in the 'all' list.
 403         prt->init(from_hr, false /* clear_links_to_all_list */);
 404       } else {
 405         prt = PerRegionTable::alloc(from_hr);
 406         link_to_all(prt);
 407       }
 408 
 409       PerRegionTable* first_prt = _fine_grain_regions[ind];
 410       prt->set_collision_list_next(first_prt);
 411       // The assignment into _fine_grain_regions allows the prt to
 412       // start being used concurrently. In addition to
 413       // collision_list_next which must be visible (else concurrent
 414       // parsing of the list, if any, may fail to see other entries),
 415       // the content of the prt must be visible (else for instance
 416       // some mark bits may not yet seem cleared or a 'later' update
 417       // performed by a concurrent thread could be undone when the
 418       // zeroing becomes visible). This requires store ordering.
 419       OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
 420       _n_fine_entries++;
 421 
 422       if (G1HRRSUseSparseTable) {
 423         // Transfer from sparse to fine-grain.
 424         SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
 425         assert(sprt_entry != NULL, "There should have been an entry");
 426         for (int i = 0; i < sprt_entry->num_valid_cards(); i++) {
 427           CardIdx_t c = sprt_entry->card(i);
 428           prt->add_card(c);
 429         }
 430         // Now we can delete the sparse entry.
 431         bool res = _sparse_table.delete_entry(from_hrm_ind);
 432         assert(res, "It should have been there.");
 433       }
 434     }
 435     assert(prt != NULL && prt->hr() == from_hr, "consequence");
 436   }
 437   // Note that we can't assert "prt->hr() == from_hr", because of the
 438   // possibility of concurrent reuse.  But see head comment of
 439   // OtherRegionsTable for why this is OK.




  96     // If the test below fails, then this table was reused concurrently
  97     // with this operation.  This is OK, since the old table was coarsened,
  98     // and adding a bit to the new table is never incorrect.
  99     // If the table used to belong to a continues humongous region and is
 100     // now reused for the corresponding start humongous region, we need to
  101     // make sure that we detect this. Thus, the is_in_reserved() check
  102     // below must cover the region's entire reserved space.
 103     if (loc_hr->is_in_reserved(from)) {
 104       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
 105       CardIdx_t from_card = (CardIdx_t)
 106           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 107 
 108       assert((size_t)from_card < HeapRegion::CardsPerRegion,
 109              "Must be in range.");
 110       add_card_work(from_card, par);
 111     }
 112   }
 113 
 114 public:
 115 
  // Acquire-load of the owning HeapRegion; pairs with the release_store in
  // init() so readers see a fully-initialized PRT once _hr is visible.
  116   HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }


 117 
  // Cached count of set card bits in _bm (maintained incrementally by the
  // add_card path rather than recounted on each call).
  118   jint occupied() const {
  119     // Overkill, but if we ever need it...
  120     // guarantee(_occupied == _bm.count_one_bits(), "Check");
  121     return _occupied;
  122   }
 123 
  // Reset this PRT for (re)use with region 'hr'. When the PRT is being
  // reused in place on the 'all' list (clear_links_to_all_list == false)
  // its _next/_prev links are deliberately left intact. The release-store
  // of _hr must be the last write: it is the publication point for
  // concurrent readers, which acquire-load _hr (see hr()).
  124   void init(HeapRegion* hr, bool clear_links_to_all_list) {
  125     if (clear_links_to_all_list) {
  126       set_next(NULL);
  127       set_prev(NULL);
  128     }
  129     _collision_list_next = NULL;
  130     _occupied = 0;
  131     _bm.clear();
  132     // Make sure that the bitmap clearing above has been finished before publishing
  133     // this PRT to concurrent threads.
  134     OrderAccess::release_store(&_hr, hr);
  135   }
 136 
  // Record a reference originating at 'from': parallel (atomic) variant,
  // and a sequential variant for use when no concurrent updaters exist.
  137   void add_reference(OopOrNarrowOopStar from) {
  138     add_reference_work(from, /*parallel*/ true);
  139   }
  140 
  141   void seq_add_reference(OopOrNarrowOopStar from) {
  142     add_reference_work(from, /*parallel*/ false);
  143   }
 144 
  // Prune this PRT's bitmap against the card liveness info (presumably
  // clearing bits for cards with no live data — see G1CardLiveData), then
  // rebuild the cached _occupied count to match the pruned bitmap.
  145   void scrub(G1CardLiveData* live_data) {
  146     live_data->remove_nonlive_cards(hr()->hrm_index(), &_bm);
  147     recount_occupied();
  148   }
 149 
  // Set the bit for the given card index using the parallel (atomic) path.
  150   void add_card(CardIdx_t from_card_index) {
  151     add_card_work(from_card_index, /*parallel*/ true);
  152   }
 153 
 154   void seq_add_card(CardIdx_t from_card_index) {


 163 
  // Memory footprint of this PRT: the object itself plus the out-of-line
  // bitmap storage.
  164   // Mem size in bytes.
  165   size_t mem_size() const {
  166     return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  167   }
 168 
  // Test whether the card covering address 'from' is recorded in this PRT.
  // The card index is the card-size-granular offset of 'from' within the
  // owning region.
  169   // Requires "from" to be in "hr()".
  170   bool contains_reference(OopOrNarrowOopStar from) const {
  171     assert(hr()->is_in_reserved(from), "Precondition.");
  172     size_t card_ind = pointer_delta(from, hr()->bottom(),
  173                                     CardTableModRefBS::card_size);
  174     return _bm.at(card_ind);
  175   }
 176 
  // Lock-free push of the whole [prt, last] chain onto the global
  // _free_list: splice the current head behind 'last', then CAS 'prt' in
  // as the new head, retrying on contention. The ShouldNotReachHere() is
  // defensive — the loop can only exit via the return above.
  177   // Bulk-free the PRTs from prt to last, assumes that they are
  178   // linked together using their _next field.
  179   static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
  180     while (true) {
  181       PerRegionTable* fl = _free_list;
  182       last->set_next(fl);
  183       PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
  184       if (res == fl) {
  185         return;
  186       }
  187     }
  188     ShouldNotReachHere();
  189   }
 190 
  // Free a single PRT: a one-element bulk_free.
  191   static void free(PerRegionTable* prt) {
  192     bulk_free(prt, prt);
  193   }
 194 
  // Pop a PRT off the global lock-free _free_list (CAS the head to its
  // successor, re-reading the head on contention); fall back to heap
  // allocation when the list is empty. The popped PRT is re-init'ed with
  // links to the 'all' list cleared.
  // NOTE(review): a bare CAS pop like this is ABA-prone in general;
  // presumably frees cannot race with this pop in a harmful way here
  // (e.g. freeing only at safepoints) — confirm against the callers.
  195   // Returns an initialized PerRegionTable instance.
  196   static PerRegionTable* alloc(HeapRegion* hr) {
  197     PerRegionTable* fl = _free_list;
  198     while (fl != NULL) {
  199       PerRegionTable* nxt = fl->next();
  200       PerRegionTable* res = Atomic::cmpxchg(nxt, &_free_list, fl);


  201       if (res == fl) {
  202         fl->init(hr, true);
  203         return fl;
  204       } else {
  205         fl = _free_list;
  206       }
  207     }
  208     assert(fl == NULL, "Loop condition.");
  209     return new PerRegionTable(hr);
  210   }
 211 
  // Links for the doubly-linked 'all' list of PRTs (also reused by the
  // free list, which links through _next only).
  212   PerRegionTable* next() const { return _next; }
  213   void set_next(PerRegionTable* next) { _next = next; }
  214   PerRegionTable* prev() const { return _prev; }
  215   void set_prev(PerRegionTable* prev) { _prev = prev; }
 216 
 217   // Accessor and Modification routines for the pointer for the
 218   // singly linked collision list that links the PRTs within the
 219   // OtherRegionsTable::_fine_grain_regions hash table.
 220   //


 395       if (_n_fine_entries == _max_fine_entries) {
 396         prt = delete_region_table();
 397         // There is no need to clear the links to the 'all' list here:
 398         // prt will be reused immediately, i.e. remain in the 'all' list.
 399         prt->init(from_hr, false /* clear_links_to_all_list */);
 400       } else {
 401         prt = PerRegionTable::alloc(from_hr);
 402         link_to_all(prt);
 403       }
 404 
 405       PerRegionTable* first_prt = _fine_grain_regions[ind];
 406       prt->set_collision_list_next(first_prt);
 407       // The assignment into _fine_grain_regions allows the prt to
 408       // start being used concurrently. In addition to
 409       // collision_list_next which must be visible (else concurrent
 410       // parsing of the list, if any, may fail to see other entries),
 411       // the content of the prt must be visible (else for instance
 412       // some mark bits may not yet seem cleared or a 'later' update
 413       // performed by a concurrent thread could be undone when the
 414       // zeroing becomes visible). This requires store ordering.
 415       OrderAccess::release_store(&_fine_grain_regions[ind], prt);
 416       _n_fine_entries++;
 417 
 418       if (G1HRRSUseSparseTable) {
 419         // Transfer from sparse to fine-grain.
 420         SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
 421         assert(sprt_entry != NULL, "There should have been an entry");
 422         for (int i = 0; i < sprt_entry->num_valid_cards(); i++) {
 423           CardIdx_t c = sprt_entry->card(i);
 424           prt->add_card(c);
 425         }
 426         // Now we can delete the sparse entry.
 427         bool res = _sparse_table.delete_entry(from_hrm_ind);
 428         assert(res, "It should have been there.");
 429       }
 430     }
 431     assert(prt != NULL && prt->hr() == from_hr, "consequence");
 432   }
 433   // Note that we can't assert "prt->hr() == from_hr", because of the
 434   // possibility of concurrent reuse.  But see head comment of
 435   // OtherRegionsTable for why this is OK.


< prev index next >