< prev index next >

src/hotspot/share/gc/g1/sparsePRT.hpp

Print this page




 169   SparsePRTEntry* get_entry(RegionIdx_t region_id) const;
 170 
 171   void clear();
 172 
 173   size_t capacity() const      { return _capacity; }
 174   size_t capacity_mask() const { return _capacity_mask;  }
 175   size_t occupied_entries() const { return _occupied_entries; }
 176   size_t occupied_cards() const   { return _occupied_cards; }
 177   size_t mem_size() const;
 178   // The number of SparsePRTEntry instances available.
 179   size_t num_entries() const { return _num_entries; }
 180 
 181   SparsePRTEntry* entry(int i) const {
 182     assert(i >= 0 && (size_t)i < _num_entries, "precondition");
         // Entries are laid out contiguously with stride SparsePRTEntry::size(),
         // so the i-th entry's address is computed with byte arithmetic.
 183     return (SparsePRTEntry*)((char*)_entries + SparsePRTEntry::size() * i);
 184   }
 185 
 186   void print();
 187 };
 188 
 189 // ValueObj because it will be embedded in the HRRS iterator.
 190 class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
 191   // Return value indicating "invalid/no card".
 192   static const int NoCardFound = -1;
 193 
 194   int _tbl_ind;         // [-1, 0.._rsht->_capacity)
 195   int _bl_ind;          // [-1, 0.._rsht->_capacity)
 196   short _card_ind;      // [0..SparsePRTEntry::cards_num())
 197   RSHashTable* _rsht;   // The table being iterated over.
 198 
 199   // If the bucket list pointed to by _bl_ind contains a card, sets
 200   // _bl_ind to the index of that entry.
 201   // Returns the card found if there is one, otherwise returns NoCardFound.
 202   CardIdx_t find_first_card_in_list();
 203 
 204   // Computes the proper card index for the card whose offset in the
 205   // current region (as indicated by _bl_ind) is "ci".
 206   // This is subject to errors when there is iteration concurrent with
 207   // modification, but these errors should be benign.
 208   size_t compute_card_ind(CardIdx_t ci);
 209 
 210 public:
 211   RSHashTableIter(RSHashTable* rsht) :
 212     _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
 213     _bl_ind(RSHashTable::NullEntry),
 214     _card_ind((SparsePRTEntry::cards_num() - 1)),
 215     _rsht(rsht) {}
 216 
       // If there is another card in the table, sets "card_index" to it and
       // returns true; otherwise returns false.
 217   bool has_next(size_t& card_index);
 218 };
 219 
 220 // Concurrent access to a SparsePRT must be serialized by some external mutex.
 221 
 222 class SparsePRTIter;
 223 class SparsePRTCleanupTask;
 224 
 225 class SparsePRT VALUE_OBJ_CLASS_SPEC {
 226   friend class SparsePRTCleanupTask;
 227 
 228   //  Iterations are done on the _cur hash table, since they only need to
 229   //  see entries visible at the start of a collection pause.
 230   //  All other operations are done using the _next hash table.
 231   RSHashTable* _cur;
 232   RSHashTable* _next;
 233 
 234   HeapRegion* _hr;
 235 
 236   enum SomeAdditionalPrivateConstants {
 237     InitialCapacity = 16  // Initial size of a newly-allocated hash table.
 238   };
 239 
       // NOTE(review): definition not visible in this view; presumably grows
       // the table and records the expansion via set_expanded() -- confirm
       // in sparsePRT.cpp.
 240   void expand();
 241 
       // Whether this table has been expanded; see SparsePRTCleanupTask.
 242   bool _expanded;
 243 
 244   bool expanded() { return _expanded; }
 245   void set_expanded(bool b) { _expanded = b; }


       // Containment is checked against the _next table, which (per the
       // comment above) is the one receiving all updates.
 317   bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
 318     return _next->contains_card(region_id, card_index);
 319   }
 320 };
 321 
 322 class SparsePRTIter: public RSHashTableIter {
 323 public:
       // Iterates over the table returned by sprt->cur(), i.e. the entries
       // visible at the start of the collection pause (see SparsePRT).
 324   SparsePRTIter(const SparsePRT* sprt) :
 325     RSHashTableIter(sprt->cur()) {}
 326 
 327   bool has_next(size_t& card_index) {
 328     return RSHashTableIter::has_next(card_index);
 329   }
 330 };
 331 
 332 // This allows each worker during a cleanup pause to create a
 333 // thread-local list of sparse tables that have been expanded and need
 334 // to be processed at the beginning of the next GC pause. These lists
 335 // are concatenated into the single expanded list at the end of the
 336 // cleanup pause.
 337 class SparsePRTCleanupTask VALUE_OBJ_CLASS_SPEC {
 338 private:
       // Head and tail of the thread-local list of expanded SparsePRTs.
 339   SparsePRT* _head;
 340   SparsePRT* _tail;
 341 
 342 public:
 343   SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { }  // Initially empty.
 344 
       // Appends "sprt" to this list (definition not visible in this view).
 345   void add(SparsePRT* sprt);
 346   SparsePRT* head() { return _head; }
 347   SparsePRT* tail() { return _tail; }
 348 };
 349 
 350 #endif // SHARE_VM_GC_G1_SPARSEPRT_HPP


 169   SparsePRTEntry* get_entry(RegionIdx_t region_id) const;
 170 
 171   void clear();
 172 
 173   size_t capacity() const      { return _capacity; }
 174   size_t capacity_mask() const { return _capacity_mask;  }
 175   size_t occupied_entries() const { return _occupied_entries; }
 176   size_t occupied_cards() const   { return _occupied_cards; }
 177   size_t mem_size() const;
 178   // The number of SparsePRTEntry instances available.
 179   size_t num_entries() const { return _num_entries; }
 180 
 181   SparsePRTEntry* entry(int i) const {
 182     assert(i >= 0 && (size_t)i < _num_entries, "precondition");
         // Entries are laid out contiguously with stride SparsePRTEntry::size(),
         // so the i-th entry's address is computed with byte arithmetic.
 183     return (SparsePRTEntry*)((char*)_entries + SparsePRTEntry::size() * i);
 184   }
 185 
 186   void print();
 187 };
 188 
 189 // This is embedded in the HRRS iterator.
 190 class RSHashTableIter {
 191   // Return value indicating "invalid/no card".
 192   static const int NoCardFound = -1;
 193 
 194   int _tbl_ind;         // [-1, 0.._rsht->_capacity)
 195   int _bl_ind;          // [-1, 0.._rsht->_capacity)
 196   short _card_ind;      // [0..SparsePRTEntry::cards_num())
 197   RSHashTable* _rsht;   // The table being iterated over.
 198 
 199   // If the bucket list pointed to by _bl_ind contains a card, sets
 200   // _bl_ind to the index of that entry.
 201   // Returns the card found if there is one, otherwise returns NoCardFound.
 202   CardIdx_t find_first_card_in_list();
 203 
 204   // Computes the proper card index for the card whose offset in the
 205   // current region (as indicated by _bl_ind) is "ci".
 206   // This is subject to errors when there is iteration concurrent with
 207   // modification, but these errors should be benign.
 208   size_t compute_card_ind(CardIdx_t ci);
 209 
 210 public:
 211   RSHashTableIter(RSHashTable* rsht) :
 212     _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
 213     _bl_ind(RSHashTable::NullEntry),
 214     _card_ind((SparsePRTEntry::cards_num() - 1)),
 215     _rsht(rsht) {}
 216 
       // If there is another card in the table, sets "card_index" to it and
       // returns true; otherwise returns false.
 217   bool has_next(size_t& card_index);
 218 };
 219 
 220 // Concurrent access to a SparsePRT must be serialized by some external mutex.
 221 
 222 class SparsePRTIter;
 223 class SparsePRTCleanupTask;
 224 
 225 class SparsePRT {
 226   friend class SparsePRTCleanupTask;
 227 
 228   //  Iterations are done on the _cur hash table, since they only need to
 229   //  see entries visible at the start of a collection pause.
 230   //  All other operations are done using the _next hash table.
 231   RSHashTable* _cur;
 232   RSHashTable* _next;
 233 
 234   HeapRegion* _hr;
 235 
 236   enum SomeAdditionalPrivateConstants {
 237     InitialCapacity = 16  // Initial size of a newly-allocated hash table.
 238   };
 239 
       // NOTE(review): definition not visible in this view; presumably grows
       // the table and records the expansion via set_expanded() -- confirm
       // in sparsePRT.cpp.
 240   void expand();
 241 
       // Whether this table has been expanded; see SparsePRTCleanupTask.
 242   bool _expanded;
 243 
 244   bool expanded() { return _expanded; }
 245   void set_expanded(bool b) { _expanded = b; }


       // Containment is checked against the _next table, which (per the
       // comment above) is the one receiving all updates.
 317   bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
 318     return _next->contains_card(region_id, card_index);
 319   }
 320 };
 321 
 322 class SparsePRTIter: public RSHashTableIter {
 323 public:
       // Iterates over the table returned by sprt->cur(), i.e. the entries
       // visible at the start of the collection pause (see SparsePRT).
 324   SparsePRTIter(const SparsePRT* sprt) :
 325     RSHashTableIter(sprt->cur()) {}
 326 
 327   bool has_next(size_t& card_index) {
 328     return RSHashTableIter::has_next(card_index);
 329   }
 330 };
 331 
 332 // This allows each worker during a cleanup pause to create a
 333 // thread-local list of sparse tables that have been expanded and need
 334 // to be processed at the beginning of the next GC pause. These lists
 335 // are concatenated into the single expanded list at the end of the
 336 // cleanup pause.
 337 class SparsePRTCleanupTask {
 338 private:
       // Head and tail of the thread-local list of expanded SparsePRTs.
 339   SparsePRT* _head;
 340   SparsePRT* _tail;
 341 
 342 public:
 343   SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { }  // Initially empty.
 344 
       // Appends "sprt" to this list (definition not visible in this view).
 345   void add(SparsePRT* sprt);
 346   SparsePRT* head() { return _head; }
 347   SparsePRT* tail() { return _tail; }
 348 };
 349 
 350 #endif // SHARE_VM_GC_G1_SPARSEPRT_HPP
< prev index next >