
src/hotspot/share/gc/shared/oopStorage.inline.hpp

rev 50951 : imported patch rename_AllocateList
rev 50952 : imported patch rename_AllocateEntry
rev 50953 : imported patch rename_allocate_entry
rev 50954 : imported patch rename_allocate_list

Old version (before the rename patches):

  90 }
  91 
  92 inline OopStorage::Block* const* OopStorage::ActiveArray::base_ptr() const {
  93   const void* ptr = reinterpret_cast<const char*>(this) + blocks_offset();
  94   return reinterpret_cast<Block* const*>(ptr);
  95 }
  96 
  97 inline OopStorage::Block* const* OopStorage::ActiveArray::block_ptr(size_t index) const {
  98   return base_ptr() + index;
  99 }
 100 
 101 inline OopStorage::Block** OopStorage::ActiveArray::block_ptr(size_t index) {
 102   return const_cast<Block**>(base_ptr() + index);
 103 }
 104 
 105 inline OopStorage::Block* OopStorage::ActiveArray::at(size_t index) const {
 106   assert(index < _block_count, "precondition");
 107   return *block_ptr(index);
 108 }
 109 
 110 // A Block has an embedded AllocateEntry to provide the links between
  111 // Blocks in an AllocateList.
 112 class OopStorage::AllocateEntry {
 113   friend class OopStorage::AllocateList;
 114 
 115   // Members are mutable, and we deal exclusively with pointers to
 116   // const, to make const blocks easier to use; a block being const
 117   // doesn't prevent modifying its list state.
 118   mutable const Block* _prev;
 119   mutable const Block* _next;
 120 
 121   // Noncopyable.
 122   AllocateEntry(const AllocateEntry&);
 123   AllocateEntry& operator=(const AllocateEntry&);
 124 
 125 public:
 126   AllocateEntry();
 127   ~AllocateEntry();
 128 };
 129 
 130 // Fixed-sized array of oops, plus bookkeeping data.
 131 // All blocks are in the storage's _active_array, at the block's _active_index.
 132 // Non-full blocks are in the storage's _allocate_list, linked through the
 133 // block's _allocate_entry.  Empty blocks are at the end of that list.
 134 class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
 135   // _data must be the first non-static data member, for alignment.
 136   oop _data[BitsPerWord];
 137   static const unsigned _data_pos = 0; // Position of _data.
 138 
 139   volatile uintx _allocated_bitmask; // One bit per _data element.
 140   const OopStorage* _owner;
 141   void* _memory;              // Unaligned storage containing block.
 142   size_t _active_index;
 143   AllocateEntry _allocate_entry;
 144   Block* volatile _deferred_updates_next;
 145   volatile uintx _release_refcount;
 146 
 147   Block(const OopStorage* owner, void* memory);
 148   ~Block();
 149 
 150   void check_index(unsigned index) const;
 151   unsigned get_index(const oop* ptr) const;
 152 
 153   template<typename F, typename BlockPtr>
 154   static bool iterate_impl(F f, BlockPtr b);
 155 
 156   // Noncopyable.
 157   Block(const Block&);
 158   Block& operator=(const Block&);
 159 
 160 public:
 161   const AllocateEntry& allocate_entry() const;
 162 
 163   static size_t allocation_size();
 164   static size_t allocation_alignment_shift();
 165 
 166   oop* get_pointer(unsigned index);
 167   const oop* get_pointer(unsigned index) const;
 168 
 169   uintx bitmask_for_index(unsigned index) const;
 170   uintx bitmask_for_entry(const oop* ptr) const;
 171 
 172   // Allocation bitmask accessors are racy.
 173   bool is_full() const;
 174   bool is_empty() const;
 175   uintx allocated_bitmask() const;
 176   bool is_deletable() const;
 177 
 178   Block* deferred_updates_next() const;
 179   void set_deferred_updates_next(Block* new_next);
 180 
 181   bool contains(const oop* ptr) const;
 182 
 183   size_t active_index() const;
 184   void set_active_index(size_t index);
 185   static size_t active_index_safe(const Block* block); // Returns 0 if access fails.
 186 
 187   // Returns NULL if ptr is not in a block or not allocated in that block.
 188   static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);
 189 
 190   oop* allocate();
 191   static Block* new_block(const OopStorage* owner);
 192   static void delete_block(const Block& block);
 193 
 194   void release_entries(uintx releasing, Block* volatile* deferred_list);
 195 
 196   template<typename F> bool iterate(F f);
 197   template<typename F> bool iterate(F f) const;
 198 }; // class Block
 199 
 200 inline OopStorage::Block* OopStorage::AllocateList::head() {
 201   return const_cast<Block*>(_head);
 202 }
 203 
 204 inline OopStorage::Block* OopStorage::AllocateList::tail() {
 205   return const_cast<Block*>(_tail);
 206 }
 207 
 208 inline const OopStorage::Block* OopStorage::AllocateList::chead() const {
 209   return _head;
 210 }
 211 
 212 inline const OopStorage::Block* OopStorage::AllocateList::ctail() const {
 213   return _tail;
 214 }
 215 
 216 inline OopStorage::Block* OopStorage::AllocateList::prev(Block& block) {
 217   return const_cast<Block*>(block.allocate_entry()._prev);
 218 }
 219 
 220 inline OopStorage::Block* OopStorage::AllocateList::next(Block& block) {
 221   return const_cast<Block*>(block.allocate_entry()._next);
 222 }
 223 
 224 inline const OopStorage::Block* OopStorage::AllocateList::prev(const Block& block) const {
 225   return block.allocate_entry()._prev;
 226 }
 227 
 228 inline const OopStorage::Block* OopStorage::AllocateList::next(const Block& block) const {
 229   return block.allocate_entry()._next;
 230 }
 231 
 232 template<typename Closure>
 233 class OopStorage::OopFn {
 234 public:
 235   explicit OopFn(Closure* cl) : _cl(cl) {}
 236 
 237   template<typename OopPtr>     // [const] oop*
 238   bool operator()(OopPtr ptr) const {
 239     _cl->do_oop(ptr);
 240     return true;
 241   }
 242 
 243 private:
 244   Closure* _cl;
 245 };
 246 
 247 template<typename Closure>
 248 inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) {
 249   return OopFn<Closure>(cl);


 281 class OopStorage::SkipNullFn {
 282 public:
 283   SkipNullFn(F f) : _f(f) {}
 284 
 285   template<typename OopPtr>     // [const] oop*
 286   bool operator()(OopPtr ptr) const {
 287     return (*ptr != NULL) ? _f(ptr) : true;
 288   }
 289 
 290 private:
 291   F _f;
 292 };
 293 
 294 template<typename F>
 295 inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
 296   return SkipNullFn<F>(f);
 297 }
 298 
 299 // Inline Block accesses for use in iteration loops.
 300 
 301 inline const OopStorage::AllocateEntry& OopStorage::Block::allocate_entry() const {
 302   return _allocate_entry;
 303 }
 304 
 305 inline void OopStorage::Block::check_index(unsigned index) const {
 306   assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
 307 }
 308 
 309 inline oop* OopStorage::Block::get_pointer(unsigned index) {
 310   check_index(index);
 311   return &_data[index];
 312 }
 313 
 314 inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
 315   check_index(index);
 316   return &_data[index];
 317 }
 318 
 319 inline uintx OopStorage::Block::allocated_bitmask() const {
 320   return _allocated_bitmask;
 321 }
  322 

New version (after the rename patches):

  90 }
  91 
  92 inline OopStorage::Block* const* OopStorage::ActiveArray::base_ptr() const {
  93   const void* ptr = reinterpret_cast<const char*>(this) + blocks_offset();
  94   return reinterpret_cast<Block* const*>(ptr);
  95 }
  96 
  97 inline OopStorage::Block* const* OopStorage::ActiveArray::block_ptr(size_t index) const {
  98   return base_ptr() + index;
  99 }
 100 
 101 inline OopStorage::Block** OopStorage::ActiveArray::block_ptr(size_t index) {
 102   return const_cast<Block**>(base_ptr() + index);
 103 }
 104 
 105 inline OopStorage::Block* OopStorage::ActiveArray::at(size_t index) const {
 106   assert(index < _block_count, "precondition");
 107   return *block_ptr(index);
 108 }
 109 
 110 // A Block has an embedded AllocationListEntry to provide the links between
  111 // Blocks in an AllocationList.
 112 class OopStorage::AllocationListEntry {
 113   friend class OopStorage::AllocationList;
 114 
 115   // Members are mutable, and we deal exclusively with pointers to
 116   // const, to make const blocks easier to use; a block being const
 117   // doesn't prevent modifying its list state.
 118   mutable const Block* _prev;
 119   mutable const Block* _next;
 120 
 121   // Noncopyable.
 122   AllocationListEntry(const AllocationListEntry&);
 123   AllocationListEntry& operator=(const AllocationListEntry&);
 124 
 125 public:
 126   AllocationListEntry();
 127   ~AllocationListEntry();
 128 };
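The embedded entry makes each Block a node of an intrusive doubly-linked list: the AllocationList never allocates link nodes of its own, it just threads through the entries stored inside the blocks. A minimal standalone sketch of that pattern, using hypothetical toy types rather than the OopStorage classes:

#include <cstddef>   // NULL

// Sketch of the embedded-entry (intrusive list) idea only; Node/ToyList are
// illustrative stand-ins, not the real Block/AllocationList types.
struct Node;

struct ListEntry {
  Node* _prev;
  Node* _next;
  ListEntry() : _prev(NULL), _next(NULL) {}
};

struct Node {
  int _payload;
  // The links live inside the node itself, like Block::_allocation_list_entry.
  ListEntry _entry;
};

struct ToyList {
  Node* _head;
  Node* _tail;
  ToyList() : _head(NULL), _tail(NULL) {}

  void push_back(Node* n) {
    // No allocation: linking a node just rewires the embedded entries.
    n->_entry._prev = _tail;
    n->_entry._next = NULL;
    if (_tail == NULL) {
      _head = n;
    } else {
      _tail->_entry._next = n;
    }
    _tail = n;
  }
};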
 129 
 130 // Fixed-sized array of oops, plus bookkeeping data.
 131 // All blocks are in the storage's _active_array, at the block's _active_index.
 132 // Non-full blocks are in the storage's _allocation_list, linked through the
 133 // block's _allocation_list_entry.  Empty blocks are at the end of that list.
 134 class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
 135   // _data must be the first non-static data member, for alignment.
 136   oop _data[BitsPerWord];
 137   static const unsigned _data_pos = 0; // Position of _data.
 138 
 139   volatile uintx _allocated_bitmask; // One bit per _data element.
 140   const OopStorage* _owner;
 141   void* _memory;              // Unaligned storage containing block.
 142   size_t _active_index;
 143   AllocationListEntry _allocation_list_entry;
 144   Block* volatile _deferred_updates_next;
 145   volatile uintx _release_refcount;
 146 
 147   Block(const OopStorage* owner, void* memory);
 148   ~Block();
 149 
 150   void check_index(unsigned index) const;
 151   unsigned get_index(const oop* ptr) const;
 152 
 153   template<typename F, typename BlockPtr>
 154   static bool iterate_impl(F f, BlockPtr b);
 155 
 156   // Noncopyable.
 157   Block(const Block&);
 158   Block& operator=(const Block&);
 159 
 160 public:
 161   const AllocationListEntry& allocation_list_entry() const;
 162 
 163   static size_t allocation_size();
 164   static size_t allocation_alignment_shift();
 165 
 166   oop* get_pointer(unsigned index);
 167   const oop* get_pointer(unsigned index) const;
 168 
 169   uintx bitmask_for_index(unsigned index) const;
 170   uintx bitmask_for_entry(const oop* ptr) const;
 171 
 172   // Allocation bitmask accessors are racy.
 173   bool is_full() const;
 174   bool is_empty() const;
 175   uintx allocated_bitmask() const;
 176   bool is_deletable() const;
 177 
 178   Block* deferred_updates_next() const;
 179   void set_deferred_updates_next(Block* new_next);
 180 
 181   bool contains(const oop* ptr) const;
 182 
 183   size_t active_index() const;
 184   void set_active_index(size_t index);
 185   static size_t active_index_safe(const Block* block); // Returns 0 if access fails.
 186 
 187   // Returns NULL if ptr is not in a block or not allocated in that block.
 188   static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);
 189 
 190   oop* allocate();
 191   static Block* new_block(const OopStorage* owner);
 192   static void delete_block(const Block& block);
 193 
 194   void release_entries(uintx releasing, Block* volatile* deferred_list);
 195 
 196   template<typename F> bool iterate(F f);
 197   template<typename F> bool iterate(F f) const;
 198 }; // class Block
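The _allocated_bitmask bookkeeping above dedicates one bit per _data slot, which is why _data has exactly BitsPerWord elements and why is_full() and is_empty() can be answered from a single (racy) word read. A hedged sketch of how such a per-slot bitmask answers those queries; the toy_ helpers are illustrative stand-ins, not the Block member implementations:

// Sketch only: bit i set <=> slot i of _data is allocated.
// These free functions are hypothetical, not Block's accessors.
inline uintx toy_bitmask_for_index(unsigned index) {
  assert(index < (unsigned)BitsPerWord, "precondition");
  return uintx(1) << index;                // select the bit for one slot
}

inline bool toy_is_empty(uintx allocated_bitmask) {
  return allocated_bitmask == 0;           // no bit set: every slot is free
}

inline bool toy_is_full(uintx allocated_bitmask) {
  return ~allocated_bitmask == 0;          // every bit set: every slot in use
}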
 199 
 200 inline OopStorage::Block* OopStorage::AllocationList::head() {
 201   return const_cast<Block*>(_head);
 202 }
 203 
 204 inline OopStorage::Block* OopStorage::AllocationList::tail() {
 205   return const_cast<Block*>(_tail);
 206 }
 207 
 208 inline const OopStorage::Block* OopStorage::AllocationList::chead() const {
 209   return _head;
 210 }
 211 
 212 inline const OopStorage::Block* OopStorage::AllocationList::ctail() const {
 213   return _tail;
 214 }
 215 
 216 inline OopStorage::Block* OopStorage::AllocationList::prev(Block& block) {
 217   return const_cast<Block*>(block.allocation_list_entry()._prev);
 218 }
 219 
 220 inline OopStorage::Block* OopStorage::AllocationList::next(Block& block) {
 221   return const_cast<Block*>(block.allocation_list_entry()._next);
 222 }
 223 
 224 inline const OopStorage::Block* OopStorage::AllocationList::prev(const Block& block) const {
 225   return block.allocation_list_entry()._prev;
 226 }
 227 
 228 inline const OopStorage::Block* OopStorage::AllocationList::next(const Block& block) const {
 229   return block.allocation_list_entry()._next;
 230 }
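Together, these accessors let list-walking code stay ignorant of the entry fields. A hedged sketch of a read-only walk via chead() and next(); the helper name is hypothetical, the nested types are only reachable from OopStorage's own code, and the caller is assumed to hold whatever lock the storage requires:

// Sketch only: count the blocks on an AllocationList by walking it front to
// back.  Assumes next() yields NULL past the tail and that the necessary
// synchronization is held; this is not a call site from this file.
static size_t count_blocks_in(const OopStorage::AllocationList& list) {
  size_t count = 0;
  for (const OopStorage::Block* block = list.chead();
       block != NULL;
       block = list.next(*block)) {
    ++count;
  }
  return count;
}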
 231 
 232 template<typename Closure>
 233 class OopStorage::OopFn {
 234 public:
 235   explicit OopFn(Closure* cl) : _cl(cl) {}
 236 
 237   template<typename OopPtr>     // [const] oop*
 238   bool operator()(OopPtr ptr) const {
 239     _cl->do_oop(ptr);
 240     return true;
 241   }
 242 
 243 private:
 244   Closure* _cl;
 245 };
 246 
 247 template<typename Closure>
 248 inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) {
 249   return OopFn<Closure>(cl);


 281 class OopStorage::SkipNullFn {
 282 public:
 283   SkipNullFn(F f) : _f(f) {}
 284 
 285   template<typename OopPtr>     // [const] oop*
 286   bool operator()(OopPtr ptr) const {
 287     return (*ptr != NULL) ? _f(ptr) : true;
 288   }
 289 
 290 private:
 291   F _f;
 292 };
 293 
 294 template<typename F>
 295 inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
 296   return SkipNullFn<F>(f);
 297 }
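The two adapters are meant to compose: wrapping an OopFn in a SkipNullFn yields a function object that forwards only non-NULL entries to the closure. A hedged sketch of that composition for a single slot; visit_slot is hypothetical, and in the real code the composition happens inside OopStorage's own iteration functions, where the nested adapter types are accessible:

// Sketch only: apply a closure to one slot, skipping it when the slot holds
// NULL.  SkipNullFn filters, then OopFn forwards to cl->do_oop(ptr).
inline bool visit_slot(OopClosure* cl, oop* ptr) {
  return OopStorage::skip_null_fn(OopStorage::oop_fn(cl))(ptr);
}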
 298 
 299 // Inline Block accesses for use in iteration loops.
 300 
 301 inline const OopStorage::AllocationListEntry& OopStorage::Block::allocation_list_entry() const {
 302   return _allocation_list_entry;
 303 }
 304 
 305 inline void OopStorage::Block::check_index(unsigned index) const {
 306   assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
 307 }
 308 
 309 inline oop* OopStorage::Block::get_pointer(unsigned index) {
 310   check_index(index);
 311   return &_data[index];
 312 }
 313 
 314 inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
 315   check_index(index);
 316   return &_data[index];
 317 }
 318 
 319 inline uintx OopStorage::Block::allocated_bitmask() const {
 320   return _allocated_bitmask;
 321 }
 322 

