
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

1329   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1330   // represent Java objects, or they might be free blocks in a
1331   // free-list-based heap (or subheap), as long as the two kinds are
1332   // distinguishable and the size of each is determinable.
1333 
1334   // Returns the address of the start of the "block" that contains the
1335   // address "addr".  We say "block" instead of "object" since some heaps
1336   // may not pack objects densely; a block may be either an object or a
1337   // non-object.
1338   virtual HeapWord* block_start(const void* addr) const;
1339 
1340   // Requires "addr" to be the start of a block, and returns its size.
1341   // "addr + size" is required to be the start of a new block, or the end
1342   // of the active area of the heap.
1343   virtual size_t block_size(const HeapWord* addr) const;
1344 
1345   // Requires "addr" to be the start of a block, and returns "TRUE" iff
1346   // the block is an object.
1347   virtual bool block_is_obj(const HeapWord* addr) const;
1348 
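A hedged sketch of the contract spelled out above (a toy model, not the G1 implementation): starting from the bottom of a region, a walker can visit every block by adding block_size() to the current start, treating non-object blocks as opaque filler. ToyHeap and walk are hypothetical names introduced only for illustration.

#include <cstdio>
#include <cstddef>

// Hypothetical stand-in for a heap region made of variable-sized blocks.
struct ToyHeap {
  static const int NBLOCKS = 3;
  size_t size[NBLOCKS];    // block sizes, in words
  bool   is_obj[NBLOCKS];  // true for objects, false for free blocks
};

// Walk the region one block at a time. The block_size() contract --
// "addr + size" is the start of the next block or the end of the
// active area -- is exactly what lets this loop step cleanly from
// one block to the next.
static void walk(const ToyHeap& h) {
  size_t addr = 0;  // word offset of the current block's start
  for (int i = 0; i < ToyHeap::NBLOCKS; i++) {
    std::printf("block @ %zu: %zu words, %s\n",
                addr, h.size[i], h.is_obj[i] ? "object" : "free block");
    addr += h.size[i];  // per the contract, this is the next block's start
  }
}

int main() {
  ToyHeap h = {{4, 2, 6}, {true, false, true}};  // middle block is free
  walk(h);
  return 0;
}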
1349   // Does this heap support heap inspection? (+PrintClassHistogram)
1350   virtual bool supports_heap_inspection() const { return true; }
1351 
1352   // Section on thread-local allocation buffers (TLABs)
1353   // See CollectedHeap for semantics.
1354 
1355   bool supports_tlab_allocation() const;
1356   size_t tlab_capacity(Thread* ignored) const;
1357   size_t tlab_used(Thread* ignored) const;
1358   size_t max_tlab_size() const;
1359   size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1360 
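To ground the TLAB queries above, here is a minimal bump-pointer buffer of the kind they describe (a sketch under that assumption; ToyTlab is a hypothetical name, not HotSpot's ThreadLocalAllocBuffer). Allocation inside a TLAB is a single thread-local pointer bump, which is what makes the fast path cheap enough to matter.

#include <cstddef>
#include <cstdio>

// Hypothetical bump-pointer TLAB (illustration only).
class ToyTlab {
  char* _start;
  char* _top;   // next free byte
  char* _end;   // one past the last usable byte
public:
  ToyTlab(char* start, size_t bytes)
    : _start(start), _top(start), _end(start + bytes) {}

  // Fast path: one thread-local pointer bump, no synchronization.
  void* allocate(size_t bytes) {
    if (size_t(_end - _top) < bytes) return nullptr;  // full: slow path
    char* obj = _top;
    _top += bytes;
    return obj;
  }

  size_t used()     const { return size_t(_top - _start); }  // cf. tlab_used()
  size_t capacity() const { return size_t(_end - _start); }  // cf. tlab_capacity()
};

int main() {
  static char buf[1024];
  ToyTlab tlab(buf, sizeof(buf));
  void* p = tlab.allocate(64);
  std::printf("alloc=%p used=%zu/%zu\n", p, tlab.used(), tlab.capacity());
  return 0;
}

Note that the Thread* parameters in the declarations above are named "ignored", suggesting these queries are answered heap-wide rather than per thread.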
1361   // Can a compiler initialize a new object without store barriers?
1362   // This permission only extends from the creation of a new object
1363   // via a TLAB up to the first subsequent safepoint. If such permission
1364   // is granted for this heap type, the compiler promises to call
1365   // defer_store_barrier() below on any slow path allocation of
1366   // a new object for which such initializing store barriers will
1367   // have been elided. G1, like CMS, allows this, but should be
1368   // ready to provide a compensating write barrier as necessary
1369   // if that storage came out of a non-young region. The efficiency
1370   // of this implementation depends crucially on being able to
1371   // answer very efficiently in constant time whether a piece of
1372   // storage in the heap comes from a young region or not.
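A sketch of the protocol this comment describes, under stated assumptions: initializing stores into a TLAB-allocated object carry no barrier until the next safepoint, so when a slow-path allocation hands out storage that is not in a young region, the heap must issue the compensating barrier itself. compensate_if_needed plays the role the comment assigns to defer_store_barrier(); region_is_young() and card_mark_range() are hypothetical stand-ins for the real young-region test and write barrier.

#include <cstddef>

// Hypothetical stand-ins (toy stubs) for the real G1 primitives:
static bool region_is_young(const void* /*addr*/) { return false; }
static void card_mark_range(const void* /*start*/, size_t /*bytes*/) {}

// Called on the slow allocation path for an object whose initializing
// store barriers were elided. If the storage did NOT come from a young
// region, apply the compensating write barrier now. As the comment
// stresses, the young-or-not test must be constant time, because it
// runs on every such slow-path allocation.
static void compensate_if_needed(const void* new_obj, size_t size_in_bytes) {
  if (!region_is_young(new_obj)) {
    card_mark_range(new_obj, size_in_bytes);  // compensating barrier
  }
}

int main() {
  char dummy[16];
  compensate_if_needed(dummy, sizeof(dummy));  // toy call; stubs do nothing
  return 0;
}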

+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

1329   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1330   // represent Java objects, or they might be free blocks in a
1331   // free-list-based heap (or subheap), as long as the two kinds are
1332   // distinguishable and the size of each is determinable.
1333 
1334   // Returns the address of the start of the "block" that contains the
1335   // address "addr".  We say "block" instead of "object" since some heaps
1336   // may not pack objects densely; a block may be either an object or a
1337   // non-object.
1338   virtual HeapWord* block_start(const void* addr) const;
1339 
1340   // Requires "addr" to be the start of a block, and returns its size.
1341   // "addr + size" is required to be the start of a new block, or the end
1342   // of the active area of the heap.
1343   virtual size_t block_size(const HeapWord* addr) const;
1344 
1345   // Requires "addr" to be the start of a block, and returns "TRUE" iff
1346   // the block is an object.
1347   virtual bool block_is_obj(const HeapWord* addr) const;
1348 



1349   // Section on thread-local allocation buffers (TLABs)
1350   // See CollectedHeap for semantics.
1351 
1352   bool supports_tlab_allocation() const;
1353   size_t tlab_capacity(Thread* ignored) const;
1354   size_t tlab_used(Thread* ignored) const;
1355   size_t max_tlab_size() const;
1356   size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1357 
1358   // Can a compiler initialize a new object without store barriers?
1359   // This permission only extends from the creation of a new object
1360   // via a TLAB up to the first subsequent safepoint. If such permission
1361   // is granted for this heap type, the compiler promises to call
1362   // defer_store_barrier() below on any slow path allocation of
1363   // a new object for which such initializing store barriers will
1364   // have been elided. G1, like CMS, allows this, but should be
1365   // ready to provide a compensating write barrier as necessary
1366   // if that storage came out of a non-young region. The efficiency
1367   // of this implementation depends crucially on being able to
1368   // answer very efficiently in constant time whether a piece of
1369   // storage in the heap comes from a young region or not.

