src/share/vm/memory/generation.hpp
rev 5732 : [mq]: comments2


 272   // caller must do the necessary locking.
 273   virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
 274                                              size_t word_size) {
 275     return NULL;
 276   }
 277 
 278   // Some generations may offer a region for shared, contiguous allocation,
 279   // via inlined code (by exporting the address of the top and end fields
 280   // defining the extent of the contiguous allocation region).
 281 
 282   // This function returns "true" iff the heap supports this kind of
 283   // allocation.  (More precisely, this means the style of allocation that
 284   // increments *top_addr() with a CAS.) (Default is "no".)
 285   // A generation that supports this allocation style must use lock-free
 286   // allocation for *all* allocation, since there are times when lock-free
 287   // allocation will be concurrent with plain "allocate" calls.
 288   virtual bool supports_inline_contig_alloc() const { return false; }
 289 
 290   // These functions return the addresses of the fields that define the
 291   // boundaries of the contiguous allocation area.  (These fields should be
 292   // physically near to one another.)
 293   virtual HeapWord** top_addr() const { return NULL; }
 294   virtual HeapWord** end_addr() const { return NULL; }
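
To make the lock-free contract concrete, here is a minimal standalone sketch (not HotSpot code; all names are illustrative) of the allocation style described above: racing threads bump the exported top pointer with a CAS and retry on contention.

#include <atomic>
#include <cstddef>

typedef unsigned long HeapWord;          // stand-in for HotSpot's HeapWord

static std::atomic<HeapWord*> _top;      // models the field behind top_addr()
static HeapWord*              _end;      // models the field behind end_addr()

// Bump-pointer allocation that increments the top field with a CAS.
HeapWord* par_allocate(size_t word_size) {
  HeapWord* old_top = _top.load(std::memory_order_relaxed);
  for (;;) {
    HeapWord* new_top = old_top + word_size;
    if (new_top > _end) {
      return NULL;                       // region exhausted
    }
    // Exactly one of the racing threads claims [old_top, new_top).
    if (_top.compare_exchange_weak(old_top, new_top)) {
      return old_top;
    }
    // On failure the CAS reloads old_top with the current top; retry.
  }
}
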
 295 
 296   // Thread-local allocation buffers
 297   virtual bool supports_tlab_allocation() const { return false; }
 298   virtual size_t tlab_capacity() const {
 299     guarantee(false, "Generation doesn't support thread local allocation buffers");
 300     return 0;
 301   }
 302   virtual size_t unsafe_max_tlab_alloc() const {
 303     guarantee(false, "Generation doesn't support thread local allocation buffers");
 304     return 0;
 305   }
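
A hedged sketch of the intended calling pattern (the helper below is made up for illustration): since the default tlab_capacity() and unsafe_max_tlab_alloc() fail with guarantee(false), callers must check supports_tlab_allocation() first.

// Illustrative caller, not HotSpot code: query TLAB room only from
// generations that advertise TLAB support.
size_t tlab_words_available(const Generation* gen) {
  if (!gen->supports_tlab_allocation()) {
    return 0;                    // the defaults would guarantee(false) here
  }
  // "unsafe" because the value is read without locking and may be stale.
  return gen->unsafe_max_tlab_alloc();
}
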
 306 
 307   // "obj" is the address of an object in a younger generation.  Allocate space
 308   // for "obj" in the current (or some higher) generation, and copy "obj" into
 309   // the newly allocated space, if possible, returning the result (or NULL if
 310   // the allocation failed).
 311   //
 312   // The "obj_size" argument is just obj->size(), passed along so the caller can


 468   // beginning allocation point post-collection, which might allow some later
 469   // operations to be optimized.
 470   virtual void save_marks() {}
 471 
 472   // This function allows generations to initialize any "saved marks".  That
 473   // is, it should only be called when the generation is empty.
 474   virtual void reset_saved_marks() {}
 475 
 476   // This function returns "true" iff no allocations have occurred in the
 477   // generation since the last call to "save_marks".
 478   virtual bool no_allocs_since_save_marks() = 0;
 479 
 480   // Apply "cl->apply" to (the addresses of) all reference fields in objects
 481   // allocated in the current generation since the last call to "save_marks".
 482   // If more objects are allocated in this generation as a result of applying
 483   // the closure, iterates over reference fields in those objects as well.
 484   // Calls "save_marks" at the end of the iteration.
 485   // General signature...
 486   virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
 487   // ...and specializations for de-virtualization.  (The general
 488   // implementation of the _nv versions calls the virtual version.
 489   // Note that the _nv suffix is not really semantically necessary,
 490   // but it avoids some not-so-useful warnings on Solaris.)
 491 #define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)             \
 492   virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {    \
 493     oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                      \
 494   }
 495   SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)
 496 
 497 #undef Generation_SINCE_SAVE_MARKS_DECL
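
To illustrate the de-virtualization pattern, assuming the specialized closure list includes a pair like (ScanClosure, _nv), the macro above would expand to the following. A subclass can then override this _nv entry point with a statically dispatched loop over the closure's concrete type, while the default shown here simply forwards to the general virtual version.

// Assumed expansion of Generation_SINCE_SAVE_MARKS_DECL(ScanClosure, _nv):
virtual void oop_since_save_marks_iterate_nv(ScanClosure* cl) {
  oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);
}
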
 498 
 499   // The "requestor" generation is performing some garbage collection
 500   // action for which it would be useful to have scratch space.  If
 501   // the target is not the requestor, no GC actions will be required
 502   // of the target.  The requestor promises to allocate no more than
 503   // "max_alloc_words" in the target generation (via promotion, say,
 504   // if the requestor is a young generation and the target is older).
 505   // If the target generation can provide any scratch space, it adds
 506   // it to "list", leaving "list" pointing to the head of the
 507   // augmented list.  The default is to offer no space.
 508   virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
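
The declaration is cut off in this excerpt; judging from the comment it also takes the "max_alloc_words" bound. A hedged sketch of what an override might look like (SomeGeneration, free_words_available(), and carve_out() are hypothetical, and a ScratchBlock layout beginning with {next, num_words} is assumed):

// Hypothetical override, assuming the truncated declaration ends with a
// size_t max_alloc_words parameter.
void SomeGeneration::contribute_scratch(ScratchBlock*& list,
                                        Generation* requestor,
                                        size_t max_alloc_words) {
  if (requestor == this) {
    return;                       // no GC actions required of the requestor
  }
  size_t words = free_words_available();      // hypothetical helper; could
                                              // also be capped by max_alloc_words
  if (words == 0) {
    return;                       // mirror the default: offer no space
  }
  ScratchBlock* blk = (ScratchBlock*)carve_out(words);  // hypothetical helper
  blk->num_words = words;
  blk->next = list;               // prepend ...
  list = blk;                     // ... leaving "list" at the new head
}
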



