< prev index next >

src/share/vm/gc/shared/generation.hpp

Print this page




 292   // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
 293   // object "obj", whose original mark word was "m", and whose size is
 294   // "word_sz".  If possible, allocate space for "obj", copy obj into it
 295   // (taking care to copy "m" into the mark word when done, since the mark
 296   // word of "obj" may have been overwritten with a forwarding pointer, and
 297   // also taking care to copy the klass pointer *last*).  Returns the new
 298   // object if successful, or else NULL.
 299   virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);
 300 
 301   // Informs the current generation that all par_promote_alloc's in the
 302   // collection have been completed; any supporting data structures can be
 303   // reset.  Default is to do nothing.
 304   // (The default no-op implementation ignores "thread_num".)
 304   virtual void par_promote_alloc_done(int thread_num) {}
 305 
 306   // Informs the current generation that all oop_since_save_marks_iterates
 307   // performed by "thread_num" in the current collection, if any, have been
 308   // completed; any supporting data structures can be reset.  Default is to
 309   // do nothing.
 309   // (The default no-op implementation ignores "thread_num".)
 310   virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}
 311 
 312   // This generation does in-place marking, meaning that mark words
 313   // are mutated during the marking phase and presumably reinitialized
 314   // to a canonical value after the GC. This is currently used by the
 315   // biased locking implementation to determine whether additional
 316   // work is required during the GC prologue and epilogue.
 316   // The default implementation returns true; subclasses can override.
 317   virtual bool performs_in_place_marking() const { return true; }
 318 
 319   // Returns "true" iff collect() should subsequently be called on this
 320   // generation. See comment below.
 321   // This is a generic implementation which can be overridden.
 322   //
 323   // Note: in the current (1.4) implementation, when GenCollectedHeap's
 324   // incremental_collection_will_fail flag is set, all allocations are
 325   // slow path (the only fast-path place to allocate is DefNew, which
 326   // will be full if the flag is set).
 327   // Thus, older generations which collect younger generations should
 328   // test this flag and collect if it is set.
 329   virtual bool should_collect(bool   full,
 330                               size_t word_size,
 331                               bool   is_tlab) {
 332     return (full || should_allocate(word_size, is_tlab));
 333   }
 334 
 335   // Returns true if the collection is likely to be safely
 336   // completed. Even if this method returns true, a collection
 337   // may not be guaranteed to succeed, and the system should be
 338   // able to safely unwind and recover from that failure, albeit




 292   // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
 293   // object "obj", whose original mark word was "m", and whose size is
 294   // "word_sz".  If possible, allocate space for "obj", copy obj into it
 295   // (taking care to copy "m" into the mark word when done, since the mark
 296   // word of "obj" may have been overwritten with a forwarding pointer, and
 297   // also taking care to copy the klass pointer *last*).  Returns the new
 298   // object if successful, or else NULL.
 299   virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);
 300 
 301   // Informs the current generation that all par_promote_alloc's in the
 302   // collection have been completed; any supporting data structures can be
 303   // reset.  Default is to do nothing.
 304   // (The default no-op implementation ignores "thread_num".)
 304   virtual void par_promote_alloc_done(int thread_num) {}
 305 
 306   // Informs the current generation that all oop_since_save_marks_iterates
 307   // performed by "thread_num" in the current collection, if any, have been
 308   // completed; any supporting data structures can be reset.  Default is to
 309   // do nothing.
 309   // (The default no-op implementation ignores "thread_num".)
 310   virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}
 311 







 312   // Returns "true" iff collect() should subsequently be called on this
 313   // generation. See comment below.
 314   // This is a generic implementation which can be overridden.
 315   //
 316   // Note: in the current (1.4) implementation, when GenCollectedHeap's
 317   // incremental_collection_will_fail flag is set, all allocations are
 318   // slow path (the only fast-path place to allocate is DefNew, which
 319   // will be full if the flag is set).
 320   // Thus, older generations which collect younger generations should
 321   // test this flag and collect if it is set.
 322   virtual bool should_collect(bool   full,
 323                               size_t word_size,
 324                               bool   is_tlab) {
 325     return (full || should_allocate(word_size, is_tlab));
 326   }
 327 
 328   // Returns true if the collection is likely to be safely
 329   // completed. Even if this method returns true, a collection
 330   // may not be guaranteed to succeed, and the system should be
 331   // able to safely unwind and recover from that failure, albeit


< prev index next >