index next >

src/share/vm/gc_implementation/parNew/parNewGeneration.hpp

Print this page
rev 7474 : imported patch 8066566
rev 7475 : [mq]: rev


 316  private:
 317   // The per-worker-thread work queues
 318   ObjToScanQueueSet* _task_queues;
 319 
 320   // Per-worker-thread local overflow stacks
 321   Stack<oop, mtGC>* _overflow_stacks;
 322 
 323   // Desired size of survivor space plab's
 324   PLABStats _plab_stats;
 325 
 326   // A list of from-space images of to-be-scanned objects, threaded through
 327   // klass-pointers (klass information already copied to the forwarded
 328   // image.)  Manipulated with CAS.
 329   oop _overflow_list;
       // Debug-only bookkeeping; the name suggests it counts objects pushed
       // onto the overflow list in parallel -- confirm in parNewGeneration.cpp.
 330   NOT_PRODUCT(ssize_t _num_par_pushes;)
 331 
 332   // This closure is used by the reference processor to filter out
 333   // references to live referent.
 334   DefNewGeneration::IsAliveClosure _is_alive_closure;
 335 



       // Out-of-line slow path backing real_forwardee(); waste_some_time() is
       // presumably a spin/backoff helper for contended paths -- verify
       // against the call sites in the .cpp file.
 336   static oop real_forwardee_slow(oop obj);
 337   static void waste_some_time();
 338 
 339   // Preserve the mark of "obj", if necessary, in preparation for its mark
 340   // word being overwritten with a self-forwarding-pointer.
 341   void preserve_mark_if_necessary(oop obj, markOop m);
 342 
       // Recovery path for a scavenge during which promotion failed; the
       // gc_tracer parameter lets the failure be reported to the tracer.
 343   void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer);
 344 
 345  protected:
 346 
       // Records that a survivor-space overflow condition occurred; accessed
       // through survivor_overflow()/set_survivor_overflow() below.
 347   bool _survivor_overflow;
 348 
 349   bool survivor_overflow() { return _survivor_overflow; }
 350   void set_survivor_overflow(bool v) { _survivor_overflow = v; }
 352  public:
 353   ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
 354 
 355   ~ParNewGeneration() {
 356     for (uint i = 0; i < ParallelGCThreads; i++)
 357         delete _task_queues->queue(i);
 358 
 359     delete _task_queues;
 360   }
 361 
 362   virtual void ref_processor_init();
       // Enum tag identifying this generation implementation as ParNew.
 363   virtual Generation::Name kind()        { return Generation::ParNew; }


 392   // Push the given (from-space) object on the global overflow list.
 393   void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);
 394 
 395   // If the global overflow list is non-empty, move some tasks from it
 396   // onto "work_q" (which need not be empty).  No more than 1/4 of the
 397   // available space on "work_q" is used.
       // The _work variant is the implementation; the return value presumably
       // indicates whether any tasks were transferred -- confirm in the .cpp.
 398   bool take_from_overflow_list(ParScanThreadState* par_scan_state);
 399   bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);
 400 
 401   // The task queues to be used by parallel GC threads.
 402   ObjToScanQueueSet* task_queues() {
 403     return _task_queues;
 404   }
 405 
 406   PLABStats* plab_stats() {
         // Survivor-space PLAB sizing statistics (see _plab_stats above).
 407     return &_plab_stats;
 408   }
 409 
 410   size_t desired_plab_sz() {
 411     return _plab_stats.desired_plab_sz();




 412   }
 413 
 414   static oop real_forwardee(oop obj);
 415 
 416   DEBUG_ONLY(static bool is_legal_forward_ptr(oop p);)
 417 };
 418 
 419 #endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP


 316  private:
 317   // The per-worker-thread work queues
 318   ObjToScanQueueSet* _task_queues;
 319 
 320   // Per-worker-thread local overflow stacks
 321   Stack<oop, mtGC>* _overflow_stacks;
 322 
 323   // Desired size of survivor space plab's
 324   PLABStats _plab_stats;
 325 
 326   // A list of from-space images of to-be-scanned objects, threaded through
 327   // klass-pointers (klass information already copied to the forwarded
 328   // image.)  Manipulated with CAS.
 329   oop _overflow_list;
       // Debug-only bookkeeping; the name suggests it counts objects pushed
       // onto the overflow list in parallel -- confirm in parNewGeneration.cpp.
 330   NOT_PRODUCT(ssize_t _num_par_pushes;)
 331 
 332   // This closure is used by the reference processor to filter out
 333   // references to live referent.
 334   DefNewGeneration::IsAliveClosure _is_alive_closure;
 335 
 336   // GC tracer that should be used during collection.
 337   ParNewTracer _gc_tracer;
 338 
       // Out-of-line slow path backing real_forwardee(); waste_some_time() is
       // presumably a spin/backoff helper for contended paths -- verify
       // against the call sites in the .cpp file.
 339   static oop real_forwardee_slow(oop obj);
 340   static void waste_some_time();
 341 
 342   // Preserve the mark of "obj", if necessary, in preparation for its mark
 343   // word being overwritten with a self-forwarding-pointer.
 344   void preserve_mark_if_necessary(oop obj, markOop m);
 345 
       // Recovery path for a scavenge during which promotion failed;
       // presumably reports through the _gc_tracer member now that the
       // tracer parameter was dropped -- confirm in the .cpp.
 346   void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set);
 347 
 348  protected:
 349 
       // Records that a survivor-space overflow condition occurred; accessed
       // through survivor_overflow()/set_survivor_overflow() below.
 350   bool _survivor_overflow;
 351 
 352   bool survivor_overflow() { return _survivor_overflow; }
 353   void set_survivor_overflow(bool v) { _survivor_overflow = v; }
 355  public:
 356   ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
 357 
 358   ~ParNewGeneration() {
 359     for (uint i = 0; i < ParallelGCThreads; i++)
 360         delete _task_queues->queue(i);
 361 
 362     delete _task_queues;
 363   }
 364 
 365   virtual void ref_processor_init();
       // Enum tag identifying this generation implementation as ParNew.
 366   virtual Generation::Name kind()        { return Generation::ParNew; }


 395   // Push the given (from-space) object on the global overflow list.
 396   void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);
 397 
 398   // If the global overflow list is non-empty, move some tasks from it
 399   // onto "work_q" (which need not be empty).  No more than 1/4 of the
 400   // available space on "work_q" is used.
       // The _work variant is the implementation; the return value presumably
       // indicates whether any tasks were transferred -- confirm in the .cpp.
 401   bool take_from_overflow_list(ParScanThreadState* par_scan_state);
 402   bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);
 403 
 404   // The task queues to be used by parallel GC threads.
 405   ObjToScanQueueSet* task_queues() {
 406     return _task_queues;
 407   }
 408 
 409   PLABStats* plab_stats() {
         // Survivor-space PLAB sizing statistics (see _plab_stats above).
 410     return &_plab_stats;
 411   }
 412 
 413   size_t desired_plab_sz() {
 414     return _plab_stats.desired_plab_sz();
 415   }
 416 
 417   const ParNewTracer* gc_tracer() const {
         // Read-only access to the tracer used during collection
         // (see _gc_tracer above).
 418     return &_gc_tracer;
 419   }
 420 
 421   static oop real_forwardee(oop obj);
 422 
 423   DEBUG_ONLY(static bool is_legal_forward_ptr(oop p);)
 424 };
 425 
 426 #endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
index next >