< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

Print this page
rev 52310 : imported patch 8071913-almost-done
rev 52312 : imported patch 6490394-uncommit-at-remark


 451                                    size_t allocated_bytes);
 452 
 453   // For GC alloc regions.
 454   bool has_more_regions(InCSetState dest);
 455   HeapRegion* new_gc_alloc_region(size_t word_size, InCSetState dest);
 456   void retire_gc_alloc_region(HeapRegion* alloc_region,
 457                               size_t allocated_bytes, InCSetState dest);
 458 
 459   // - if explicit_gc is true, the GC is for a System.gc() etc,
 460   //   otherwise it's for a failed allocation.
 461   // - if clear_all_soft_refs is true, all soft references should be
 462   //   cleared during the GC.
 463   // - it returns false if it is unable to do the collection due to the
 464   //   GC locker being active, true otherwise.
 465   bool do_full_collection(bool explicit_gc,
 466                           bool clear_all_soft_refs);
 467 
 468   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
 469   virtual void do_full_collection(bool clear_all_soft_refs);
 470 
 471   // Resize the heap if necessary after a full collection.
 472   void resize_if_necessary_after_full_collection();
 473 
 474   // Callback from VM_G1CollectForAllocation operation.
 475   // This function does everything necessary/possible to satisfy a
 476   // failed allocation request (including collection, expansion, etc.)
 477   HeapWord* satisfy_failed_allocation(size_t word_size,
 478                                       bool* succeeded);
 479   // Internal helpers used during full GC to split it up to
 480   // increase readability.
 481   void abort_concurrent_cycle();
 482   void verify_before_full_collection(bool explicit_gc);
 483   void prepare_heap_for_full_collection();
 484   void prepare_heap_for_mutators();
 485   void abort_refinement();
 486   void verify_after_full_collection();
 487   void print_heap_after_full_collection(G1HeapTransition* heap_transition);
 488 
 489   // Helper method for satisfy_failed_allocation()
 490   HeapWord* satisfy_failed_allocation_helper(size_t word_size,
 491                                              bool do_gc,
 492                                              bool clear_all_soft_refs,
 493                                              bool expect_null_mutator_alloc_region,


 511   // during GC into global variables.
 512   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 513 public:
       // Read-only accessor for the thread that periodically samples young-gen
       // remembered set lengths. NOTE(review): only the field read is visible
       // here -- ownership/lifetime is managed elsewhere in this class.
 514   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 515 
       // Accessor for the WorkGang of GC worker threads (non-owning pointer).
 516   WorkGang* workers() const { return _workers; }
 517 
       // Accessor for the heap's G1Allocator (non-owning pointer).
 518   G1Allocator* allocator() {
 519     return _allocator;
 520   }
 521 
       // Accessor for the heap verifier used by verification passes.
 522   G1HeapVerifier* verifier() {
 523     return _verifier;
 524   }
 525 
       // Accessor for monitoring (management/jstat) support. Asserts the field
       // has been set up, i.e. callers must not use it before heap initialization
       // completes.
 526   G1MonitoringSupport* g1mm() {
 527     assert(_g1mm != NULL, "should have been initialized");
 528     return _g1mm;
 529   }
 530 


 531   // Expand the garbage-first heap by at least the given size (in bytes!).
 532   // Returns true if the heap was expanded by the requested amount;
 533   // false otherwise.
 534   // (Rounds up to a HeapRegion boundary.)
 535   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
 536 
 537   // Returns the PLAB statistics for a given destination.
 538   inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
 539 
 540   // Determines PLAB size for a given destination.
 541   inline size_t desired_plab_sz(InCSetState dest);
 542 
 543   // Do anything common to GC's.
 544   void gc_prologue(bool full);
 545   void gc_epilogue(bool full);
 546 
 547   // Does the given region fulfill remembered set based eager reclaim candidate requirements?
 548   bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
 549 
 550   // Modify the reclaim candidate set and test for presence.




 451                                    size_t allocated_bytes);
 452 
 453   // For GC alloc regions.
 454   bool has_more_regions(InCSetState dest);
 455   HeapRegion* new_gc_alloc_region(size_t word_size, InCSetState dest);
 456   void retire_gc_alloc_region(HeapRegion* alloc_region,
 457                               size_t allocated_bytes, InCSetState dest);
 458 
 459   // - if explicit_gc is true, the GC is for a System.gc() etc,
 460   //   otherwise it's for a failed allocation.
 461   // - if clear_all_soft_refs is true, all soft references should be
 462   //   cleared during the GC.
 463   // - it returns false if it is unable to do the collection due to the
 464   //   GC locker being active, true otherwise.
 465   bool do_full_collection(bool explicit_gc,
 466                           bool clear_all_soft_refs);
 467 
 468   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
 469   virtual void do_full_collection(bool clear_all_soft_refs);
 470 



 471   // Callback from VM_G1CollectForAllocation operation.
 472   // This function does everything necessary/possible to satisfy a
 473   // failed allocation request (including collection, expansion, etc.)
 474   HeapWord* satisfy_failed_allocation(size_t word_size,
 475                                       bool* succeeded);
 476   // Internal helpers used during full GC to split it up to
 477   // increase readability.
 478   void abort_concurrent_cycle();
 479   void verify_before_full_collection(bool explicit_gc);
 480   void prepare_heap_for_full_collection();
 481   void prepare_heap_for_mutators();
 482   void abort_refinement();
 483   void verify_after_full_collection();
 484   void print_heap_after_full_collection(G1HeapTransition* heap_transition);
 485 
 486   // Helper method for satisfy_failed_allocation()
 487   HeapWord* satisfy_failed_allocation_helper(size_t word_size,
 488                                              bool do_gc,
 489                                              bool clear_all_soft_refs,
 490                                              bool expect_null_mutator_alloc_region,


 508   // during GC into global variables.
 509   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 510 public:
       // Read-only accessor for the thread that periodically samples young-gen
       // remembered set lengths. NOTE(review): only the field read is visible
       // here -- ownership/lifetime is managed elsewhere in this class.
 511   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 512 
       // Accessor for the WorkGang of GC worker threads (non-owning pointer).
 513   WorkGang* workers() const { return _workers; }
 514 
       // Accessor for the heap's G1Allocator (non-owning pointer).
 515   G1Allocator* allocator() {
 516     return _allocator;
 517   }
 518 
       // Accessor for the heap verifier used by verification passes.
 519   G1HeapVerifier* verifier() {
 520     return _verifier;
 521   }
 522 
       // Accessor for monitoring (management/jstat) support. Asserts the field
       // has been set up, i.e. callers must not use it before heap initialization
       // completes.
 523   G1MonitoringSupport* g1mm() {
 524     assert(_g1mm != NULL, "should have been initialized");
 525     return _g1mm;
 526   }
 527 
 528   void resize_heap_if_necessary();
 529
 530   // Expand the garbage-first heap by at least the given size (in bytes!).
 531   // Returns true if the heap was expanded by the requested amount;
 532   // false otherwise.
 533   // (Rounds up to a HeapRegion boundary.)
 534   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
 535 
 536   // Returns the PLAB statistics for a given destination.
 537   inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
 538 
 539   // Determines PLAB size for a given destination.
 540   inline size_t desired_plab_sz(InCSetState dest);
 541 
 542   // Do anything common to GC's.
 543   void gc_prologue(bool full);
 544   void gc_epilogue(bool full);
 545 
 546   // Does the given region fulfill remembered set based eager reclaim candidate requirements?
 547   bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
 548 
 549   // Modify the reclaim candidate set and test for presence.


< prev index next >