  volatile unsigned _gc_time_stamp;

  size_t* _surviving_young_words;

  G1HRPrinter _hr_printer;

  void setup_surviving_young_words();
  void update_surviving_young_words(size_t* surv_young_words);
  void cleanup_surviving_young_words();

  // It decides whether an explicit GC should start a concurrent cycle
  // instead of doing a STW GC. Currently, a concurrent cycle is
  // explicitly started if:
  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
  // (c) cause == _g1_humongous_allocation.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
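  //
  // A minimal sketch of what this predicate could look like, based on
  // the causes listed above (an illustration, not necessarily the exact
  // implementation):
  //
  //   switch (cause) {
  //     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
  //     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
  //     case GCCause::_g1_humongous_allocation: return true;
  //     default:                                return false;
  //   }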

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have started.
  volatile uint _old_marking_cycles_started;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have completed.
  volatile uint _old_marking_cycles_completed;

  bool _concurrent_cycle_started;
  bool _heap_summary_sent;

  // This is a non-product method that is helpful for testing. It is
  // called at the end of a GC and artificially expands the heap by
  // allocating a number of dead regions. This way we can induce very
  // frequent marking cycles and stress the cleanup / concurrent
  // cleanup code more (as all the regions that will be allocated by
  // this method will be found dead by the marking cycle).
  void allocate_dummy_regions() PRODUCT_RETURN;
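  //
  // (PRODUCT_RETURN is the usual HotSpot convention: in product builds
  // it expands to an empty inline body so the call compiles away, while
  // in non-product builds it leaves a plain declaration completed by an
  // out-of-line definition.)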

  // Clear RSets after a compaction. It also resets the GC time stamps.
  void clear_rsets_post_compaction();

  // If the HR printer is active, dump the state of the regions in the
  // heap after a compaction.
  void print_hrm_post_compaction();

  double verify(bool guard, const char* msg);
  // will satisfy them with a special path.

  virtual HeapWord* allocate_new_tlab(size_t word_size);

  virtual HeapWord* mem_allocate(size_t word_size,
                                 bool* gc_overhead_limit_was_exceeded);

  // The following three methods take a gc_count_before_ret
  // parameter, which is used to return the GC count if the method
  // returns NULL. Given that we are required to read the GC count
  // while holding the Heap_lock, and these paths will take the
  // Heap_lock at some point, it's easier to have them read the GC
  // count while holding the Heap_lock before they return NULL than
  // to make the caller (namely: mem_allocate()) take the Heap_lock
  // just to read the GC count. (A usage sketch follows the three
  // declarations below.)

  // First-level mutator allocation attempt: try to allocate out of
  // the mutator alloc region without taking the Heap_lock. This
  // should only be used for non-humongous allocations.
  inline HeapWord* attempt_allocation(size_t word_size,
                                      uint* gc_count_before_ret,
                                      uint* gclocker_retry_count_ret);

  // Second-level mutator allocation attempt: take the Heap_lock and
  // retry the allocation attempt, potentially scheduling a GC
  // pause. This should only be used for non-humongous allocations.
  HeapWord* attempt_allocation_slow(size_t word_size,
                                    AllocationContext_t context,
                                    uint* gc_count_before_ret,
                                    uint* gclocker_retry_count_ret);

  // Takes the Heap_lock and attempts a humongous allocation. It can
  // potentially schedule a GC pause.
  HeapWord* attempt_allocation_humongous(size_t word_size,
                                         uint* gc_count_before_ret,
                                         uint* gclocker_retry_count_ret);
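  //
  // A rough sketch of how the first- and second-level attempts compose
  // for a caller such as mem_allocate() (illustrative only; the real
  // paths carry more state than shown here):
  //
  //   uint gc_count_before;
  //   uint gclocker_retry_count = 0;
  //   HeapWord* result = attempt_allocation(word_size, &gc_count_before,
  //                                         &gclocker_retry_count);
  //   if (result == NULL) {
  //     // Slow path: takes the Heap_lock and may schedule a GC pause.
  //     result = attempt_allocation_slow(word_size, context,
  //                                      &gc_count_before,
  //                                      &gclocker_retry_count);
  //   }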

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            AllocationContext_t context,
                                            bool expect_null_mutator_alloc_region);

  // It dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);
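  //
  // Conceptually the operation is just this (a sketch, with 'ct' being
  // the heap's card table; the real code goes through the barrier set
  // and, as noted above, asserts that the block is in a young region):
  //
  //   jbyte* first = ct->byte_for(start);                  // first covering card
  //   jbyte* last  = ct->byte_for(start + word_size - 1);  // last covering card
  //   for (jbyte* cur = first; cur <= last; cur++) {
  //     *cur = CardTableModRefBS::dirty_card_val();
  //   }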

  // Allocate blocks during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous - it must fit into a single heap region.
  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
  // This is called at the start of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles started.
  void increment_old_marking_cycles_started();

  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // too. The concurrent parameter is a boolean to help us do a bit
  // tighter consistency checking in the method. If concurrent is
  // false, the caller is the inner caller in the nesting (i.e., the
  // Full GC). If concurrent is true, the caller is the outer caller
  // in this nesting (i.e., the concurrent cycle). Further nesting is
  // not currently supported. The end of this call also notifies
  // the FullGCCount_lock in case a Java thread is waiting for a full
  // GC to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_old_marking_cycles_completed(bool concurrent);

  uint old_marking_cycles_completed() {
    return _old_marking_cycles_completed;
  }
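  //
  // A simplified sketch of the completion path described above (the
  // real method also performs the nesting consistency checks
  // mentioned):
  //
  //   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  //   _old_marking_cycles_completed += 1;
  //   FullGCCount_lock->notify_all();  // wake threads waiting for a full GC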

  void register_concurrent_cycle_start(const Ticks& start_time);
  void register_concurrent_cycle_end();
  void trace_heap_after_concurrent_cycle();

  G1YCType yc_type();

  G1HRPrinter* hr_printer() { return &_hr_printer; }

  // Frees a non-humongous region by initializing its contents and
  // adding it to the free list that's passed as a parameter (this is
  // usually a local list which will be appended to the master free
  // list later). The used bytes of freed regions are accumulated in
  // pre_used. If par is true, the region's RSet will not be freed
  // up. The assumption is that this will be done later.
  // The locked parameter indicates if the caller has already taken
  // care of proper synchronization. This may allow some optimizations.
  void free_region(HeapRegion* hr,
  virtual void shrink(size_t expand_bytes);
  void shrink_helper(size_t expand_bytes);

#if TASKQUEUE_STATS
  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
  void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS

  // Schedule the VM operation that will do an evacuation pause to
  // satisfy an allocation request of word_size. *succeeded will
  // return whether the VM operation was successful (it did do an
  // evacuation pause) or not (another thread beat us to it or the GC
  // locker was active). Given that we should not be holding the
  // Heap_lock when we enter this method, we will pass the
  // gc_count_before (i.e., total_collections()) as a parameter since
  // it has to be read while holding the Heap_lock. Currently, both
  // methods that call do_collection_pause() release the Heap_lock
  // before the call, so it's easy to read gc_count_before just before.
  HeapWord* do_collection_pause(size_t word_size,
                                uint gc_count_before,
                                bool* succeeded,
                                GCCause::Cause gc_cause);
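  //
  // How a caller typically obtains gc_count_before (a sketch; the real
  // call sites are the allocation slow paths above):
  //
  //   uint gc_count_before;
  //   {
  //     MutexLocker ml(Heap_lock);
  //     gc_count_before = total_collections();
  //   }
  //   // Heap_lock is released here, before the pause is scheduled.
  //   bool succeeded;
  //   HeapWord* result = do_collection_pause(word_size, gc_count_before,
  //                                          &succeeded, gc_cause);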

  // The guts of the incremental collection pause, executed by the VM
  // thread. It returns false if it is unable to do the collection due
  // to the GC locker being active, true otherwise.
  bool do_collection_pause_at_safepoint(double target_pause_time_ms);

  // Actually do the work of evacuating the collection set.
  void evacuate_collection_set(EvacuationInfo& evacuation_info);

  // The G1 remembered set of the heap.
  G1RemSet* _g1_rem_set;

  // A set of cards that cover the objects for which the RSets should
  // be updated concurrently after the collection.
  DirtyCardQueueSet _dirty_card_queue_set;

  // The closure used to refine a single card.
  RefineCardTableEntryClosure* _refine_cte_cl;

  // The (concurrent marking) reference processor...
  ReferenceProcessor* _ref_processor_cm;

  // Instance of the concurrent mark is_alive closure for embedding
  // into the Concurrent Marking reference processor as the
  // _is_alive_non_header field. Supplying a value for the
  // _is_alive_non_header field is optional, but doing so prevents
  // unnecessary additions to the discovered lists during reference
  // discovery.
  G1CMIsAliveClosure _is_alive_closure_cm;

  // Cache used by G1CollectedHeap::start_cset_region_for_worker().
  HeapRegion** _worker_cset_start_region;

  // Time stamp to validate the regions recorded in the cache
  // used by G1CollectedHeap::start_cset_region_for_worker().
  // The heap region entry for a given worker is valid iff
  // the associated time stamp value matches the current value
  // of G1CollectedHeap::_gc_time_stamp.
  uint* _worker_cset_start_region_time_stamp;
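  //
  // The validity check the comment above implies (sketch):
  //
  //   bool valid = _worker_cset_start_region_time_stamp[worker_i] == _gc_time_stamp;
  //   // If stale, recompute the worker's start region and refresh the stamp.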

  enum G1H_process_roots_tasks {
    G1H_PS_filter_satb_buffers,
    G1H_PS_refProcessor_oops_do,
    // Leave this one last.
    G1H_PS_NumElements
  };

  SubTasksDone* _process_strong_tasks;

  volatile bool _free_regions_coming;

public:

  SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }

  void set_refine_cte_cl_concurrency(bool concurrent);

  RefToScanQueue* task_queue(int i) const;
