1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
308 volatile unsigned _gc_time_stamp;
309
310 size_t* _surviving_young_words;
311
312 G1HRPrinter _hr_printer;
313
314 void setup_surviving_young_words();
315 void update_surviving_young_words(size_t* surv_young_words);
316 void cleanup_surviving_young_words();
317
318 // It decides whether an explicit GC should start a concurrent cycle
319 // instead of doing an STW GC. Currently, a concurrent cycle is
320 // explicitly started if:
321 // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
322 // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
323 // (c) cause == _g1_humongous_allocation.
324 bool should_do_concurrent_full_gc(GCCause::Cause cause);
325
326 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
327 // concurrent cycles) we have started.
328 volatile unsigned int _old_marking_cycles_started;
329
330 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
331 // concurrent cycles) we have completed.
332 volatile unsigned int _old_marking_cycles_completed;
333
334 bool _concurrent_cycle_started;
335 bool _heap_summary_sent;
336
337 // This is a non-product method that is helpful for testing. It is
338 // called at the end of a GC and artificially expands the heap by
339 // allocating a number of dead regions. This way we can induce very
340 // frequent marking cycles and stress the cleanup / concurrent
341 // cleanup code more (as all the regions that will be allocated by
342 // this method will be found dead by the marking cycle).
343 void allocate_dummy_regions() PRODUCT_RETURN;
344
345 // Clears RSets after a compaction. It also resets the GC time stamps.
346 void clear_rsets_post_compaction();
347
348 // If the HR printer is active, dump the state of the regions in the
349 // heap after a compaction.
350 void print_hrm_post_compaction();
351
352 double verify(bool guard, const char* msg);
480 // will satisfy them with a special path.
481
482 virtual HeapWord* allocate_new_tlab(size_t word_size);
483
484 virtual HeapWord* mem_allocate(size_t word_size,
485 bool* gc_overhead_limit_was_exceeded);
486
487 // The following three methods take a gc_count_before_ret
488 // parameter which is used to return the GC count if the method
489 // returns NULL. Given that we are required to read the GC count
490 // while holding the Heap_lock, and these paths will take the
491 // Heap_lock at some point, it's easier to get them to read the GC
492 // count while holding the Heap_lock before they return NULL instead
493 // of the caller (namely: mem_allocate()) having to also take the
494 // Heap_lock just to read the GC count.
495
496 // First-level mutator allocation attempt: try to allocate out of
497 // the mutator alloc region without taking the Heap_lock. This
498 // should only be used for non-humongous allocations.
499 inline HeapWord* attempt_allocation(size_t word_size,
500 unsigned int* gc_count_before_ret,
501 int* gclocker_retry_count_ret);
502
503 // Second-level mutator allocation attempt: take the Heap_lock and
504 // retry the allocation attempt, potentially scheduling a GC
505 // pause. This should only be used for non-humongous allocations.
506 HeapWord* attempt_allocation_slow(size_t word_size,
507 AllocationContext_t context,
508 unsigned int* gc_count_before_ret,
509 int* gclocker_retry_count_ret);
510
511 // Takes the Heap_lock and attempts a humongous allocation. It can
512 // potentially schedule a GC pause.
513 HeapWord* attempt_allocation_humongous(size_t word_size,
514 unsigned int* gc_count_before_ret,
515 int* gclocker_retry_count_ret);
516
517 // Allocation attempt that should be called during safepoints (e.g.,
518 // at the end of a successful GC). expect_null_mutator_alloc_region
519 // specifies whether the mutator alloc region is expected to be NULL
520 // or not.
521 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
522 AllocationContext_t context,
523 bool expect_null_mutator_alloc_region);
524
525 // It dirties the cards that cover the block so that the post
526 // write barrier never queues anything when updating objects on this
527 // block. It is assumed (and in fact we assert) that the block
528 // belongs to a young region.
529 inline void dirty_young_block(HeapWord* start, size_t word_size);
530
531 // Allocate blocks during garbage collection. Will ensure an
532 // allocation region, either by picking one or expanding the
533 // heap, and then allocate a block of the given size. The block
534 // may not be humongous - it must fit into a single heap region.
535 inline HeapWord* par_allocate_during_gc(InCSetState dest,
669 // This is called at the start of either a concurrent cycle or a Full
670 // GC to update the number of old marking cycles started.
671 void increment_old_marking_cycles_started();
672
673 // This is called at the end of either a concurrent cycle or a Full
674 // GC to update the number of old marking cycles completed. Those two
675 // can happen in a nested fashion, i.e., we start a concurrent
676 // cycle, a Full GC happens half-way through it which ends first,
677 // and then the cycle notices that a Full GC happened and ends
678 // too. The concurrent parameter is a boolean to help us do a bit
679 // tighter consistency checking in the method. If concurrent is
680 // false, the caller is the inner caller in the nesting (i.e., the
681 // Full GC). If concurrent is true, the caller is the outer caller
682 // in this nesting (i.e., the concurrent cycle). Further nesting is
683 // not currently supported. The end of this call also notifies
684 // the FullGCCount_lock in case a Java thread is waiting for a full
685 // GC to happen (e.g., it called System.gc() with
686 // +ExplicitGCInvokesConcurrent).
687 void increment_old_marking_cycles_completed(bool concurrent);
688
689 unsigned int old_marking_cycles_completed() {
690 return _old_marking_cycles_completed;
691 }
692
693 void register_concurrent_cycle_start(const Ticks& start_time);
694 void register_concurrent_cycle_end();
695 void trace_heap_after_concurrent_cycle();
696
697 G1YCType yc_type();
698
699 G1HRPrinter* hr_printer() { return &_hr_printer; }
700
701 // Frees a non-humongous region by initializing its contents and
702 // adding it to the free list that's passed as a parameter (this is
703 // usually a local list which will be appended to the master free
704 // list later). The used bytes of freed regions are accumulated in
705 // pre_used. If par is true, the region's RSet will not be freed
706 // up. The assumption is that this will be done later.
707 // The locked parameter indicates if the caller has already taken
708 // care of proper synchronization. This may allow some optimizations.
709 void free_region(HeapRegion* hr,
728 virtual void shrink(size_t expand_bytes);
729 void shrink_helper(size_t expand_bytes);
730
731 #if TASKQUEUE_STATS
732 static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
733 void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
734 void reset_taskqueue_stats();
735 #endif // TASKQUEUE_STATS
736
737 // Schedule the VM operation that will do an evacuation pause to
738 // satisfy an allocation request of word_size. *succeeded is set
739 // to whether the VM operation was successful (it did do an
740 // evacuation pause) or not (another thread beat us to it or the GC
741 // locker was active). Given that we should not be holding the
742 // Heap_lock when we enter this method, we will pass the
743 // gc_count_before (i.e., total_collections()) as a parameter since
744 // it has to be read while holding the Heap_lock. Currently, both
745 // methods that call do_collection_pause() release the Heap_lock
746 // before the call, so it's easy to read gc_count_before just before.
747 HeapWord* do_collection_pause(size_t word_size,
748 unsigned int gc_count_before,
749 bool* succeeded,
750 GCCause::Cause gc_cause);
751
752 // The guts of the incremental collection pause, executed by the VM
753 // thread. It returns false if it is unable to do the collection due
754 // to the GC locker being active, true otherwise.
755 bool do_collection_pause_at_safepoint(double target_pause_time_ms);
756
757 // Actually do the work of evacuating the collection set.
758 void evacuate_collection_set(EvacuationInfo& evacuation_info);
759
760 // The g1 remembered set of the heap.
761 G1RemSet* _g1_rem_set;
762
763 // A set of cards that cover the objects for which the RSets should be updated
764 // concurrently after the collection.
765 DirtyCardQueueSet _dirty_card_queue_set;
766
767 // The closure used to refine a single card.
768 RefineCardTableEntryClosure* _refine_cte_cl;
964
965 // The (concurrent marking) reference processor...
966 ReferenceProcessor* _ref_processor_cm;
967
968 // Instance of the concurrent mark is_alive closure for embedding
969 // into the Concurrent Marking reference processor as the
970 // _is_alive_non_header field. Supplying a value for the
971 // _is_alive_non_header field is optional but doing so prevents
972 // unnecessary additions to the discovered lists during reference
973 // discovery.
974 G1CMIsAliveClosure _is_alive_closure_cm;
975
976 // Cache used by G1CollectedHeap::start_cset_region_for_worker().
977 HeapRegion** _worker_cset_start_region;
978
979 // Time stamp to validate the regions recorded in the cache
980 // used by G1CollectedHeap::start_cset_region_for_worker().
981 // The heap region entry for a given worker is valid iff
982 // the associated time stamp value matches the current value
983 // of G1CollectedHeap::_gc_time_stamp.
984 unsigned int* _worker_cset_start_region_time_stamp;
985
986 enum G1H_process_roots_tasks {
987 G1H_PS_filter_satb_buffers,
988 G1H_PS_refProcessor_oops_do,
989 // Leave this one last.
990 G1H_PS_NumElements
991 };
992
993 SubTasksDone* _process_strong_tasks;
994
995 volatile bool _free_regions_coming;
996
997 public:
998
999 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
1000
1001 void set_refine_cte_cl_concurrency(bool concurrent);
1002
1003 RefToScanQueue *task_queue(int i) const;
1004
|
1 /*
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
308 volatile unsigned _gc_time_stamp;
309
310 size_t* _surviving_young_words;
311
312 G1HRPrinter _hr_printer;
313
314 void setup_surviving_young_words();
315 void update_surviving_young_words(size_t* surv_young_words);
316 void cleanup_surviving_young_words();
317
318 // It decides whether an explicit GC should start a concurrent cycle
319 // instead of doing an STW GC. Currently, a concurrent cycle is
320 // explicitly started if:
321 // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
322 // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
323 // (c) cause == _g1_humongous_allocation.
324 bool should_do_concurrent_full_gc(GCCause::Cause cause);
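// --- Illustrative sketch (not part of this file): one plausible .cpp-side
// shape of the predicate documented above. The GCCause values and the two
// flags come straight from the comment; everything else is an assumption.
bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
    case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
    case GCCause::_g1_humongous_allocation: return true;
    default:                                return false;
  }
}
// ---------------------------------------------------------------------------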
325
326 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
327 // concurrent cycles) we have started.
328 volatile uint _old_marking_cycles_started;
329
330 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
331 // concurrent cycles) we have completed.
332 volatile uint _old_marking_cycles_completed;
333
334 bool _concurrent_cycle_started;
335 bool _heap_summary_sent;
336
337 // This is a non-product method that is helpful for testing. It is
338 // called at the end of a GC and artificially expands the heap by
339 // allocating a number of dead regions. This way we can induce very
340 // frequent marking cycles and stress the cleanup / concurrent
341 // cleanup code more (as all the regions that will be allocated by
342 // this method will be found dead by the marking cycle).
343 void allocate_dummy_regions() PRODUCT_RETURN;
344
345 // Clears RSets after a compaction. It also resets the GC time stamps.
346 void clear_rsets_post_compaction();
347
348 // If the HR printer is active, dump the state of the regions in the
349 // heap after a compaction.
350 void print_hrm_post_compaction();
351
352 double verify(bool guard, const char* msg);
480 // will satisfy them with a special path.
481
482 virtual HeapWord* allocate_new_tlab(size_t word_size);
483
484 virtual HeapWord* mem_allocate(size_t word_size,
485 bool* gc_overhead_limit_was_exceeded);
486
487 // The following three methods take a gc_count_before_ret
488 // parameter which is used to return the GC count if the method
489 // returns NULL. Given that we are required to read the GC count
490 // while holding the Heap_lock, and these paths will take the
491 // Heap_lock at some point, it's easier to get them to read the GC
492 // count while holding the Heap_lock before they return NULL instead
493 // of the caller (namely: mem_allocate()) having to also take the
494 // Heap_lock just to read the GC count.
495
496 // First-level mutator allocation attempt: try to allocate out of
497 // the mutator alloc region without taking the Heap_lock. This
498 // should only be used for non-humongous allocations.
499 inline HeapWord* attempt_allocation(size_t word_size,
500 uint* gc_count_before_ret,
501 uint* gclocker_retry_count_ret);
502
503 // Second-level mutator allocation attempt: take the Heap_lock and
504 // retry the allocation attempt, potentially scheduling a GC
505 // pause. This should only be used for non-humongous allocations.
506 HeapWord* attempt_allocation_slow(size_t word_size,
507 AllocationContext_t context,
508 uint* gc_count_before_ret,
509 uint* gclocker_retry_count_ret);
510
511 // Takes the Heap_lock and attempts a humongous allocation. It can
512 // potentially schedule a GC pause.
513 HeapWord* attempt_allocation_humongous(size_t word_size,
514 uint* gc_count_before_ret,
515 uint* gclocker_retry_count_ret);
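// --- Illustrative sketch (not part of this file): the caller-side protocol
// the comment above describes, heavily simplified (the humongous path and
// the gc_overhead_limit_was_exceeded handling are omitted). gc_count_before
// is filled in, under the Heap_lock, by the attempt_* path whenever it
// returns NULL, so mem_allocate() never has to take the lock itself.
HeapWord* G1CollectedHeap::mem_allocate(size_t word_size,
                                        bool* gc_overhead_limit_was_exceeded) {
  for (uint try_count = 1; /* retry until allocated or hopeless */; try_count++) {
    uint gc_count_before;          // set (under the Heap_lock) on failure
    uint gclocker_retry_count = 0;
    HeapWord* result = attempt_allocation(word_size, &gc_count_before,
                                          &gclocker_retry_count);
    if (result != NULL) {
      return result;
    }
    // Fast paths failed: schedule an evacuation pause, handing over the GC
    // count we read so the VM operation can detect that another thread
    // already did a GC in the meantime.
    bool succeeded;
    result = do_collection_pause(word_size, gc_count_before, &succeeded,
                                 GCCause::_g1_inc_collection_pause);
    if (result != NULL || !succeeded) {
      return result;               // allocation satisfied, or give up
    }
  }
}
// ---------------------------------------------------------------------------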
516
517 // Allocation attempt that should be called during safepoints (e.g.,
518 // at the end of a successful GC). expect_null_mutator_alloc_region
519 // specifies whether the mutator alloc region is expected to be NULL
520 // or not.
521 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
522 AllocationContext_t context,
523 bool expect_null_mutator_alloc_region);
524
525 // It dirties the cards that cover the block so that the post
526 // write barrier never queues anything when updating objects on this
527 // block. It is assumed (and in fact we assert) that the block
528 // belongs to a young region.
529 inline void dirty_young_block(HeapWord* start, size_t word_size);
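// --- Illustrative sketch (not part of this file): the idea behind
// dirty_young_block() is to pre-dirty every card spanning
// [start, start + word_size) so the post write barrier's card test finds
// them dirty and enqueues nothing. The g1_mark_as_young() call is how the
// JDK 8-era barrier set exposes this; treat the exact API as an assumption.
inline void G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert(heap_region_containing(start)->is_young(), "block must be in a young region");
  MemRegion mr(start, start + word_size);
  g1_barrier_set()->g1_mark_as_young(mr);
}
// ---------------------------------------------------------------------------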
530
531 // Allocate blocks during garbage collection. Will ensure an
532 // allocation region, either by picking one or expanding the
533 // heap, and then allocate a block of the given size. The block
534 // may not be humongous - it must fit into a single heap region.
535 inline HeapWord* par_allocate_during_gc(InCSetState dest,
669 // This is called at the start of either a concurrent cycle or a Full
670 // GC to update the number of old marking cycles started.
671 void increment_old_marking_cycles_started();
672
673 // This is called at the end of either a concurrent cycle or a Full
674 // GC to update the number of old marking cycles completed. Those two
675 // can happen in a nested fashion, i.e., we start a concurrent
676 // cycle, a Full GC happens half-way through it which ends first,
677 // and then the cycle notices that a Full GC happened and ends
678 // too. The concurrent parameter is a boolean to help us do a bit
679 // tighter consistency checking in the method. If concurrent is
680 // false, the caller is the inner caller in the nesting (i.e., the
681 // Full GC). If concurrent is true, the caller is the outer caller
682 // in this nesting (i.e., the concurrent cycle). Further nesting is
683 // not currently supported. The end of this call also notifies
684 // the FullGCCount_lock in case a Java thread is waiting for a full
685 // GC to happen (e.g., it called System.gc() with
686 // +ExplicitGCInvokesConcurrent).
687 void increment_old_marking_cycles_completed(bool concurrent);
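// --- Illustrative sketch (not part of this file): a condensed shape of the
// bookkeeping described above. MonitorLockerEx and FullGCCount_lock are
// standard HotSpot facilities; the real method's consistency checks are
// stricter than the single assert shown here.
void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  // With one level of nesting (a Full GC inside a concurrent cycle), the
  // completed count can trail the started count by at most two.
  assert(_old_marking_cycles_started >= _old_marking_cycles_completed + 1 &&
         _old_marking_cycles_started <= _old_marking_cycles_completed + 2,
         "completing a cycle that was never started");
  _old_marking_cycles_completed += 1;
  // Wake any Java thread blocked waiting for a full GC to finish, e.g. one
  // that called System.gc() with +ExplicitGCInvokesConcurrent.
  ml.notify_all();
}
// ---------------------------------------------------------------------------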
688
689 uint old_marking_cycles_completed() {
690 return _old_marking_cycles_completed;
691 }
692
693 void register_concurrent_cycle_start(const Ticks& start_time);
694 void register_concurrent_cycle_end();
695 void trace_heap_after_concurrent_cycle();
696
697 G1YCType yc_type();
698
699 G1HRPrinter* hr_printer() { return &_hr_printer; }
700
701 // Frees a non-humongous region by initializing its contents and
702 // adding it to the free list that's passed as a parameter (this is
703 // usually a local list which will be appended to the master free
704 // list later). The used bytes of freed regions are accumulated in
705 // pre_used. If par is true, the region's RSet will not be freed
706 // up. The assumption is that this will be done later.
707 // The locked parameter indicates if the caller has already taken
708 // care of proper synchronization. This may allow some optimizations.
709 void free_region(HeapRegion* hr,
728 virtual void shrink(size_t expand_bytes);
729 void shrink_helper(size_t expand_bytes);
730
731 #if TASKQUEUE_STATS
732 static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
733 void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
734 void reset_taskqueue_stats();
735 #endif // TASKQUEUE_STATS
736
737 // Schedule the VM operation that will do an evacuation pause to
738 // satisfy an allocation request of word_size. *succeeded is set
739 // to whether the VM operation was successful (it did do an
740 // evacuation pause) or not (another thread beat us to it or the GC
741 // locker was active). Given that we should not be holding the
742 // Heap_lock when we enter this method, we will pass the
743 // gc_count_before (i.e., total_collections()) as a parameter since
744 // it has to be read while holding the Heap_lock. Currently, both
745 // methods that call do_collection_pause() release the Heap_lock
746 // before the call, so it's easy to read gc_count_before just before.
747 HeapWord* do_collection_pause(size_t word_size,
748 uint gc_count_before,
749 bool* succeeded,
750 GCCause::Cause gc_cause);
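// --- Illustrative sketch (not part of this file): roughly how the request
// is handed to the VM thread, assuming the VM_G1IncCollectionPause operation
// G1 uses for evacuation pauses; the constructor argument list is an
// approximation, not the definitive signature.
HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                               uint gc_count_before,
                                               bool* succeeded,
                                               GCCause::Cause gc_cause) {
  assert_heap_not_locked_and_not_at_safepoint();
  VM_G1IncCollectionPause op(gc_count_before,
                             word_size,
                             false /* should_initiate_conc_mark */,
                             g1_policy()->max_pause_time_ms(),
                             gc_cause);
  VMThread::execute(&op);             // blocks until the pause was attempted
  *succeeded = op.pause_succeeded();  // false if another thread beat us to it
  return op.result();
}
// ---------------------------------------------------------------------------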
751
752 // The guts of the incremental collection pause, executed by the VM
753 // thread. It returns false if it is unable to do the collection due
754 // to the GC locker being active, true otherwise.
755 bool do_collection_pause_at_safepoint(double target_pause_time_ms);
756
757 // Actually do the work of evacuating the collection set.
758 void evacuate_collection_set(EvacuationInfo& evacuation_info);
759
760 // The g1 remembered set of the heap.
761 G1RemSet* _g1_rem_set;
762
763 // A set of cards that cover the objects for which the RSets should be updated
764 // concurrently after the collection.
765 DirtyCardQueueSet _dirty_card_queue_set;
766
767 // The closure used to refine a single card.
768 RefineCardTableEntryClosure* _refine_cte_cl;
964
965 // The (concurrent marking) reference processor...
966 ReferenceProcessor* _ref_processor_cm;
967
968 // Instance of the concurrent mark is_alive closure for embedding
969 // into the Concurrent Marking reference processor as the
970 // _is_alive_non_header field. Supplying a value for the
971 // _is_alive_non_header field is optional but doing so prevents
972 // unnecessary additions to the discovered lists during reference
973 // discovery.
974 G1CMIsAliveClosure _is_alive_closure_cm;
975
976 // Cache used by G1CollectedHeap::start_cset_region_for_worker().
977 HeapRegion** _worker_cset_start_region;
978
979 // Time stamp to validate the regions recorded in the cache
980 // used by G1CollectedHeap::start_cset_region_for_worker().
981 // The heap region entry for a given worker is valid iff
982 // the associated time stamp value matches the current value
983 // of G1CollectedHeap::_gc_time_stamp.
984 uint* _worker_cset_start_region_time_stamp;
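// --- Illustrative sketch (not part of this file): how the validity rule
// above would be applied inside start_cset_region_for_worker(). The cache
// refresh logic is elided; only the time stamp check is the point here.
HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
  // Fast path: the cached entry is valid iff its stamp matches the heap's.
  if (_worker_cset_start_region_time_stamp[worker_i] == _gc_time_stamp) {
    return _worker_cset_start_region[worker_i];
  }
  HeapRegion* result = NULL;  // ...recompute this worker's start region...
  _worker_cset_start_region[worker_i] = result;
  _worker_cset_start_region_time_stamp[worker_i] = _gc_time_stamp;
  return result;
}
// ---------------------------------------------------------------------------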
985
986 enum G1H_process_roots_tasks {
987 G1H_PS_filter_satb_buffers,
988 G1H_PS_refProcessor_oops_do,
989 // Leave this one last.
990 G1H_PS_NumElements
991 };
992
993 SubTasksDone* _process_strong_tasks;
994
995 volatile bool _free_regions_coming;
996
997 public:
998
999 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
1000
1001 void set_refine_cte_cl_concurrency(bool concurrent);
1002
1003 RefToScanQueue *task_queue(int i) const;
1004