30 #include "classfile/systemDictionary.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/codeCache.hpp"
33 #include "code/icBuffer.hpp"
34 #include "gc/serial/defNewGeneration.hpp"
35 #include "gc/shared/adaptiveSizePolicy.hpp"
36 #include "gc/shared/cardTableBarrierSet.hpp"
37 #include "gc/shared/cardTableRS.hpp"
38 #include "gc/shared/collectedHeap.inline.hpp"
39 #include "gc/shared/collectorCounters.hpp"
40 #include "gc/shared/gcId.hpp"
41 #include "gc/shared/gcLocker.hpp"
42 #include "gc/shared/gcPolicyCounters.hpp"
43 #include "gc/shared/gcTrace.hpp"
44 #include "gc/shared/gcTraceTime.inline.hpp"
45 #include "gc/shared/gcVMOperations.hpp"
46 #include "gc/shared/genCollectedHeap.hpp"
47 #include "gc/shared/genOopClosures.inline.hpp"
48 #include "gc/shared/generationSpec.hpp"
49 #include "gc/shared/oopStorageParState.inline.hpp"
50 #include "gc/shared/space.hpp"
51 #include "gc/shared/strongRootsScope.hpp"
52 #include "gc/shared/weakProcessor.hpp"
53 #include "gc/shared/workgroup.hpp"
54 #include "memory/filemap.hpp"
55 #include "memory/metaspaceCounters.hpp"
56 #include "memory/resourceArea.hpp"
57 #include "oops/oop.inline.hpp"
58 #include "runtime/biasedLocking.hpp"
59 #include "runtime/flags/flagSetting.hpp"
60 #include "runtime/handles.hpp"
61 #include "runtime/handles.inline.hpp"
62 #include "runtime/java.hpp"
63 #include "runtime/vmThread.hpp"
64 #include "services/management.hpp"
65 #include "services/memoryService.hpp"
66 #include "utilities/debug.hpp"
67 #include "utilities/formatBuffer.hpp"
68 #include "utilities/macros.hpp"
69 #include "utilities/stack.inline.hpp"
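// The hunk below is the tail of the heap-reservation routine,
// GenCollectedHeap::allocate(). total_reserved holds the combined maximum
// size of both generations; if that addition wrapped around size_t, the sum
// comes out smaller than the young generation's max alone, which is exactly
// what the "less than" check below catches.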
158 if (total_reserved < _young_gen_spec->max_size()) {
159 vm_exit_during_initialization("The size of the object heap + VM data exceeds "
160 "the maximum representable size");
161 }
162 assert(total_reserved % alignment == 0,
163 "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
164 SIZE_FORMAT, total_reserved, alignment);
165
166 *heap_rs = Universe::reserve_heap(total_reserved, alignment);
167
168 os::trace_page_sizes("Heap",
169 collector_policy()->min_heap_byte_size(),
170 total_reserved,
171 alignment,
172 heap_rs->base(),
173 heap_rs->size());
174
175 return heap_rs->base();
176 }
177
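// The raw cast in post_initialize() is safe for this collector family: the
// young generation of a GenCollectedHeap is always a DefNewGeneration
// (ParNew, used with CMS, is a DefNewGeneration subclass).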
178 void GenCollectedHeap::post_initialize() {
179 CollectedHeap::post_initialize();
180 ref_processing_init();
181
182 DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
183
184 initialize_size_policy(def_new_gen->eden()->capacity(),
185 _old_gen->capacity(),
186 def_new_gen->from()->capacity());
187
188 MarkSweep::initialize();
189 }
190
191 void GenCollectedHeap::ref_processing_init() {
192 _young_gen->ref_processor_init();
193 _old_gen->ref_processor_init();
194 }
195
196 GenerationSpec* GenCollectedHeap::young_gen_spec() const {
197 return _young_gen_spec;
198 }
199
200 GenerationSpec* GenCollectedHeap::old_gen_spec() const {
201 return _old_gen_spec;
202 }
203
204 size_t GenCollectedHeap::capacity() const {
205 return _young_gen->capacity() + _old_gen->capacity();
206 }
207
208 size_t GenCollectedHeap::used() const {
668 MetaspaceUtils::print_metaspace_change(metadata_prev_used);
669
670 // Track memory usage and detect low memory after GC finishes
671 MemoryService::track_memory_usage();
672
673 gc_epilogue(complete);
674
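// A full collection can overwrite object headers while it moves objects, so
// any biased-locking marks saved before the collection are restored here.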
675 if (must_restore_marks_for_biased_locking) {
676 BiasedLocking::restore_marks();
677 }
678 }
679
680 print_heap_after_gc();
681
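// TRACESPINNING is a compile-time diagnostic from the task-queue code; when
// defined, it reports how often parallel workers spun while waiting for
// termination.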
682 #ifdef TRACESPINNING
683 ParallelTaskTerminator::print_termination_counts();
684 #endif
685 }
686
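// In this older version the CodeCache itself tracks "scavenge root" nmethods:
// compiled methods holding oops that a young (scavenging) collection might
// move, and which must therefore be visited during such a collection.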
687 void GenCollectedHeap::register_nmethod(nmethod* nm) {
688 CodeCache::register_scavenge_root_nmethod(nm);
689 }
690
691 void GenCollectedHeap::verify_nmethod(nmethod* nm) {
692 CodeCache::verify_scavenge_root_nmethod(nm);
693 }
694
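// Fallback for a failed allocation: if no GC can run because the GCLocker is
// held, try to expand the heap instead; otherwise prefer an incremental
// (young) collection while it still has a chance of succeeding.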
695 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
696 GCCauseSetter x(this, GCCause::_allocation_failure);
697 HeapWord* result = NULL;
698
699 assert(size != 0, "Precondition violated");
700 if (GCLocker::is_active_and_needs_gc()) {
701 // GC locker is active; instead of a collection we will attempt
702 // to expand the heap, if there's room for expansion.
703 if (!is_maximal_no_gc()) {
704 result = expand_heap_and_allocate(size, is_tlab);
705 }
706 return result; // Could be null if we are out of space.
707 } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
708 // Do an incremental collection.
709 do_collection(false, // full
710 false, // clear_all_soft_refs
711 size, // size
712 is_tlab, // is_tlab
816 }
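// Each strong-root category is guarded by try_claim_task(), so when several
// GC workers execute this code in parallel, exactly one of them scans any
// given category.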
817 if (_process_strong_tasks->try_claim_task(GCH_PS_Management_oops_do)) {
818 Management::oops_do(strong_roots);
819 }
820 if (_process_strong_tasks->try_claim_task(GCH_PS_jvmti_oops_do)) {
821 JvmtiExport::oops_do(strong_roots);
822 }
823 if (UseAOT && _process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
824 AOTLoader::oops_do(strong_roots);
825 }
826
827 if (_process_strong_tasks->try_claim_task(GCH_PS_SystemDictionary_oops_do)) {
828 SystemDictionary::oops_do(strong_roots);
829 }
830
831 if (_process_strong_tasks->try_claim_task(GCH_PS_CodeCache_oops_do)) {
832 if (so & SO_ScavengeCodeCache) {
833 assert(code_roots != NULL, "must supply closure for code cache");
834
835 // We only visit parts of the CodeCache when scavenging.
836 CodeCache::scavenge_root_nmethods_do(code_roots);
837 }
838 if (so & SO_AllCodeCache) {
839 assert(code_roots != NULL, "must supply closure for code cache");
840
841 // CMSCollector uses this to do intermediate-strength collections.
842 // We scan the entire code cache, since CodeCache::do_unloading is not called.
843 CodeCache::blobs_do(code_roots);
844 }
845 // Verify that the code cache contents are not subject to
846 // movement by a scavenging collection.
847 DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
848 DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
849 }
850 }
851
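// Young-collection root scanning: the strong roots above plus the remembered
// set, i.e. old-to-young pointers that the card table has recorded since the
// previous young collection.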
852 void GenCollectedHeap::young_process_roots(StrongRootsScope* scope,
853 OopsInGenClosure* root_closure,
854 OopsInGenClosure* old_gen_closure,
855 CLDClosure* cld_closure) {
856 MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);
857
858 process_roots(scope, SO_ScavengeCodeCache, root_closure,
859 cld_closure, cld_closure, &mark_code_closure);
860
861 if (_process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
862 root_closure->reset_generation();
863 }
864
865 // When collection is parallel, all threads get to cooperate to do
866 // old generation scanning.
867 old_gen_closure->set_generation(_old_gen);
868 rem_set()->younger_refs_iterate(_old_gen, old_gen_closure, scope->n_threads());
// ---------------------------------------------------------------------------
// Updated version of the same file. The substantive change: scavenge-root
// nmethod bookkeeping moves out of CodeCache into a new ScavengableNMethods
// class, with the heap supplying the "is this object scavengable?" predicate.
// ---------------------------------------------------------------------------
30 #include "classfile/systemDictionary.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/codeCache.hpp"
33 #include "code/icBuffer.hpp"
34 #include "gc/serial/defNewGeneration.hpp"
35 #include "gc/shared/adaptiveSizePolicy.hpp"
36 #include "gc/shared/cardTableBarrierSet.hpp"
37 #include "gc/shared/cardTableRS.hpp"
38 #include "gc/shared/collectedHeap.inline.hpp"
39 #include "gc/shared/collectorCounters.hpp"
40 #include "gc/shared/gcId.hpp"
41 #include "gc/shared/gcLocker.hpp"
42 #include "gc/shared/gcPolicyCounters.hpp"
43 #include "gc/shared/gcTrace.hpp"
44 #include "gc/shared/gcTraceTime.inline.hpp"
45 #include "gc/shared/gcVMOperations.hpp"
46 #include "gc/shared/genCollectedHeap.hpp"
47 #include "gc/shared/genOopClosures.inline.hpp"
48 #include "gc/shared/generationSpec.hpp"
49 #include "gc/shared/oopStorageParState.inline.hpp"
50 #include "gc/shared/scavengableNMethods.hpp"
51 #include "gc/shared/space.hpp"
52 #include "gc/shared/strongRootsScope.hpp"
53 #include "gc/shared/weakProcessor.hpp"
54 #include "gc/shared/workgroup.hpp"
55 #include "memory/filemap.hpp"
56 #include "memory/metaspaceCounters.hpp"
57 #include "memory/resourceArea.hpp"
58 #include "oops/oop.inline.hpp"
59 #include "runtime/biasedLocking.hpp"
60 #include "runtime/flags/flagSetting.hpp"
61 #include "runtime/handles.hpp"
62 #include "runtime/handles.inline.hpp"
63 #include "runtime/java.hpp"
64 #include "runtime/vmThread.hpp"
65 #include "services/management.hpp"
66 #include "services/memoryService.hpp"
67 #include "utilities/debug.hpp"
68 #include "utilities/formatBuffer.hpp"
69 #include "utilities/macros.hpp"
70 #include "utilities/stack.inline.hpp"
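// The single new include relative to the version above is
// "gc/shared/scavengableNMethods.hpp"; that class now owns the
// scavengable-nmethod bookkeeping which CodeCache handled before.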
159 if (total_reserved < _young_gen_spec->max_size()) {
160 vm_exit_during_initialization("The size of the object heap + VM data exceeds "
161 "the maximum representable size");
162 }
163 assert(total_reserved % alignment == 0,
164 "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
165 SIZE_FORMAT, total_reserved, alignment);
166
167 *heap_rs = Universe::reserve_heap(total_reserved, alignment);
168
169 os::trace_page_sizes("Heap",
170 collector_policy()->min_heap_byte_size(),
171 total_reserved,
172 alignment,
173 heap_rs->base(),
174 heap_rs->size());
175
176 return heap_rs->base();
177 }
178
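// New in this version: the heap supplies a predicate saying which objects are
// still scavengable. For this generational heap that is simply "is it in the
// young generation", since only young objects move during a scavenge.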
179 namespace {
180 class GenIsScavengable : public BoolObjectClosure {
181 public:
182 bool do_object_b(oop obj) {
183 return GenCollectedHeap::heap()->is_in_young(obj);
184 }
185 };
186
187 GenIsScavengable _is_scavengable;
188 }
189
190 void GenCollectedHeap::post_initialize() {
191 CollectedHeap::post_initialize();
192 ref_processing_init();
193
194 DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
195
196 initialize_size_policy(def_new_gen->eden()->capacity(),
197 _old_gen->capacity(),
198 def_new_gen->from()->capacity());
199
200 MarkSweep::initialize();
201
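// New in this version: hand ScavengableNMethods the predicate it will use to
// decide which nmethods are scavengable.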
202 ScavengableNMethods::initialize(&_is_scavengable);
203 }
204
205 void GenCollectedHeap::ref_processing_init() {
206 _young_gen->ref_processor_init();
207 _old_gen->ref_processor_init();
208 }
209
210 GenerationSpec* GenCollectedHeap::young_gen_spec() const {
211 return _young_gen_spec;
212 }
213
214 GenerationSpec* GenCollectedHeap::old_gen_spec() const {
215 return _old_gen_spec;
216 }
217
218 size_t GenCollectedHeap::capacity() const {
219 return _young_gen->capacity() + _old_gen->capacity();
220 }
221
222 size_t GenCollectedHeap::used() const {
682 MetaspaceUtils::print_metaspace_change(metadata_prev_used);
683
684 // Track memory usage and detect low memory after GC finishes
685 MemoryService::track_memory_usage();
686
687 gc_epilogue(complete);
688
689 if (must_restore_marks_for_biased_locking) {
690 BiasedLocking::restore_marks();
691 }
692 }
693
694 print_heap_after_gc();
695
696 #ifdef TRACESPINNING
697 ParallelTaskTerminator::print_termination_counts();
698 #endif
699 }
700
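// All nmethod hooks now delegate to ScavengableNMethods; note that
// unregister_nmethod, flush_nmethod and prune_nmethods have no counterpart
// in the hunk shown for the old version above.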
701 void GenCollectedHeap::register_nmethod(nmethod* nm) {
702 ScavengableNMethods::register_nmethod(nm);
703 }
704
705 void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
706 ScavengableNMethods::unregister_nmethod(nm);
707 }
708
709 void GenCollectedHeap::verify_nmethod(nmethod* nm) {
710 ScavengableNMethods::verify_nmethod(nm);
711 }
712
713 void GenCollectedHeap::flush_nmethod(nmethod* nm) {
714 ScavengableNMethods::flush_nmethod(nm);
715 }
716
717 void GenCollectedHeap::prune_nmethods() {
718 ScavengableNMethods::prune_nmethods();
719 }
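// Illustrative sketch only, not part of the file: the nmethod lifecycle
// implied by the five thin wrappers above, using just the entry points
// visible in this patch. The timing notes are an inference, not taken
// verbatim from the source.
//
//   ScavengableNMethods::initialize(&_is_scavengable); // once, in post_initialize()
//   ScavengableNMethods::register_nmethod(nm);         // when an nmethod is installed
//   ScavengableNMethods::verify_nmethod(nm);           // debug-time consistency check
//   ScavengableNMethods::unregister_nmethod(nm);       // when an nmethod is removed
//   ScavengableNMethods::flush_nmethod(nm);            // when an nmethod is freed
//   ScavengableNMethods::prune_nmethods();             // drop entries that are no
//                                                      // longer scavengable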
720
721 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
722 GCCauseSetter x(this, GCCause::_allocation_failure);
723 HeapWord* result = NULL;
724
725 assert(size != 0, "Precondition violated");
726 if (GCLocker::is_active_and_needs_gc()) {
727 // GC locker is active; instead of a collection we will attempt
728 // to expand the heap, if there's room for expansion.
729 if (!is_maximal_no_gc()) {
730 result = expand_heap_and_allocate(size, is_tlab);
731 }
732 return result; // Could be null if we are out of space.
733 } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
734 // Do an incremental collection.
735 do_collection(false, // full
736 false, // clear_all_soft_refs
737 size, // size
738 is_tlab, // is_tlab
842 }
843 if (_process_strong_tasks->try_claim_task(GCH_PS_Management_oops_do)) {
844 Management::oops_do(strong_roots);
845 }
846 if (_process_strong_tasks->try_claim_task(GCH_PS_jvmti_oops_do)) {
847 JvmtiExport::oops_do(strong_roots);
848 }
849 if (UseAOT && _process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
850 AOTLoader::oops_do(strong_roots);
851 }
852
853 if (_process_strong_tasks->try_claim_task(GCH_PS_SystemDictionary_oops_do)) {
854 SystemDictionary::oops_do(strong_roots);
855 }
856
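// Changed in this version: the scavenge-time walk now goes through
// ScavengableNMethods, while full-code-cache walks still go through
// CodeCache::blobs_do().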
857 if (_process_strong_tasks->try_claim_task(GCH_PS_CodeCache_oops_do)) {
858 if (so & SO_ScavengeCodeCache) {
859 assert(code_roots != NULL, "must supply closure for code cache");
860
861 // We only visit parts of the CodeCache when scavenging.
862 ScavengableNMethods::scavengable_nmethods_do(code_roots);
863 }
864 if (so & SO_AllCodeCache) {
865 assert(code_roots != NULL, "must supply closure for code cache");
866
867 // CMSCollector uses this to do intermediate-strength collections.
868 // We scan the entire code cache, since CodeCache::do_unloading is not called.
869 CodeCache::blobs_do(code_roots);
870 }
871 // Verify that the code cache contents are not subject to
872 // movement by a scavenging collection.
873 DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
874 DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
875 }
876 }
877
878 void GenCollectedHeap::young_process_roots(StrongRootsScope* scope,
879 OopsInGenClosure* root_closure,
880 OopsInGenClosure* old_gen_closure,
881 CLDClosure* cld_closure) {
882 MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);
883
884 process_roots(scope, SO_ScavengeCodeCache, root_closure,
885 cld_closure, cld_closure, &mark_code_closure);
886
887 if (_process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
888 root_closure->reset_generation();
889 }
890
891 // When collection is parallel, all threads get to cooperate to do
892 // old generation scanning.
893 old_gen_closure->set_generation(_old_gen);
894 rem_set()->younger_refs_iterate(_old_gen, old_gen_closure, scope->n_threads());