/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/cms/cmsCardTable.hpp" 27 #include "gc/cms/compactibleFreeListSpace.hpp" 28 #include "gc/cms/concurrentMarkSweepGeneration.hpp" 29 #include "gc/cms/concurrentMarkSweepThread.hpp" 30 #include "gc/cms/cmsHeap.hpp" 31 #include "gc/cms/parNewGeneration.hpp" 32 #include "gc/cms/vmCMSOperations.hpp" 33 #include "gc/shared/genCollectedHeap.hpp" 34 #include "gc/shared/genMemoryPools.hpp" 35 #include "gc/shared/genOopClosures.inline.hpp" 36 #include "gc/shared/strongRootsScope.hpp" 37 #include "gc/shared/workgroup.hpp" 38 #include "oops/oop.inline.hpp" 39 #include "runtime/vmThread.hpp" 40 #include "services/memoryManager.hpp" 41 #include "utilities/stack.inline.hpp" 42 43 class CompactibleFreeListSpacePool : public CollectedMemoryPool { 44 private: 45 CompactibleFreeListSpace* _space; 46 public: 47 CompactibleFreeListSpacePool(CompactibleFreeListSpace* space, 48 const char* name, 49 size_t max_size, 50 bool support_usage_threshold) : 51 CollectedMemoryPool(name, space->capacity(), max_size, support_usage_threshold), 52 _space(space) { 53 } 54 55 MemoryUsage get_memory_usage() { 56 size_t max_heap_size = (available_for_allocation() ? 
max_size() : 0); 57 size_t used = used_in_bytes(); 58 size_t committed = _space->capacity(); 59 60 return MemoryUsage(initial_size(), used, committed, max_heap_size); 61 } 62 63 size_t used_in_bytes() { 64 return _space->used(); 65 } 66 }; 67 68 CMSHeap::CMSHeap(GenCollectorPolicy *policy) : 69 GenCollectedHeap(policy, 70 Generation::ParNew, 71 Generation::ConcurrentMarkSweep, 72 "ParNew::CMS"), 73 _eden_pool(NULL), 74 _survivor_pool(NULL), 75 _old_pool(NULL) { 76 _workers = new WorkGang("GC Thread", ParallelGCThreads, 77 /* are_GC_task_threads */true, 78 /* are_ConcurrentGC_threads */false); 79 _workers->initialize_workers(); 80 } 81 82 jint CMSHeap::initialize() { 83 jint status = GenCollectedHeap::initialize(); 84 if (status != JNI_OK) return status; 85 86 // If we are running CMS, create the collector responsible 87 // for collecting the CMS generations. 88 if (!create_cms_collector()) { 89 return JNI_ENOMEM; 90 } 91 92 return JNI_OK; 93 } 94 95 CardTableRS* CMSHeap::create_rem_set(const MemRegion& reserved_region) { 96 return new CMSCardTable(reserved_region); 97 } 98 99 void CMSHeap::initialize_serviceability() { 100 _young_manager = new GCMemoryManager("ParNew", "end of minor GC"); 101 _old_manager = new GCMemoryManager("ConcurrentMarkSweep", "end of major GC"); 102 103 ParNewGeneration* young = young_gen(); 104 _eden_pool = new ContiguousSpacePool(young->eden(), 105 "Par Eden Space", 106 young->max_eden_size(), 107 false); 108 109 _survivor_pool = new SurvivorContiguousSpacePool(young, 110 "Par Survivor Space", 111 young->max_survivor_size(), 112 false); 113 114 ConcurrentMarkSweepGeneration* old = (ConcurrentMarkSweepGeneration*) old_gen(); 115 _old_pool = new CompactibleFreeListSpacePool(old->cmsSpace(), 116 "CMS Old Gen", 117 old->reserved().byte_size(), 118 true); 119 120 _young_manager->add_pool(_eden_pool); 121 _young_manager->add_pool(_survivor_pool); 122 young->set_gc_manager(_young_manager); 123 124 _old_manager->add_pool(_eden_pool); 125 
_old_manager->add_pool(_survivor_pool); 126 _old_manager->add_pool(_old_pool); 127 old ->set_gc_manager(_old_manager); 128 129 } 130 131 CMSHeap* CMSHeap::heap() { 132 CollectedHeap* heap = Universe::heap(); 133 assert(heap != NULL, "Uninitialized access to CMSHeap::heap()"); 134 assert(heap->kind() == CollectedHeap::CMS, "Invalid name"); 135 return static_cast<CMSHeap*>(heap); 136 } 137 138 void CMSHeap::gc_threads_do(ThreadClosure* tc) const { 139 assert(workers() != NULL, "should have workers here"); 140 workers()->threads_do(tc); 141 ConcurrentMarkSweepThread::threads_do(tc); 142 } 143 144 void CMSHeap::print_gc_threads_on(outputStream* st) const { 145 assert(workers() != NULL, "should have workers here"); 146 workers()->print_worker_threads_on(st); 147 ConcurrentMarkSweepThread::print_all_on(st); 148 } 149 150 void CMSHeap::print_on_error(outputStream* st) const { 151 GenCollectedHeap::print_on_error(st); 152 st->cr(); 153 CMSCollector::print_on_error(st); 154 } 155 156 bool CMSHeap::create_cms_collector() { 157 assert(old_gen()->kind() == Generation::ConcurrentMarkSweep, 158 "Unexpected generation kinds"); 159 CMSCollector* collector = 160 new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(), 161 rem_set(), 162 (ConcurrentMarkSweepPolicy*) gen_policy()); 163 164 if (collector == NULL || !collector->completed_initialization()) { 165 if (collector) { 166 delete collector; // Be nice in embedded situation 167 } 168 vm_shutdown_during_initialization("Could not create CMS collector"); 169 return false; 170 } 171 return true; // success 172 } 173 174 void CMSHeap::collect(GCCause::Cause cause) { 175 if (should_do_concurrent_full_gc(cause)) { 176 // Mostly concurrent full collection. 
177 collect_mostly_concurrent(cause); 178 } else { 179 GenCollectedHeap::collect(cause); 180 } 181 } 182 183 bool CMSHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { 184 switch (cause) { 185 case GCCause::_gc_locker: return GCLockerInvokesConcurrent; 186 case GCCause::_java_lang_system_gc: 187 case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent; 188 default: return false; 189 } 190 } 191 192 void CMSHeap::collect_mostly_concurrent(GCCause::Cause cause) { 193 assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock"); 194 195 MutexLocker ml(Heap_lock); 196 // Read the GC counts while holding the Heap_lock 197 unsigned int full_gc_count_before = total_full_collections(); 198 unsigned int gc_count_before = total_collections(); 199 { 200 MutexUnlocker mu(Heap_lock); 201 VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause); 202 VMThread::execute(&op); 203 } 204 } 205 206 void CMSHeap::stop() { 207 ConcurrentMarkSweepThread::cmst()->stop(); 208 } 209 210 void CMSHeap::safepoint_synchronize_begin() { 211 ConcurrentMarkSweepThread::synchronize(false); 212 } 213 214 void CMSHeap::safepoint_synchronize_end() { 215 ConcurrentMarkSweepThread::desynchronize(false); 216 } 217 218 void CMSHeap::cms_process_roots(StrongRootsScope* scope, 219 bool young_gen_as_roots, 220 ScanningOption so, 221 bool only_strong_roots, 222 OopsInGenClosure* root_closure, 223 CLDClosure* cld_closure, 224 OopStorage::ParState<false, false>* par_state_string) { 225 MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations); 226 CLDClosure* weak_cld_closure = only_strong_roots ? 
NULL : cld_closure; 227 228 process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure); 229 if (!only_strong_roots) { 230 process_string_table_roots(scope, root_closure, par_state_string); 231 } 232 233 if (young_gen_as_roots && 234 !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) { 235 root_closure->set_generation(young_gen()); 236 young_gen()->oop_iterate(root_closure); 237 root_closure->reset_generation(); 238 } 239 240 _process_strong_tasks->all_tasks_completed(scope->n_threads()); 241 } 242 243 void CMSHeap::gc_prologue(bool full) { 244 always_do_update_barrier = false; 245 GenCollectedHeap::gc_prologue(full); 246 }; 247 248 void CMSHeap::gc_epilogue(bool full) { 249 GenCollectedHeap::gc_epilogue(full); 250 always_do_update_barrier = true; 251 }; 252 253 GrowableArray<GCMemoryManager*> CMSHeap::memory_managers() { 254 GrowableArray<GCMemoryManager*> memory_managers(2); 255 memory_managers.append(_young_manager); 256 memory_managers.append(_old_manager); 257 return memory_managers; 258 } 259 260 GrowableArray<MemoryPool*> CMSHeap::memory_pools() { 261 GrowableArray<MemoryPool*> memory_pools(3); 262 memory_pools.append(_eden_pool); 263 memory_pools.append(_survivor_pool); 264 memory_pools.append(_old_pool); 265 return memory_pools; 266 }