/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/shared/genMemoryPools.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/workgroup.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "utilities/stack.inline.hpp"

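// MemoryPool wrapper that exposes the CMS old generation's free-list space
// to the memory management (JMX) interface.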
class CompactibleFreeListSpacePool : public CollectedMemoryPool {
private:
  CompactibleFreeListSpace* _space;
public:
  CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
                               const char* name,
                               size_t max_size,
                               bool support_usage_threshold) :
    CollectedMemoryPool(name, space->capacity(), max_size, support_usage_threshold),
    _space(space) {
  }

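  // The maximum size is reported as 0 once the pool is no longer available
  // for allocation.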
  MemoryUsage get_memory_usage() {
    size_t maxSize   = (available_for_allocation() ? max_size() : 0);
    size_t used      = used_in_bytes();
    size_t committed = _space->capacity();

    return MemoryUsage(initial_size(), used, committed, maxSize);
  }

  size_t used_in_bytes() {
    return _space->used();
  }
};

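// Set up the minor (ParNew) and major (CMS) GC memory managers and the work
// gang used by ParNew and the parallel stop-the-world phases of CMS.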
CMSHeap::CMSHeap(GenCollectorPolicy *policy) :
  GenCollectedHeap(policy), _eden_pool(NULL), _survivor_pool(NULL), _old_pool(NULL) {
  _young_mgr = new GCMemoryManager("ParNew", "end of minor GC");
  _old_mgr = new GCMemoryManager("ConcurrentMarkSweep", "end of major GC");

  _workers = new WorkGang("GC Thread", ParallelGCThreads,
                          /* are_GC_task_threads */true,
                          /* are_ConcurrentGC_threads */false);
  _workers->initialize_workers();
}

jint CMSHeap::initialize() {
  jint status = GenCollectedHeap::initialize();
  if (status != JNI_OK) return status;

  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  assert(collector_policy()->is_concurrent_mark_sweep_policy(), "must be CMS policy");
  if (!create_cms_collector()) {
    return JNI_ENOMEM;
  }

  ParNewGeneration* young = (ParNewGeneration*) young_gen();
  _eden_pool = new ContiguousSpacePool(young->eden(),
                                       "Par Eden Space",
                                       young->max_eden_size(),
                                       false);

  _survivor_pool = new SurvivorContiguousSpacePool(young,
                                                   "Par Survivor Space",
                                                   young->max_survivor_size(),
                                                   false);

  ConcurrentMarkSweepGeneration* old = (ConcurrentMarkSweepGeneration*) old_gen();
  _old_pool = new CompactibleFreeListSpacePool(old->cmsSpace(),
                                               "CMS Old Gen",
                                               old->reserved().byte_size(),
                                               true);

  _young_mgr->add_pool(_eden_pool);
  _young_mgr->add_pool(_survivor_pool);

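  // A major (CMS full) collection operates on the entire heap, so the major GC
  // manager reports against the young pools as well as the old pool.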
  _old_mgr->add_pool(_eden_pool);
  _old_mgr->add_pool(_survivor_pool);
  _old_mgr->add_pool(_old_pool);

  return JNI_OK;
}

void CMSHeap::check_gen_kinds() {
  assert(young_gen()->kind() == Generation::ParNew,
         "Wrong youngest generation type");
  assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
         "Wrong generation kind");
}

CMSHeap* CMSHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
  assert(heap->kind() == CollectedHeap::CMSHeap, "Not a CMSHeap");
  return (CMSHeap*) heap;
}

void CMSHeap::gc_threads_do(ThreadClosure* tc) const {
  assert(workers() != NULL, "should have workers here");
  workers()->threads_do(tc);
  ConcurrentMarkSweepThread::threads_do(tc);
}

void CMSHeap::print_gc_threads_on(outputStream* st) const {
  assert(workers() != NULL, "should have workers here");
  workers()->print_worker_threads_on(st);
  ConcurrentMarkSweepThread::print_all_on(st);
}

void CMSHeap::print_on_error(outputStream* st) const {
  GenCollectedHeap::print_on_error(st);
  st->cr();
  CMSCollector::print_on_error(st);
}

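// Create the CMSCollector that drives concurrent collection of the old
// generation. Returns false, after reporting the error, if the collector
// cannot be created or fails to initialize.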
bool CMSHeap::create_cms_collector() {
  assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  assert(gen_policy()->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
  CMSCollector* collector =
    new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(),
                     rem_set(),
                     gen_policy()->as_concurrent_mark_sweep_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector; // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true; // success
}

void CMSHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
    // Mostly concurrent full collection.
    collect_mostly_concurrent(cause);
  } else {
    GenCollectedHeap::collect(cause);
  }
}

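// Decide, based on the GC cause and the corresponding flags, whether a request
// for a full collection should be satisfied by a mostly concurrent CMS cycle.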
bool CMSHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
    case GCCause::_java_lang_system_gc:
    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
    default:                            return false;
  }
}

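// Schedule a mostly concurrent full collection as a VM operation. The collection
// counts are read under the Heap_lock so the operation can detect a collection
// that completed in the meantime.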
void CMSHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
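    // Release the Heap_lock while the VM operation executes; its prologue
    // re-acquires the lock.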
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}

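// Called during VM shutdown to stop the CMS background thread.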
void CMSHeap::stop() {
  ConcurrentMarkSweepThread::cmst()->stop();
}

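// Park the CMS thread when a safepoint begins and let it continue once the
// safepoint ends.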
void CMSHeap::safepoint_synchronize_begin() {
  ConcurrentMarkSweepThread::synchronize(false);
}

void CMSHeap::safepoint_synchronize_end() {
  ConcurrentMarkSweepThread::desynchronize(false);
}

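// Process roots on behalf of the CMS collector. Weak roots and weak CLDs are
// skipped when only_strong_roots is set; the young generation is optionally
// scanned as part of the root set.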
void CMSHeap::cms_process_roots(StrongRootsScope* scope,
                                bool young_gen_as_roots,
                                ScanningOption so,
                                bool only_strong_roots,
                                OopsInGenClosure* root_closure,
                                CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
  if (!only_strong_roots) {
    process_string_table_roots(scope, root_closure);
  }

  if (young_gen_as_roots &&
      !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
    root_closure->set_generation(young_gen());
    young_gen()->oop_iterate(root_closure);
    root_closure->reset_generation();
  }

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}

void CMSHeap::gc_prologue(bool full) {
  always_do_update_barrier = false;
  GenCollectedHeap::gc_prologue(full);
}

void CMSHeap::gc_epilogue(bool full) {
  GenCollectedHeap::gc_epilogue(full);
  always_do_update_barrier = true;
}

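// Expose the minor and major GC memory managers to the memory service.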
GrowableArray<GCMemoryManager*> CMSHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_mgr);
  memory_managers.append(_old_mgr);
  return memory_managers;
}

GrowableArray<MemoryPool*> CMSHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}