/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/stringTable.hpp" 27 #include "gc/shared/oopStorage.inline.hpp" 28 #include "gc/shared/oopStorageParState.inline.hpp" 29 #include "gc/shared/oopStorageSet.hpp" 30 #include "gc/shared/weakProcessor.inline.hpp" 31 #include "gc/shared/weakProcessorPhases.hpp" 32 #include "gc/shared/weakProcessorPhaseTimes.hpp" 33 #include "memory/allocation.inline.hpp" 34 #include "memory/iterator.hpp" 35 #include "prims/resolvedMethodTable.hpp" 36 #include "runtime/globals.hpp" 37 #include "utilities/macros.hpp" 38 39 void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive) { 40 WeakProcessorPhases::Iterator pit = WeakProcessorPhases::serial_iterator(); 41 for ( ; !pit.is_end(); ++pit) { 42 WeakProcessorPhases::processor(*pit)(is_alive, keep_alive); 43 } 44 45 OopStorageSet::Iterator it = OopStorageSet::weak_iterator(); 46 for ( ; !it.is_end(); ++it) { 47 if (it->can_notify()) { 48 CountingSkippedIsAliveClosure<BoolObjectClosure, OopClosure> cl(is_alive, keep_alive); 49 it->oops_do(&cl); 50 it->notify(cl.num_dead()); 51 } else { 52 it->weak_oops_do(is_alive, keep_alive); 53 } 54 } 55 } 56 57 void WeakProcessor::oops_do(OopClosure* closure) { 58 AlwaysTrueClosure always_true; 59 weak_oops_do(&always_true, closure); 60 } 61 62 uint WeakProcessor::ergo_workers(uint max_workers) { 63 // Ignore ParallelRefProcEnabled; that's for j.l.r.Reference processing. 64 if (ReferencesPerThread == 0) { 65 // Configuration says always use all the threads. 66 return max_workers; 67 } 68 69 // One thread per ReferencesPerThread references (or fraction thereof) 70 // in the various OopStorage objects, bounded by max_threads. 71 // 72 // Serial phases are ignored in this calculation, because of the 73 // cost of running unnecessary threads. 
These phases are normally 74 // small or empty (assuming they are configured to exist at all), 75 // and development oriented, so not allocating any threads 76 // specifically for them is okay. 77 size_t ref_count = 0; 78 OopStorageSet::Iterator it = OopStorageSet::weak_iterator(); 79 for ( ; !it.is_end(); ++it) { 80 ref_count += it->allocation_count(); 81 } 82 83 // +1 to (approx) round up the ref per thread division. 84 size_t nworkers = 1 + (ref_count / ReferencesPerThread); 85 nworkers = MIN2(nworkers, static_cast<size_t>(max_workers)); 86 return static_cast<uint>(nworkers); 87 } 88 89 void WeakProcessor::Task::initialize() { 90 assert(_nworkers != 0, "must be"); 91 assert(_phase_times == NULL || _nworkers <= _phase_times->max_threads(), 92 "nworkers (%u) exceeds max threads (%u)", 93 _nworkers, _phase_times->max_threads()); 94 95 if (_phase_times) { 96 _phase_times->set_active_workers(_nworkers); 97 } 98 99 uint storage_count = WeakProcessorPhases::oopstorage_phase_count; 100 _storage_states = NEW_C_HEAP_ARRAY(StorageState, storage_count, mtGC); 101 102 StorageState* cur_state = _storage_states; 103 OopStorageSet::Iterator it = OopStorageSet::weak_iterator(); 104 for ( ; !it.is_end(); ++it, ++cur_state) { 105 assert(pointer_delta(cur_state, _storage_states, sizeof(StorageState)) < storage_count, "invariant"); 106 new (cur_state) StorageState(*it, _nworkers); 107 } 108 assert(pointer_delta(cur_state, _storage_states, sizeof(StorageState)) == storage_count, "invariant"); 109 } 110 111 WeakProcessor::Task::Task(uint nworkers) : 112 _phase_times(NULL), 113 _nworkers(nworkers), 114 _serial_phases_done(WeakProcessorPhases::serial_phase_count), 115 _storage_states(NULL) 116 { 117 initialize(); 118 } 119 120 WeakProcessor::Task::Task(WeakProcessorPhaseTimes* phase_times, uint nworkers) : 121 _phase_times(phase_times), 122 _nworkers(nworkers), 123 _serial_phases_done(WeakProcessorPhases::serial_phase_count), 124 _storage_states(NULL) 125 { 126 initialize(); 127 } 128 
129 WeakProcessor::Task::~Task() { 130 if (_storage_states != NULL) { 131 StorageState* states = _storage_states; 132 for (uint i = 0; i < WeakProcessorPhases::oopstorage_phase_count; ++i) { 133 states->storage()->notify(states->num_dead()); 134 states->StorageState::~StorageState(); 135 ++states; 136 } 137 FREE_C_HEAP_ARRAY(StorageState, _storage_states); 138 } 139 } 140 141 void WeakProcessor::GangTask::work(uint worker_id) { 142 _erased_do_work(this, worker_id); 143 }