10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #if !defined(__clang_major__) && defined(__GNUC__)
26 #define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess.
27 #endif
28
29 #include "precompiled.hpp"
30 #include "code/codeCache.hpp"
31 #include "code/icBuffer.hpp"
32 #include "gc_implementation/g1/bufferingOopClosure.hpp"
33 #include "gc_implementation/g1/concurrentG1Refine.hpp"
34 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
35 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
36 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
37 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
38 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
39 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
40 #include "gc_implementation/g1/g1EvacFailure.hpp"
41 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
42 #include "gc_implementation/g1/g1Log.hpp"
43 #include "gc_implementation/g1/g1MarkSweep.hpp"
44 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
45 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
46 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
47 #include "gc_implementation/g1/g1RemSet.inline.hpp"
48 #include "gc_implementation/g1/g1StringDedup.hpp"
49 #include "gc_implementation/g1/g1YCTypes.hpp"
5116     // The first nmethod is claimed by the first worker.
5117 if (worker_id == 0 && _first_nmethod != NULL) {
5118 clean_nmethod(_first_nmethod);
5119 _first_nmethod = NULL;
5120 }
5121
5122 int num_claimed_nmethods;
5123 nmethod* claimed_nmethods[MaxClaimNmethods];
5124
5125 while (true) {
5126 claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
5127
5128 if (num_claimed_nmethods == 0) {
5129 break;
5130 }
5131
5132 for (int i = 0; i < num_claimed_nmethods; i++) {
5133 clean_nmethod(claimed_nmethods[i]);
5134 }
5135 }
5136 }
5137
5138 void work_second_pass(uint worker_id) {
5139 nmethod* nm;
5140 // Take care of postponed nmethods.
5141 while ((nm = claim_postponed_nmethod()) != NULL) {
5142 clean_nmethod_postponed(nm);
5143 }
5144 }
5145 };
5146
// Static lock used by G1CodeCacheUnloadingTask to serialize claiming work
// from the code cache during unloading.
// NOTE(review): allocated once at static-initialization time and never freed
// (lives for the duration of the VM).
Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
5148
5149 class G1KlassCleaningTask : public StackObj {
5150 BoolObjectClosure* _is_alive;
5151 volatile jint _clean_klass_tree_claimed;
5152 ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
5153
5154 public:
5155 G1KlassCleaningTask(BoolObjectClosure* is_alive) :
5168 }
5169
5170 InstanceKlass* claim_next_klass() {
5171 Klass* klass;
5172 do {
5173 klass =_klass_iterator.next_klass();
5174 } while (klass != NULL && !klass->oop_is_instance());
5175
5176 return (InstanceKlass*)klass;
5177 }
5178
5179 public:
5180
5181 void clean_klass(InstanceKlass* ik) {
5182 ik->clean_implementors_list(_is_alive);
5183 ik->clean_method_data(_is_alive);
5184
5185 // G1 specific cleanup work that has
5186 // been moved here to be done in parallel.
5187 ik->clean_dependent_nmethods();
5188 }
5189
5190 void work() {
5191 ResourceMark rm;
5192
5193 // One worker will clean the subklass/sibling klass tree.
5194 if (claim_clean_klass_tree_task()) {
5195 Klass::clean_subklass_tree(_is_alive);
5196 }
5197
5198 // All workers will help cleaning the classes,
5199 InstanceKlass* klass;
5200 while ((klass = claim_next_klass()) != NULL) {
5201 clean_klass(klass);
5202 }
5203 }
5204 };
5205
// To minimize the remark pause times, the tasks below are done in parallel.
// The task aggregates three sub-tasks (string/symbol unlinking, two-pass
// code cache cleaning, klass cleaning) and sequences them per worker.
class G1ParallelCleaningTask : public AbstractGangTask {
private:
  G1StringSymbolTableUnlinkTask _string_symbol_task;
  G1CodeCacheUnloadingTask _code_cache_task;
  G1KlassCleaningTask _klass_cleaning_task;

public:
  // The constructor is run in the VMThread.
  // is_alive:           liveness closure used by all sub-tasks
  // process_strings:    whether to unlink dead interned Strings
  // process_symbols:    whether to unlink dead Symbols
  // num_workers:        number of GC workers that will invoke work()
  // unloading_occurred: true if class unloading happened this cycle
  G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
      AbstractGangTask("Parallel Cleaning"),
      _string_symbol_task(is_alive, process_strings, process_symbols),
      _code_cache_task(num_workers, is_alive, unloading_occurred),
      _klass_cleaning_task(is_alive) {
  }

  // The parallel work done by all worker threads.
  // NOTE(review): the call order below is the synchronization protocol —
  // the second code cache pass must not start before every worker has
  // finished the first pass; do not reorder.
  void work(uint worker_id) {
    // Do first pass of code cache cleaning.
    _code_cache_task.work_first_pass(worker_id);

    // Let the threads mark that the first pass is done.
    _code_cache_task.barrier_mark(worker_id);

    // Clean the Strings and Symbols. (Placed between barrier_mark and
    // barrier_wait, presumably so workers stay busy while the slowest
    // worker finishes the first pass — TODO confirm.)
    _string_symbol_task.work(worker_id);

    // Wait for all workers to finish the first code cache cleaning pass.
    _code_cache_task.barrier_wait(worker_id);

    // Do the second code cache cleaning work, which relies on
    // the liveness information gathered during the first pass.
    _code_cache_task.work_second_pass(worker_id);

    // Clean all klasses that were not unloaded.
    _klass_cleaning_task.work();
  }
};
5244
5245
5246 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
5247 bool process_strings,
5248 bool process_symbols,
5249 bool class_unloading_occurred) {
5250 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5251 workers()->active_workers() : 1);
5252
5253 G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
5254 n_workers, class_unloading_occurred);
5255 if (G1CollectedHeap::use_parallel_gc_threads()) {
5256 set_par_threads(n_workers);
5257 workers()->run_task(&g1_unlink_task);
5258 set_par_threads(0);
5259 } else {
5260 g1_unlink_task.work(0);
5261 }
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #if !defined(__clang_major__) && defined(__GNUC__)
26 #define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess.
27 #endif
28
29 #include "precompiled.hpp"
30 #include "classfile/metadataOnStackMark.hpp"
31 #include "code/codeCache.hpp"
32 #include "code/icBuffer.hpp"
33 #include "gc_implementation/g1/bufferingOopClosure.hpp"
34 #include "gc_implementation/g1/concurrentG1Refine.hpp"
35 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
36 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
37 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
38 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
39 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
40 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
41 #include "gc_implementation/g1/g1EvacFailure.hpp"
42 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
43 #include "gc_implementation/g1/g1Log.hpp"
44 #include "gc_implementation/g1/g1MarkSweep.hpp"
45 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
46 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
47 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
48 #include "gc_implementation/g1/g1RemSet.inline.hpp"
49 #include "gc_implementation/g1/g1StringDedup.hpp"
50 #include "gc_implementation/g1/g1YCTypes.hpp"
5117     // The first nmethod is claimed by the first worker.
5118 if (worker_id == 0 && _first_nmethod != NULL) {
5119 clean_nmethod(_first_nmethod);
5120 _first_nmethod = NULL;
5121 }
5122
5123 int num_claimed_nmethods;
5124 nmethod* claimed_nmethods[MaxClaimNmethods];
5125
5126 while (true) {
5127 claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
5128
5129 if (num_claimed_nmethods == 0) {
5130 break;
5131 }
5132
5133 for (int i = 0; i < num_claimed_nmethods; i++) {
5134 clean_nmethod(claimed_nmethods[i]);
5135 }
5136 }
5137
5138 // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
5139 // Need to retire the buffers now that this thread has stopped cleaning nmethods.
5140 MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
5141 }
5142
5143 void work_second_pass(uint worker_id) {
5144 nmethod* nm;
5145 // Take care of postponed nmethods.
5146 while ((nm = claim_postponed_nmethod()) != NULL) {
5147 clean_nmethod_postponed(nm);
5148 }
5149 }
5150 };
5151
// Static lock used by G1CodeCacheUnloadingTask to serialize claiming work
// from the code cache during unloading.
// NOTE(review): allocated once at static-initialization time and never freed
// (lives for the duration of the VM).
Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
5153
5154 class G1KlassCleaningTask : public StackObj {
5155 BoolObjectClosure* _is_alive;
5156 volatile jint _clean_klass_tree_claimed;
5157 ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
5158
5159 public:
5160 G1KlassCleaningTask(BoolObjectClosure* is_alive) :
5173 }
5174
5175 InstanceKlass* claim_next_klass() {
5176 Klass* klass;
5177 do {
5178 klass =_klass_iterator.next_klass();
5179 } while (klass != NULL && !klass->oop_is_instance());
5180
5181 return (InstanceKlass*)klass;
5182 }
5183
5184 public:
5185
5186 void clean_klass(InstanceKlass* ik) {
5187 ik->clean_implementors_list(_is_alive);
5188 ik->clean_method_data(_is_alive);
5189
5190 // G1 specific cleanup work that has
5191 // been moved here to be done in parallel.
5192 ik->clean_dependent_nmethods();
5193 if (JvmtiExport::has_redefined_a_class()) {
5194 InstanceKlass::purge_previous_versions(ik);
5195 }
5196 }
5197
5198 void work() {
5199 ResourceMark rm;
5200
5201 // One worker will clean the subklass/sibling klass tree.
5202 if (claim_clean_klass_tree_task()) {
5203 Klass::clean_subklass_tree(_is_alive);
5204 }
5205
5206 // All workers will help cleaning the classes,
5207 InstanceKlass* klass;
5208 while ((klass = claim_next_klass()) != NULL) {
5209 clean_klass(klass);
5210 }
5211 }
5212 };
5213
// To minimize the remark pause times, the tasks below are done in parallel.
// The task aggregates three sub-tasks (string/symbol unlinking, two-pass
// code cache cleaning, klass cleaning) and sequences them per worker.
class G1ParallelCleaningTask : public AbstractGangTask {
private:
  G1StringSymbolTableUnlinkTask _string_symbol_task;
  G1CodeCacheUnloadingTask _code_cache_task;
  G1KlassCleaningTask _klass_cleaning_task;

public:
  // The constructor is run in the VMThread.
  // is_alive:           liveness closure used by all sub-tasks
  // process_strings:    whether to unlink dead interned Strings
  // process_symbols:    whether to unlink dead Symbols
  // num_workers:        number of GC workers that will invoke work()
  // unloading_occurred: true if class unloading happened this cycle
  G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
      AbstractGangTask("Parallel Cleaning"),
      _string_symbol_task(is_alive, process_strings, process_symbols),
      _code_cache_task(num_workers, is_alive, unloading_occurred),
      _klass_cleaning_task(is_alive) {
  }

  // Sanity check: this worker thread must not have a pending
  // MetadataOnStackMark buffer before the cleaning work starts.
  void pre_work_verification() {
    assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
  }

  // Sanity check: all MetadataOnStackMark buffers must have been retired
  // (by work_first_pass) before the worker returns.
  void post_work_verification() {
    assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
  }

  // The parallel work done by all worker threads.
  // NOTE(review): the call order below is the synchronization protocol —
  // the second code cache pass must not start before every worker has
  // finished the first pass; do not reorder.
  void work(uint worker_id) {
    pre_work_verification();

    // Do first pass of code cache cleaning.
    _code_cache_task.work_first_pass(worker_id);

    // Let the threads mark that the first pass is done.
    _code_cache_task.barrier_mark(worker_id);

    // Clean the Strings and Symbols. (Placed between barrier_mark and
    // barrier_wait, presumably so workers stay busy while the slowest
    // worker finishes the first pass — TODO confirm.)
    _string_symbol_task.work(worker_id);

    // Wait for all workers to finish the first code cache cleaning pass.
    _code_cache_task.barrier_wait(worker_id);

    // Do the second code cache cleaning work, which relies on
    // the liveness information gathered during the first pass.
    _code_cache_task.work_second_pass(worker_id);

    // Clean all klasses that were not unloaded.
    _klass_cleaning_task.work();

    post_work_verification();
  }
};
5264
5265
5266 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
5267 bool process_strings,
5268 bool process_symbols,
5269 bool class_unloading_occurred) {
5270 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5271 workers()->active_workers() : 1);
5272
5273 G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
5274 n_workers, class_unloading_occurred);
5275 if (G1CollectedHeap::use_parallel_gc_threads()) {
5276 set_par_threads(n_workers);
5277 workers()->run_task(&g1_unlink_task);
5278 set_par_threads(0);
5279 } else {
5280 g1_unlink_task.work(0);
5281 }
|