19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/stringTable.hpp"
27 #include "code/codeCache.hpp"
28 #include "gc/parallel/cardTableExtension.hpp"
29 #include "gc/parallel/gcTaskManager.hpp"
30 #include "gc/parallel/parallelScavengeHeap.hpp"
31 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
32 #include "gc/parallel/psMarkSweep.hpp"
33 #include "gc/parallel/psParallelCompact.hpp"
34 #include "gc/parallel/psScavenge.inline.hpp"
35 #include "gc/parallel/psTasks.hpp"
36 #include "gc/shared/collectorPolicy.hpp"
37 #include "gc/shared/gcCause.hpp"
38 #include "gc/shared/gcHeapSummary.hpp"
39 #include "gc/shared/gcLocker.inline.hpp"
40 #include "gc/shared/gcTimer.hpp"
41 #include "gc/shared/gcTrace.hpp"
42 #include "gc/shared/gcTraceTime.hpp"
43 #include "gc/shared/isGCActiveMark.hpp"
44 #include "gc/shared/referencePolicy.hpp"
45 #include "gc/shared/referenceProcessor.hpp"
46 #include "gc/shared/spaceDecorator.hpp"
47 #include "memory/resourceArea.hpp"
48 #include "oops/oop.inline.hpp"
49 #include "runtime/biasedLocking.hpp"
50 #include "runtime/fprofiler.hpp"
51 #include "runtime/handles.inline.hpp"
52 #include "runtime/threadCritical.hpp"
53 #include "runtime/vmThread.hpp"
54 #include "runtime/vm_operations.hpp"
55 #include "services/memoryService.hpp"
56 #include "utilities/stack.inline.hpp"
57
58 HeapWord* PSScavenge::_to_space_top_before_gc = NULL; // top of to-space saved before a scavenge — presumably consulted to bound copies/verification during GC; confirm at use sites
261 _gc_timer.register_gc_start();
262
263 TimeStamp scavenge_entry;
264 TimeStamp scavenge_midpoint;
265 TimeStamp scavenge_exit;
266
267 scavenge_entry.update();
268
269 if (GC_locker::check_active_before_gc()) {
270 return false;
271 }
272
273 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
274 GCCause::Cause gc_cause = heap->gc_cause();
275
276 // Check for potential problems.
277 if (!should_attempt_scavenge()) {
278 return false;
279 }
280
281 _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
282
283 bool promotion_failure_occurred = false;
284
285 PSYoungGen* young_gen = heap->young_gen();
286 PSOldGen* old_gen = heap->old_gen();
287 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
288
289 heap->increment_total_collections();
290
291 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
292
293 if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
294 // Gather the feedback data for eden occupancy.
295 young_gen->eden_space()->accumulate_statistics();
296 }
297
298 if (ZapUnusedHeapArea) {
299 // Save information needed to minimize mangling
300 heap->record_gen_tops_before_GC();
305
306 assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
307 assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
308
309 size_t prev_used = heap->used();
310
311 // Fill in TLABs
312 heap->accumulate_statistics_all_tlabs();
313 heap->ensure_parsability(true); // retire TLABs
314
315 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
316 HandleMark hm; // Discard invalid handles created during verification
317 Universe::verify(" VerifyBeforeGC:");
318 }
319
320 {
321 ResourceMark rm;
322 HandleMark hm;
323
324 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
325 GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
326 TraceCollectorStats tcs(counters());
327 TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
328
329 if (TraceYoungGenTime) accumulated_time()->start();
330
331 // Let the size policy know we're starting
332 size_policy->minor_collection_begin();
333
334 // Verify the object start arrays.
335 if (VerifyObjectStartArray &&
336 VerifyBeforeGC) {
337 old_gen->verify_object_start_array();
338 }
339
340 // Verify no unmarked old->young roots
341 if (VerifyRememberedSets) {
342 CardTableExtension::verify_all_young_refs_imprecise();
343 }
344
345 if (!ScavengeWithObjectsInToSpace) {
370 // creating the promotion_manager. We pass the top
371 // values to the card_table, to prevent it from
372 // straying into the promotion labs.
373 HeapWord* old_top = old_gen->object_space()->top();
374
375 // Release all previously held resources
376 gc_task_manager()->release_all_resources();
377
378 // Set the number of GC threads to be used in this collection
379 gc_task_manager()->set_active_gang();
380 gc_task_manager()->task_idle_workers();
381 // Get the active number of workers here and use that value
382 // throughout the methods.
383 uint active_workers = gc_task_manager()->active_workers();
384
385 PSPromotionManager::pre_scavenge();
386
387 // We'll use the promotion manager again later.
388 PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
389 {
390 GCTraceTime tm("Scavenge", false, false, &_gc_timer, _gc_tracer.gc_id());
391 ParallelScavengeHeap::ParStrongRootsScope psrs;
392
393 GCTaskQueue* q = GCTaskQueue::create();
394
395 if (!old_gen->object_space()->is_empty()) {
396 // There are only old-to-young pointers if there are objects
397 // in the old gen.
398 uint stripe_total = active_workers;
399 for(uint i=0; i < stripe_total; i++) {
400 q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
401 }
402 }
403
404 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
405 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
406 // We scan the thread roots in parallel
407 Threads::create_thread_roots_tasks(q);
408 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
409 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
410 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
412 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
413 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
414 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
415
416 ParallelTaskTerminator terminator(
417 active_workers,
418 (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
419 if (active_workers > 1) {
420 for (uint j = 0; j < active_workers; j++) {
421 q->enqueue(new StealTask(&terminator));
422 }
423 }
424
425 gc_task_manager()->execute_and_wait(q);
426 }
427
428 scavenge_midpoint.update();
429
430 // Process reference objects discovered during scavenge
431 {
432 GCTraceTime tm("References", false, false, &_gc_timer, _gc_tracer.gc_id());
433
434 reference_processor()->setup_policy(false); // not always_clear
435 reference_processor()->set_active_mt_degree(active_workers);
436 PSKeepAliveClosure keep_alive(promotion_manager);
437 PSEvacuateFollowersClosure evac_followers(promotion_manager);
438 ReferenceProcessorStats stats;
439 if (reference_processor()->processing_is_mt()) {
440 PSRefProcTaskExecutor task_executor;
441 stats = reference_processor()->process_discovered_references(
442 &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
443 &_gc_timer, _gc_tracer.gc_id());
444 } else {
445 stats = reference_processor()->process_discovered_references(
446 &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer, _gc_tracer.gc_id());
447 }
448
449 _gc_tracer.report_gc_reference_stats(stats);
450
451 // Enqueue reference objects discovered during scavenge.
452 if (reference_processor()->processing_is_mt()) {
453 PSRefProcTaskExecutor task_executor;
454 reference_processor()->enqueue_discovered_references(&task_executor);
455 } else {
456 reference_processor()->enqueue_discovered_references(NULL);
457 }
458 }
459
460 {
461 GCTraceTime tm("StringTable", false, false, &_gc_timer, _gc_tracer.gc_id());
462 // Unlink any dead interned Strings and process the remaining live ones.
463 PSScavengeRootsClosure root_closure(promotion_manager);
464 StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
465 }
466
467 // Finally, flush the promotion_manager's labs, and deallocate its stacks.
468 promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
469 if (promotion_failure_occurred) {
470 clean_up_failed_promotion();
471 if (PrintGC) {
472 gclog_or_tty->print("--");
473 }
474 }
475
476 // Let the size policy know we're done. Note that we count promotion
477 // failure cleanup time as part of the collection (otherwise, we're
478 // implicitly saying it's mutator time).
479 size_policy->minor_collection_end(gc_cause);
480
481 if (!promotion_failure_occurred) {
611 }
612
613 // Update the structure of the eden. With NUMA-eden, CPU hotplugging or offlining can
614 // cause the heap layout to change. Make sure eden is reshaped if that's the case.
615 // Also, update() will cause adaptive NUMA chunk resizing.
616 assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
617 young_gen->eden_space()->update();
618
619 heap->gc_policy_counters()->update_counters();
620
621 heap->resize_all_tlabs();
622
623 assert(young_gen->to_space()->is_empty(), "to space should be empty now");
624 }
625
626 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
627
628 NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
629
630 {
631 GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer, _gc_tracer.gc_id());
632
633 CodeCache::prune_scavenge_root_nmethods();
634 }
635
636 // Re-verify object start arrays
637 if (VerifyObjectStartArray &&
638 VerifyAfterGC) {
639 old_gen->verify_object_start_array();
640 }
641
642 // Verify all old -> young cards are now precise
643 if (VerifyRememberedSets) {
644 // Precise verification will give false positives. Until this is fixed,
645 // use imprecise verification.
646 // CardTableExtension::verify_all_young_refs_precise();
647 CardTableExtension::verify_all_young_refs_imprecise();
648 }
649
650 if (TraceYoungGenTime) accumulated_time()->stop();
651
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/stringTable.hpp"
27 #include "code/codeCache.hpp"
28 #include "gc/parallel/cardTableExtension.hpp"
29 #include "gc/parallel/gcTaskManager.hpp"
30 #include "gc/parallel/parallelScavengeHeap.hpp"
31 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
32 #include "gc/parallel/psMarkSweep.hpp"
33 #include "gc/parallel/psParallelCompact.hpp"
34 #include "gc/parallel/psScavenge.inline.hpp"
35 #include "gc/parallel/psTasks.hpp"
36 #include "gc/shared/collectorPolicy.hpp"
37 #include "gc/shared/gcCause.hpp"
38 #include "gc/shared/gcHeapSummary.hpp"
39 #include "gc/shared/gcId.hpp"
40 #include "gc/shared/gcLocker.inline.hpp"
41 #include "gc/shared/gcTimer.hpp"
42 #include "gc/shared/gcTrace.hpp"
43 #include "gc/shared/gcTraceTime.hpp"
44 #include "gc/shared/isGCActiveMark.hpp"
45 #include "gc/shared/referencePolicy.hpp"
46 #include "gc/shared/referenceProcessor.hpp"
47 #include "gc/shared/spaceDecorator.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "oops/oop.inline.hpp"
50 #include "runtime/biasedLocking.hpp"
51 #include "runtime/fprofiler.hpp"
52 #include "runtime/handles.inline.hpp"
53 #include "runtime/threadCritical.hpp"
54 #include "runtime/vmThread.hpp"
55 #include "runtime/vm_operations.hpp"
56 #include "services/memoryService.hpp"
57 #include "utilities/stack.inline.hpp"
58
59 HeapWord* PSScavenge::_to_space_top_before_gc = NULL; // top of to-space saved before a scavenge — presumably consulted to bound copies/verification during GC; confirm at use sites
262 _gc_timer.register_gc_start();
263
264 TimeStamp scavenge_entry;
265 TimeStamp scavenge_midpoint;
266 TimeStamp scavenge_exit;
267
268 scavenge_entry.update();
269
270 if (GC_locker::check_active_before_gc()) {
271 return false;
272 }
273
274 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
275 GCCause::Cause gc_cause = heap->gc_cause();
276
277 // Check for potential problems.
278 if (!should_attempt_scavenge()) {
279 return false;
280 }
281
282 GCIdMark gc_id_mark;
283 _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
284
285 bool promotion_failure_occurred = false;
286
287 PSYoungGen* young_gen = heap->young_gen();
288 PSOldGen* old_gen = heap->old_gen();
289 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
290
291 heap->increment_total_collections();
292
293 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
294
295 if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
296 // Gather the feedback data for eden occupancy.
297 young_gen->eden_space()->accumulate_statistics();
298 }
299
300 if (ZapUnusedHeapArea) {
301 // Save information needed to minimize mangling
302 heap->record_gen_tops_before_GC();
307
308 assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
309 assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
310
311 size_t prev_used = heap->used();
312
313 // Fill in TLABs
314 heap->accumulate_statistics_all_tlabs();
315 heap->ensure_parsability(true); // retire TLABs
316
317 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
318 HandleMark hm; // Discard invalid handles created during verification
319 Universe::verify(" VerifyBeforeGC:");
320 }
321
322 {
323 ResourceMark rm;
324 HandleMark hm;
325
326 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
327 GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
328 TraceCollectorStats tcs(counters());
329 TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
330
331 if (TraceYoungGenTime) accumulated_time()->start();
332
333 // Let the size policy know we're starting
334 size_policy->minor_collection_begin();
335
336 // Verify the object start arrays.
337 if (VerifyObjectStartArray &&
338 VerifyBeforeGC) {
339 old_gen->verify_object_start_array();
340 }
341
342 // Verify no unmarked old->young roots
343 if (VerifyRememberedSets) {
344 CardTableExtension::verify_all_young_refs_imprecise();
345 }
346
347 if (!ScavengeWithObjectsInToSpace) {
372 // creating the promotion_manager. We pass the top
373 // values to the card_table, to prevent it from
374 // straying into the promotion labs.
375 HeapWord* old_top = old_gen->object_space()->top();
376
377 // Release all previously held resources
378 gc_task_manager()->release_all_resources();
379
380 // Set the number of GC threads to be used in this collection
381 gc_task_manager()->set_active_gang();
382 gc_task_manager()->task_idle_workers();
383 // Get the active number of workers here and use that value
384 // throughout the methods.
385 uint active_workers = gc_task_manager()->active_workers();
386
387 PSPromotionManager::pre_scavenge();
388
389 // We'll use the promotion manager again later.
390 PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
391 {
392 GCTraceTime tm("Scavenge", false, false, &_gc_timer);
393 ParallelScavengeHeap::ParStrongRootsScope psrs;
394
395 GCTaskQueue* q = GCTaskQueue::create();
396
397 if (!old_gen->object_space()->is_empty()) {
398 // There are only old-to-young pointers if there are objects
399 // in the old gen.
400 uint stripe_total = active_workers;
401 for(uint i=0; i < stripe_total; i++) {
402 q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
403 }
404 }
405
406 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
407 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
408 // We scan the thread roots in parallel
409 Threads::create_thread_roots_tasks(q);
410 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
411 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
412 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
414 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
415 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
416 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
417
418 ParallelTaskTerminator terminator(
419 active_workers,
420 (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
421 if (active_workers > 1) {
422 for (uint j = 0; j < active_workers; j++) {
423 q->enqueue(new StealTask(&terminator));
424 }
425 }
426
427 gc_task_manager()->execute_and_wait(q);
428 }
429
430 scavenge_midpoint.update();
431
432 // Process reference objects discovered during scavenge
433 {
434 GCTraceTime tm("References", false, false, &_gc_timer);
435
436 reference_processor()->setup_policy(false); // not always_clear
437 reference_processor()->set_active_mt_degree(active_workers);
438 PSKeepAliveClosure keep_alive(promotion_manager);
439 PSEvacuateFollowersClosure evac_followers(promotion_manager);
440 ReferenceProcessorStats stats;
441 if (reference_processor()->processing_is_mt()) {
442 PSRefProcTaskExecutor task_executor;
443 stats = reference_processor()->process_discovered_references(
444 &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
445 &_gc_timer);
446 } else {
447 stats = reference_processor()->process_discovered_references(
448 &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
449 }
450
451 _gc_tracer.report_gc_reference_stats(stats);
452
453 // Enqueue reference objects discovered during scavenge.
454 if (reference_processor()->processing_is_mt()) {
455 PSRefProcTaskExecutor task_executor;
456 reference_processor()->enqueue_discovered_references(&task_executor);
457 } else {
458 reference_processor()->enqueue_discovered_references(NULL);
459 }
460 }
461
462 {
463 GCTraceTime tm("StringTable", false, false, &_gc_timer);
464 // Unlink any dead interned Strings and process the remaining live ones.
465 PSScavengeRootsClosure root_closure(promotion_manager);
466 StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
467 }
468
469 // Finally, flush the promotion_manager's labs, and deallocate its stacks.
470 promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
471 if (promotion_failure_occurred) {
472 clean_up_failed_promotion();
473 if (PrintGC) {
474 gclog_or_tty->print("--");
475 }
476 }
477
478 // Let the size policy know we're done. Note that we count promotion
479 // failure cleanup time as part of the collection (otherwise, we're
480 // implicitly saying it's mutator time).
481 size_policy->minor_collection_end(gc_cause);
482
483 if (!promotion_failure_occurred) {
613 }
614
615 // Update the structure of the eden. With NUMA-eden, CPU hotplugging or offlining can
616 // cause the heap layout to change. Make sure eden is reshaped if that's the case.
617 // Also, update() will cause adaptive NUMA chunk resizing.
618 assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
619 young_gen->eden_space()->update();
620
621 heap->gc_policy_counters()->update_counters();
622
623 heap->resize_all_tlabs();
624
625 assert(young_gen->to_space()->is_empty(), "to space should be empty now");
626 }
627
628 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
629
630 NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
631
632 {
633 GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);
634
635 CodeCache::prune_scavenge_root_nmethods();
636 }
637
638 // Re-verify object start arrays
639 if (VerifyObjectStartArray &&
640 VerifyAfterGC) {
641 old_gen->verify_object_start_array();
642 }
643
644 // Verify all old -> young cards are now precise
645 if (VerifyRememberedSets) {
646 // Precise verification will give false positives. Until this is fixed,
647 // use imprecise verification.
648 // CardTableExtension::verify_all_young_refs_precise();
649 CardTableExtension::verify_all_young_refs_imprecise();
650 }
651
652 if (TraceYoungGenTime) accumulated_time()->stop();
653
|