10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/symbolTable.hpp"
28 #include "code/codeCache.hpp"
29 #include "gc/g1/concurrentMarkThread.inline.hpp"
30 #include "gc/g1/g1CollectedHeap.inline.hpp"
31 #include "gc/g1/g1CollectorState.hpp"
32 #include "gc/g1/g1ConcurrentMark.inline.hpp"
33 #include "gc/g1/g1HeapVerifier.hpp"
34 #include "gc/g1/g1OopClosures.inline.hpp"
35 #include "gc/g1/g1CardLiveData.inline.hpp"
36 #include "gc/g1/g1Policy.hpp"
37 #include "gc/g1/g1StringDedup.hpp"
38 #include "gc/g1/heapRegion.inline.hpp"
39 #include "gc/g1/heapRegionRemSet.hpp"
40 #include "gc/g1/heapRegionSet.inline.hpp"
41 #include "gc/g1/suspendibleThreadSet.hpp"
42 #include "gc/shared/gcId.hpp"
43 #include "gc/shared/gcTimer.hpp"
44 #include "gc/shared/gcTrace.hpp"
45 #include "gc/shared/gcTraceTime.inline.hpp"
46 #include "gc/shared/genOopClosures.inline.hpp"
47 #include "gc/shared/referencePolicy.hpp"
48 #include "gc/shared/strongRootsScope.hpp"
49 #include "gc/shared/taskqueue.inline.hpp"
429
430 _parallel_workers(NULL),
431
432 _completed_initialization(false) {
433
434 _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
435 _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
436
437 // Create & start a ConcurrentMark thread.
438 _cmThread = new ConcurrentMarkThread(this);
439 assert(cmThread() != NULL, "CM Thread should have been created");
440 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
441 if (_cmThread->osthread() == NULL) {
442 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
443 }
444
445 assert(CGC_lock != NULL, "Where's the CGC_lock?");
446 assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
447 assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
448
449 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
450 satb_qs.set_buffer_size(G1SATBBufferSize);
451
452 _root_regions.init(_g1h->survivor(), this);
453
454 if (ConcGCThreads > ParallelGCThreads) {
455 log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
456 ConcGCThreads, ParallelGCThreads);
457 return;
458 }
459 if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
460 // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
461 // if both are set
462 _sleep_factor = 0.0;
463 _marking_task_overhead = 1.0;
464 } else if (G1MarkingOverheadPercent > 0) {
465 // We will calculate the number of parallel marking threads based
466 // on a target overhead with respect to the soft real-time goal
467 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
468 double overall_cm_overhead =
469 (double) MaxGCPauseMillis * marking_overhead /
794 _has_aborted = false;
795
796 // Initialize marking structures. This has to be done in a STW phase.
797 reset();
798
799 // For each region note start of marking.
800 NoteStartOfMarkHRClosure startcl;
801 g1h->heap_region_iterate(&startcl);
802 }
803
804
// Safepoint work performed after the initial-mark root scan: switches the
// collector into concurrent-marking mode by enabling CM reference discovery
// and activating the SATB queues of all threads.
void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  // Publish the survivor regions recorded during the pause so the
  // concurrent phase can scan them as root regions.
  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}
827
828 /*
829 * Notice that in the next two methods, we actually leave the STS
830 * during the barrier sync and join it immediately afterwards. If we
831 * do not do this, the following deadlock can occur: one thread could
832 * be in the barrier sync code, waiting for the other thread to also
833 * sync up, whereas another one could be trying to yield, while also
834 * waiting for the other threads to sync up too.
1123
1124 double mark_work_end = os::elapsedTime();
1125
1126 weakRefsWork(clear_all_soft_refs);
1127
1128 if (has_overflown()) {
1129 // We overflowed. Restart concurrent marking.
1130 _restart_for_overflow = true;
1131
1132 // Verify the heap w.r.t. the previous marking bitmap.
1133 if (VerifyDuringGC) {
1134 HandleMark hm; // handle scope
1135 g1h->prepare_for_verify();
1136 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
1137 }
1138
1139 // Clear the marking state because we will be restarting
1140 // marking due to overflowing the global mark stack.
1141 reset_marking_state();
1142 } else {
1143 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1144 // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
1147 satb_mq_set.set_active_all_threads(false, /* new active value */
1148 true /* expected_active */);
1149
1150 if (VerifyDuringGC) {
1151 HandleMark hm; // handle scope
1152 g1h->prepare_for_verify();
1153 Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
1154 }
1155 g1h->verifier()->check_bitmaps("Remark End");
1156 assert(!restart_for_overflow(), "sanity");
1157 // Completely reset the marking state since marking completed
1158 set_non_marking_state();
1159 }
1160
1161 // Expand the marking stack, if we have to and if we can.
1162 if (_global_mark_stack.should_expand()) {
1163 _global_mark_stack.expand();
1822 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1823 _thread_parity(Threads::thread_claim_parity()) {}
1824
  // Processes one thread if we win the claim race for it this parity:
  // for a Java thread, scan the oops in its nmethods and drain its SATB
  // queue; for the VM thread, drain the shared SATB queue instead.
  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking
        // however the liveness of oops reachable from nmethods has very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
        // live by the SATB invariant but other oops recorded in nmethods may behave differently.
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        // The VM thread has no per-thread queue; its writes go to the
        // shared SATB queue.
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
      }
    }
  }
1846 };
1847
1848 class G1CMRemarkTask: public AbstractGangTask {
1849 private:
1850 G1ConcurrentMark* _cm;
1851 public:
1852 void work(uint worker_id) {
1853 // Since all available tasks are actually started, we should
1854 // only proceed if we're supposed to be active.
1855 if (worker_id < _cm->active_tasks()) {
1856 G1CMTask* task = _cm->task(worker_id);
1857 task->record_start_time();
1858 {
1859 ResourceMark rm;
1860 HandleMark hm;
1861
1862 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1890 g1h->ensure_parsability(false);
1891
1892 // this is remark, so we'll use up all active threads
1893 uint active_workers = g1h->workers()->active_workers();
1894 set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
1896 // value originally calculated in the G1ConcurrentMark
1897 // constructor and pass values of the active workers
1898 // through the gang in the task.
1899
1900 {
1901 StrongRootsScope srs(active_workers);
1902
1903 G1CMRemarkTask remarkTask(this, active_workers);
1904 // We will start all available threads, even if we decide that the
1905 // active_workers will be fewer. The extra ones will just bail out
1906 // immediately.
1907 g1h->workers()->run_task(&remarkTask);
1908 }
1909
1910 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1911 guarantee(has_overflown() ||
1912 satb_mq_set.completed_buffers_num() == 0,
1913 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1914 BOOL_TO_STR(has_overflown()),
1915 satb_mq_set.completed_buffers_num());
1916
1917 print_stats();
1918 }
1919
1920 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
1921 // Note we are overriding the read-only view of the prev map here, via
1922 // the cast.
1923 ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
1924 }
1925
1926 HeapRegion*
1927 G1ConcurrentMark::claim_region(uint worker_id) {
1928 // "checkpoint" the finger
1929 HeapWord* finger = _finger;
1930
2089 // since VerifyDuringGC verifies the objects marked during
2090 // a full GC against the previous bitmap.
2091
2092 {
2093 GCTraceTime(Debug, gc)("Clear Live Data");
2094 clear_live_data(_g1h->workers());
2095 }
2096 DEBUG_ONLY({
2097 GCTraceTime(Debug, gc)("Verify Live Data Clear");
2098 verify_live_data_clear();
2099 })
2100 // Empty mark stack
2101 reset_marking_state();
2102 for (uint i = 0; i < _max_worker_id; ++i) {
2103 _tasks[i]->clear_region_fields();
2104 }
2105 _first_overflow_barrier_sync.abort();
2106 _second_overflow_barrier_sync.abort();
2107 _has_aborted = true;
2108
2109 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2110 satb_mq_set.abandon_partial_marking();
2111 // This can be called either during or outside marking, we'll read
2112 // the expected_active value from the SATB queue set.
2113 satb_mq_set.set_active_all_threads(
2114 false, /* new active value */
2115 satb_mq_set.is_active() /* expected_active */);
2116 }
2117
2118 static void print_ms_time_info(const char* prefix, const char* name,
2119 NumberSeq& ns) {
2120 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2121 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2122 if (ns.num() > 0) {
2123 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]",
2124 prefix, ns.sd(), ns.maximum());
2125 }
2126 }
2127
2128 void G1ConcurrentMark::print_summary_info() {
2129 Log(gc, marking) log;
2330
2331 // (4) We check whether we should yield. If we have to, then we abort.
2332 if (SuspendibleThreadSet::should_yield()) {
2333 // We should yield. To do this we abort the task. The caller is
2334 // responsible for yielding.
2335 set_has_aborted();
2336 return;
2337 }
2338
2339 // (5) We check whether we've reached our time quota. If we have,
2340 // then we abort.
2341 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2342 if (elapsed_time_ms > _time_target_ms) {
2343 set_has_aborted();
2344 _has_timed_out = true;
2345 return;
2346 }
2347
  // (6) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
2350 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2351 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2352 // we do need to process SATB buffers, we'll abort and restart
2353 // the marking task to do so
2354 set_has_aborted();
2355 return;
2356 }
2357 }
2358
2359 void G1CMTask::recalculate_limits() {
2360 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2361 _words_scanned_limit = _real_words_scanned_limit;
2362
2363 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2364 _refs_reached_limit = _real_refs_reached_limit;
2365 }
2366
2367 void G1CMTask::decrease_limits() {
2368 // This is called when we believe that we're going to do an infrequent
2369 // operation which will increase the per byte scanned cost (i.e. move
2370 // entries to/from the global stack). It basically tries to decrease the
2483 while (!has_aborted() && get_entries_from_global_stack()) {
2484 drain_local_queue(partially);
2485 }
2486 }
2487 }
2488
2489 // SATB Queue has several assumptions on whether to call the par or
2490 // non-par versions of the methods. this is why some of the code is
2491 // replicated. We should really get rid of the single-threaded version
2492 // of the code to simplify things.
// Claims completed SATB buffers one at a time and applies the marking
// closure to their entries until none remain or this task must abort.
void G1CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counter productive if it did that. :-)
  _draining_satb_buffers = true;

  G1CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
  while (!has_aborted() &&
         satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
    // May set the abort flag for yield/timeout reasons (but not for
    // pending SATB buffers, thanks to the flag above).
    regular_clock_call();
  }

  _draining_satb_buffers = false;

  // Unless we aborted or are still in a concurrent phase, every
  // completed buffer must have been consumed by now.
  assert(has_aborted() ||
         concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  // again, this was a potentially expensive operation, decrease the
  // limits to get the regular clock call early
  decrease_limits();
}
2522
2523 void G1CMTask::print_stats() {
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/symbolTable.hpp"
28 #include "code/codeCache.hpp"
29 #include "gc/g1/concurrentMarkThread.inline.hpp"
30 #include "gc/g1/g1BarrierSet.hpp"
31 #include "gc/g1/g1CollectedHeap.inline.hpp"
32 #include "gc/g1/g1CollectorState.hpp"
33 #include "gc/g1/g1ConcurrentMark.inline.hpp"
34 #include "gc/g1/g1HeapVerifier.hpp"
35 #include "gc/g1/g1OopClosures.inline.hpp"
36 #include "gc/g1/g1CardLiveData.inline.hpp"
37 #include "gc/g1/g1Policy.hpp"
38 #include "gc/g1/g1StringDedup.hpp"
39 #include "gc/g1/heapRegion.inline.hpp"
40 #include "gc/g1/heapRegionRemSet.hpp"
41 #include "gc/g1/heapRegionSet.inline.hpp"
42 #include "gc/g1/suspendibleThreadSet.hpp"
43 #include "gc/shared/gcId.hpp"
44 #include "gc/shared/gcTimer.hpp"
45 #include "gc/shared/gcTrace.hpp"
46 #include "gc/shared/gcTraceTime.inline.hpp"
47 #include "gc/shared/genOopClosures.inline.hpp"
48 #include "gc/shared/referencePolicy.hpp"
49 #include "gc/shared/strongRootsScope.hpp"
50 #include "gc/shared/taskqueue.inline.hpp"
430
431 _parallel_workers(NULL),
432
433 _completed_initialization(false) {
434
435 _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
436 _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
437
438 // Create & start a ConcurrentMark thread.
439 _cmThread = new ConcurrentMarkThread(this);
440 assert(cmThread() != NULL, "CM Thread should have been created");
441 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
442 if (_cmThread->osthread() == NULL) {
443 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
444 }
445
446 assert(CGC_lock != NULL, "Where's the CGC_lock?");
447 assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
448 assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
449
450 SATBMarkQueueSet& satb_qs = G1BarrierSet::satb_mark_queue_set();
451 satb_qs.set_buffer_size(G1SATBBufferSize);
452
453 _root_regions.init(_g1h->survivor(), this);
454
455 if (ConcGCThreads > ParallelGCThreads) {
456 log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
457 ConcGCThreads, ParallelGCThreads);
458 return;
459 }
460 if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
461 // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
462 // if both are set
463 _sleep_factor = 0.0;
464 _marking_task_overhead = 1.0;
465 } else if (G1MarkingOverheadPercent > 0) {
466 // We will calculate the number of parallel marking threads based
467 // on a target overhead with respect to the soft real-time goal
468 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
469 double overall_cm_overhead =
470 (double) MaxGCPauseMillis * marking_overhead /
795 _has_aborted = false;
796
797 // Initialize marking structures. This has to be done in a STW phase.
798 reset();
799
800 // For each region note start of marking.
801 NoteStartOfMarkHRClosure startcl;
802 g1h->heap_region_iterate(&startcl);
803 }
804
805
// Safepoint work performed after the initial-mark root scan: switches the
// collector into concurrent-marking mode by enabling CM reference discovery
// and activating the SATB queues of all threads.
void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  // Publish the survivor regions recorded during the pause so the
  // concurrent phase can scan them as root regions.
  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}
828
829 /*
830 * Notice that in the next two methods, we actually leave the STS
831 * during the barrier sync and join it immediately afterwards. If we
832 * do not do this, the following deadlock can occur: one thread could
833 * be in the barrier sync code, waiting for the other thread to also
834 * sync up, whereas another one could be trying to yield, while also
835 * waiting for the other threads to sync up too.
1124
1125 double mark_work_end = os::elapsedTime();
1126
1127 weakRefsWork(clear_all_soft_refs);
1128
1129 if (has_overflown()) {
1130 // We overflowed. Restart concurrent marking.
1131 _restart_for_overflow = true;
1132
1133 // Verify the heap w.r.t. the previous marking bitmap.
1134 if (VerifyDuringGC) {
1135 HandleMark hm; // handle scope
1136 g1h->prepare_for_verify();
1137 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
1138 }
1139
1140 // Clear the marking state because we will be restarting
1141 // marking due to overflowing the global mark stack.
1142 reset_marking_state();
1143 } else {
1144 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1145 // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
1148 satb_mq_set.set_active_all_threads(false, /* new active value */
1149 true /* expected_active */);
1150
1151 if (VerifyDuringGC) {
1152 HandleMark hm; // handle scope
1153 g1h->prepare_for_verify();
1154 Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
1155 }
1156 g1h->verifier()->check_bitmaps("Remark End");
1157 assert(!restart_for_overflow(), "sanity");
1158 // Completely reset the marking state since marking completed
1159 set_non_marking_state();
1160 }
1161
1162 // Expand the marking stack, if we have to and if we can.
1163 if (_global_mark_stack.should_expand()) {
1164 _global_mark_stack.expand();
1823 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1824 _thread_parity(Threads::thread_claim_parity()) {}
1825
  // Processes one thread if we win the claim race for it this parity:
  // for a Java thread, scan the oops in its nmethods and drain its SATB
  // queue; for the VM thread, drain the shared SATB queue instead.
  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking
        // however the liveness of oops reachable from nmethods has very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
        // live by the SATB invariant but other oops recorded in nmethods may behave differently.
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        // The VM thread has no per-thread queue; its writes go to the
        // shared SATB queue.
        G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
      }
    }
  }
1847 };
1848
1849 class G1CMRemarkTask: public AbstractGangTask {
1850 private:
1851 G1ConcurrentMark* _cm;
1852 public:
1853 void work(uint worker_id) {
1854 // Since all available tasks are actually started, we should
1855 // only proceed if we're supposed to be active.
1856 if (worker_id < _cm->active_tasks()) {
1857 G1CMTask* task = _cm->task(worker_id);
1858 task->record_start_time();
1859 {
1860 ResourceMark rm;
1861 HandleMark hm;
1862
1863 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1891 g1h->ensure_parsability(false);
1892
1893 // this is remark, so we'll use up all active threads
1894 uint active_workers = g1h->workers()->active_workers();
1895 set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
1897 // value originally calculated in the G1ConcurrentMark
1898 // constructor and pass values of the active workers
1899 // through the gang in the task.
1900
1901 {
1902 StrongRootsScope srs(active_workers);
1903
1904 G1CMRemarkTask remarkTask(this, active_workers);
1905 // We will start all available threads, even if we decide that the
1906 // active_workers will be fewer. The extra ones will just bail out
1907 // immediately.
1908 g1h->workers()->run_task(&remarkTask);
1909 }
1910
1911 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1912 guarantee(has_overflown() ||
1913 satb_mq_set.completed_buffers_num() == 0,
1914 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1915 BOOL_TO_STR(has_overflown()),
1916 satb_mq_set.completed_buffers_num());
1917
1918 print_stats();
1919 }
1920
1921 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
1922 // Note we are overriding the read-only view of the prev map here, via
1923 // the cast.
1924 ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
1925 }
1926
1927 HeapRegion*
1928 G1ConcurrentMark::claim_region(uint worker_id) {
1929 // "checkpoint" the finger
1930 HeapWord* finger = _finger;
1931
2090 // since VerifyDuringGC verifies the objects marked during
2091 // a full GC against the previous bitmap.
2092
2093 {
2094 GCTraceTime(Debug, gc)("Clear Live Data");
2095 clear_live_data(_g1h->workers());
2096 }
2097 DEBUG_ONLY({
2098 GCTraceTime(Debug, gc)("Verify Live Data Clear");
2099 verify_live_data_clear();
2100 })
2101 // Empty mark stack
2102 reset_marking_state();
2103 for (uint i = 0; i < _max_worker_id; ++i) {
2104 _tasks[i]->clear_region_fields();
2105 }
2106 _first_overflow_barrier_sync.abort();
2107 _second_overflow_barrier_sync.abort();
2108 _has_aborted = true;
2109
2110 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2111 satb_mq_set.abandon_partial_marking();
2112 // This can be called either during or outside marking, we'll read
2113 // the expected_active value from the SATB queue set.
2114 satb_mq_set.set_active_all_threads(
2115 false, /* new active value */
2116 satb_mq_set.is_active() /* expected_active */);
2117 }
2118
2119 static void print_ms_time_info(const char* prefix, const char* name,
2120 NumberSeq& ns) {
2121 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2122 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2123 if (ns.num() > 0) {
2124 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]",
2125 prefix, ns.sd(), ns.maximum());
2126 }
2127 }
2128
2129 void G1ConcurrentMark::print_summary_info() {
2130 Log(gc, marking) log;
2331
2332 // (4) We check whether we should yield. If we have to, then we abort.
2333 if (SuspendibleThreadSet::should_yield()) {
2334 // We should yield. To do this we abort the task. The caller is
2335 // responsible for yielding.
2336 set_has_aborted();
2337 return;
2338 }
2339
2340 // (5) We check whether we've reached our time quota. If we have,
2341 // then we abort.
2342 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2343 if (elapsed_time_ms > _time_target_ms) {
2344 set_has_aborted();
2345 _has_timed_out = true;
2346 return;
2347 }
2348
  // (6) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
2351 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2352 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2353 // we do need to process SATB buffers, we'll abort and restart
2354 // the marking task to do so
2355 set_has_aborted();
2356 return;
2357 }
2358 }
2359
2360 void G1CMTask::recalculate_limits() {
2361 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2362 _words_scanned_limit = _real_words_scanned_limit;
2363
2364 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2365 _refs_reached_limit = _real_refs_reached_limit;
2366 }
2367
2368 void G1CMTask::decrease_limits() {
2369 // This is called when we believe that we're going to do an infrequent
2370 // operation which will increase the per byte scanned cost (i.e. move
2371 // entries to/from the global stack). It basically tries to decrease the
2484 while (!has_aborted() && get_entries_from_global_stack()) {
2485 drain_local_queue(partially);
2486 }
2487 }
2488 }
2489
2490 // SATB Queue has several assumptions on whether to call the par or
2491 // non-par versions of the methods. this is why some of the code is
2492 // replicated. We should really get rid of the single-threaded version
2493 // of the code to simplify things.
// Claims completed SATB buffers one at a time and applies the marking
// closure to their entries until none remain or this task must abort.
void G1CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counter productive if it did that. :-)
  _draining_satb_buffers = true;

  G1CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
  while (!has_aborted() &&
         satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
    // May set the abort flag for yield/timeout reasons (but not for
    // pending SATB buffers, thanks to the flag above).
    regular_clock_call();
  }

  _draining_satb_buffers = false;

  // Unless we aborted or are still in a concurrent phase, every
  // completed buffer must have been consumed by now.
  assert(has_aborted() ||
         concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  // again, this was a potentially expensive operation, decrease the
  // limits to get the regular clock call early
  decrease_limits();
}
2523
2524 void G1CMTask::print_stats() {
|