  // Initialise marking structures. This has to be done in a STW phase.
  reset();
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();
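  // (force_overflow_stw()/force_overflow_conc() back a marking stress
  // mode, enabled by a develop-time flag, that deliberately overflows
  // the global mark stack a bounded number of times per cycle to
  // exercise the overflow-and-restart path; init() resets that
  // per-cycle budget, which is the "remaining overflow number" the
  // comment above refers to.)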

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);
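
  // From this point on every mutator thread runs with an active SATB
  // queue. Conceptually the pre-write barrier then does something like
  // the following (a sketch of the SATB invariant only, not the actual
  // barrier code G1 emits):
  //
  //   if (satb_queue.is_active()) {   // the flag flipped above
  //     satb_queue.enqueue(*field);   // log the value about to be
  //   }                               // overwritten
  //   *field = new_value;
  //
  // Logging overwritten values is what preserves the
  // snapshot-at-the-beginning property that concurrent marking
  // relies on.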

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also

// ...

  _restart_for_overflow = false;

  size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
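  // The floor of one: when parallel_marking_threads() is zero, the
  // marking task below is executed directly in the current thread via
  // markingTask.work(0) rather than by a work gang.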
  force_overflow_conc()->init();
  set_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (parallel_marking_threads() > 0) {
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    gclog_or_tty->print(" VerifyDuringGC:(before)");
    Universe::heap()->prepare_for_verify();
    Universe::verify(/* allow dirty */ true,
                     /* silent */ false,
                     /* option */ VerifyOption_G1UsePrevMarking);
  }

  G1CollectorPolicy* g1p = g1h->g1_policy();

// ...
                          g1_par_note_end_task.max_live_bytes());

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // G1CollectedHeap::heap()->print();
  // gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d",
  //                        G1CollectedHeap::heap()->get_gc_time_stamp());

  if (PrintGC || PrintGCDetails) {
    g1h->print_size_transition(gclog_or_tty,
                               start_used_bytes,
                               g1h->used(),
                               g1h->capacity());
  }

  size_t cleaned_up_bytes = start_used_bytes - g1h->used();
  g1p->decrease_known_garbage_bytes(cleaned_up_bytes);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    gclog_or_tty->print(" VerifyDuringGC:(after)");
    Universe::heap()->prepare_for_verify();
    Universe::verify(/* allow dirty */ true,
                     /* silent */ false,
                     /* option */ VerifyOption_G1UsePrevMarking);
  }

  g1h->verify_region_sets_optional();
}

void ConcurrentMark::completeCleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

// ...
      // We call CMTask::do_marking_step() to completely drain the local and
      // global marking stacks. The routine is called in a loop, which we'll
      // exit if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the
      // G1CMParKeepAliveAndDrainClosure to the entries on the discovered ref
      // lists above) or we overflow the global marking stack.
      // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
      // while there may still be some work to do. (See the comment at the
      // beginning of CMTask::do_marking_step() for those conditions - one of which
      // is reaching the specified time target.) It is only when
      // CMTask::do_marking_step() returns without setting the has_aborted() flag
      // that the marking has completed.
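      // Put differently: returning from do_marking_step() with
      // has_aborted() clear means this task reached termination with
      // both stacks drained; returning with has_aborted() and
      // has_overflown() both set means we stop retrying here, and the
      // remark operation will abort and restart concurrent marking.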

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_stealing */,
                             true /* do_termination */);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  ConcurrentMark*  _cm;
  CMBitMap*        _bitmap;
  WorkGang*        _workers;
  int              _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          ConcurrentMark* cm,
                          CMBitMap* bitmap,
                          WorkGang* workers,
                          int n_workers) :
    _g1h(g1h), _cm(cm), _bitmap(bitmap),
    _workers(workers), _active_workers(n_workers)
  { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask&     _proc_task;
  G1CollectedHeap* _g1h;
  ConcurrentMark*  _cm;
  CMBitMap*        _bitmap;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       ConcurrentMark* cm,
                       CMBitMap* bitmap) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap)
  {}

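  // Gang worker i is bound to marking task i, so reference processing
  // reuses that task's local queues and per-task state: the keep-alive
  // closure marks referents and pushes them, and the drain closure
  // empties the stacks again.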
  virtual void work(int i) {
    CMTask* marking_task = _cm->task(i);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, _bitmap);
    G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);

    _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);

  // We need to reset the phase for each task execution so that
  // the termination protocol of CMTask::do_marking_step works.
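  // (do_marking_step() sizes its parallel task terminator from this
  // phase; leaving the phase sized for the concurrent marking workers
  // would make termination wait on the wrong number of threads.)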
  _cm->set_phase(_active_workers, false /* concurrent */);
  _g1h->set_par_threads(_active_workers);
  _workers->run_task(&proc_task_proxy);
  _g1h->set_par_threads(0);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task)
  { }

  virtual void work(int i) {
    _enq_task.work(i);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  _g1h->set_par_threads(_active_workers);
  _workers->run_task(&enq_task_proxy);
  _g1h->set_par_threads(0);
}

void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  ResourceMark rm;
  HandleMark hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ReferenceProcessor* rp = g1h->ref_processor_cm();

  // See the comment in G1CollectedHeap::ref_processing_init()
  // about how reference processing currently works in G1.

  // Process weak references.
  rp->setup_policy(clear_all_soft_refs);
  assert(_markStack.isEmpty(), "mark stack should be empty");

  G1CMIsAliveClosure g1_is_alive(g1h);
  G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
  G1CMDrainMarkingStackClosure
    g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);

  // We use the work gang from the G1CollectedHeap and we utilize all
  // the worker threads.
  int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
  active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
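  // Clamp to [1, _max_task_num]: the task proxy above binds gang
  // worker i to CMTask i, so we cannot run more reference processing
  // workers than there are marking tasks, and we always need at
  // least one.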

  G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
                                            g1h->workers(), active_workers);

  if (rp->processing_is_mt()) {
    // Set the degree of MT here. If the discovery is done MT, there
    // may have been a different number of threads doing the discovery
    // and a different number of discovered lists may have Ref objects.
    // That is OK as long as the Reference lists are balanced (see
    // balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    rp->process_discovered_references(&g1_is_alive,
                                      &g1_keep_alive,
                                      &g1_drain_mark_stack,
                                      &par_task_executor);

    // The work routines of the parallel keep_alive and drain_marking_stack
    // will set the has_overflown flag if we overflow the global marking
    // stack.
  } else {
    // Serial reference processing: with a NULL executor the
    // ReferenceProcessor applies the closures in the current thread.
    rp->process_discovered_references(&g1_is_alive,
                                      &g1_keep_alive,
                                      &g1_drain_mark_stack,
                                      NULL);
  }

  assert(_markStack.overflow() || _markStack.isEmpty(),
         "mark stack should be empty (unless it overflowed)");
  if (_markStack.overflow()) {
    // Should have been done already when we tried to push an
    // entry on to the global mark stack. But let's do it again.
    set_has_overflown();
  }

  if (rp->processing_is_mt()) {
    assert(rp->num_q() == active_workers, "why not");
    rp->enqueue_discovered_references(&par_task_executor);
  } else {
    rp->enqueue_discovered_references();
  }
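
  // Enqueueing links the surviving discovered references onto the
  // pending list, from which the java.lang.ref.Reference handler
  // thread will pick them up after the pause.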

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");

  // Now clean up stale oops in StringTable
  StringTable::unlink(&g1_is_alive);
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();
}
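
// Marking is complete at this point: the "next" bitmap built during
// this cycle becomes the "prev" bitmap that subsequent pauses use for
// liveness queries, while the old "prev" bitmap will be cleared and
// reused as the "next" bitmap of the following cycle.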
void ConcurrentMark::swapMarkBitMaps() {
  CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap  = (CMBitMap*)  temp;
}

class CMRemarkTask: public AbstractGangTask {
private:
  ConcurrentMark *_cm;

public:
  void work(int worker_i) {
    // Since all available tasks are actually started, we should

// ...
// processing SATB buffers.
class CMObjectClosure : public ObjectClosure {
private:
  CMTask* _task;

public:
  void do_object(oop obj) {
    _task->deal_with_reference(obj);
  }

  CMObjectClosure(CMTask* task) : _task(task) { }
};

G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               ConcurrentMark* cm,
                               CMTask* task)
  : _g1h(g1h), _cm(cm), _task(task) {
  assert(_ref_processor == NULL, "should be initialized to NULL");

  if (G1UseConcMarkReferenceProcessing) {
    _ref_processor = g1h->ref_processor_cm();
    assert(_ref_processor != NULL, "should not be NULL");
  }
}

void CMTask::setup_for_region(HeapRegion* hr) {
  // Separated the asserts so that we know which one fires.
  assert(hr != NULL,
         "claim_region() should have filtered out continues humongous regions");
  assert(!hr->continuesHumongous(),
         "claim_region() should have filtered out continues humongous regions");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT,
                           _task_id, hr);
  }

  _curr_region = hr;
  _finger      = hr->bottom();
  update_region_limit();
}
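
// A task scans _curr_region from _finger (initially bottom()) up to
// the limit computed by update_region_limit(), advancing its local
// finger past each object it visits; a global finger in ConcurrentMark
// similarly tracks how far region claiming has progressed.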