11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #if !defined(__clang_major__) && defined(__GNUC__)
26 // FIXME, formats have issues. Disable this macro definition, compile, and study warnings for more information.
27 #define ATTRIBUTE_PRINTF(x,y)
28 #endif
29
30 #include "precompiled.hpp"
31 #include "classfile/stringTable.hpp"
32 #include "code/codeCache.hpp"
33 #include "code/icBuffer.hpp"
34 #include "gc_implementation/g1/bufferingOopClosure.hpp"
35 #include "gc_implementation/g1/concurrentG1Refine.hpp"
36 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
37 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
38 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
39 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
40 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
41 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
42 #include "gc_implementation/g1/g1EvacFailure.hpp"
43 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
44 #include "gc_implementation/g1/g1Log.hpp"
45 #include "gc_implementation/g1/g1MarkSweep.hpp"
46 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
47 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
48 #include "gc_implementation/g1/g1RemSet.inline.hpp"
49 #include "gc_implementation/g1/g1StringDedup.hpp"
50 #include "gc_implementation/g1/g1YCTypes.hpp"
74 // to-be-collected) are printed at "strategic" points before / during
75 // / after the collection --- this is useful for debugging
76 #define YOUNG_LIST_VERBOSE 0
77 // CURRENT STATUS
78 // This file is under construction. Search for "FIXME".
79
80 // INVARIANTS/NOTES
81 //
82 // All allocation activity covered by the G1CollectedHeap interface is
83 // serialized by acquiring the HeapLock. This happens in mem_allocate
84 // and allocate_new_tlab, which are the "entry" points to the
85 // allocation code from the rest of the JVM. (Note that this does not
86 // apply to TLAB allocation, which is not part of this interface: it
87 // is done by clients of this interface.)
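//
// An illustrative sketch (assumed shape, not code from this file) of the
// serialization pattern described above: Heap_lock and MutexLocker are
// the real HotSpot primitives, the helper name is hypothetical.
//
//   HeapWord* result;
//   {
//     MutexLocker ml(Heap_lock);                    // serialize heap allocation
//     result = attempt_allocation_slow(word_size);  // hypothetical helper
//   }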
88
89 // Notes on implementation of parallelism in different tasks.
90 //
91 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
92 // The number of GC workers is passed to heap_region_par_iterate_chunked().
93 // It does use run_task() which sets _n_workers in the task.
94 // G1ParTask executes g1_process_strong_roots() ->
95 // SharedHeap::process_strong_roots() which calls eventually to
96 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
97 // SequentialSubTasksDone. SharedHeap::process_strong_roots() also
98 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
99 //
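// A minimal sketch of the SubTasksDone claim idiom referred to above
// (the same pattern appears verbatim in g1_process_strong_roots() below):
//
//   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
//     // exactly one worker claims and executes this subtask
//   }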
100
101 // Local to this file.
102
103 class RefineCardTableEntryClosure: public CardTableEntryClosure {
104 bool _concurrent;
105 public:
106 RefineCardTableEntryClosure() : _concurrent(true) { }
107
108 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
109 bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
110       // This path is executed concurrently by the refinement threads or
111       // the mutator threads, so we do not care if card_ptr contains
112       // references that point into the collection set.
113 assert(!oops_into_cset, "should be");
114
115 if (_concurrent && SuspendibleThreadSet::should_yield()) {
116 // Caller will actually yield.
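      // Returning false makes the caller stop scanning this buffer, so
      // the suspendible refinement thread can then join the safepoint.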
117 return false;
3362 void work(uint worker_id) {
3363 HandleMark hm;
3364 VerifyRegionClosure blk(true, _vo);
3365 _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
3366 _g1h->workers()->active_workers(),
3367 HeapRegion::ParVerifyClaimValue);
3368 if (blk.failures()) {
3369 _failures = true;
3370 }
3371 }
3372 };
3373
3374 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3375 if (SafepointSynchronize::is_at_safepoint()) {
3376 assert(Thread::current()->is_VM_thread(),
3377 "Expected to be executed serially by the VM thread at this point");
3378
3379 if (!silent) { gclog_or_tty->print("Roots "); }
3380 VerifyRootsClosure rootsCl(vo);
3381 VerifyKlassClosure klassCl(this, &rootsCl);
3382
3383 // We apply the relevant closures to all the oops in the
3384 // system dictionary, class loader data graph and the string table.
3385 // Don't verify the code cache here, since it's verified below.
3386 const int so = SO_AllClasses | SO_Strings;
3387
3388 // Need cleared claim bits for the strong roots processing
3389 ClassLoaderDataGraph::clear_claimed_marks();
3390
3391 process_strong_roots(true, // activate StrongRootsScope
3392 ScanningOption(so), // roots scanning options
3393 &rootsCl,
3394 &klassCl
3395 );
3396
3397 // Verify the nmethods in the code cache.
3398 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3399 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3400 CodeCache::blobs_do(&blobsCl);
3401
3402 bool failures = rootsCl.failures() || codeRootsCl.failures();
3403
3404 if (vo != VerifyOption_G1UseMarkWord) {
3405 // If we're verifying during a full GC then the region sets
3406 // will have been torn down at the start of the GC. Therefore
3407 // verifying the region sets will fail. So we only verify
3408 // the region sets when not in a full GC.
3409 if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3410 verify_region_sets();
3411 }
3412
3413 if (!silent) { gclog_or_tty->print("HeapRegions "); }
3414 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3415 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3416 "sanity check");
3417
3418 G1ParVerifyTask task(this, vo);
3419 assert(UseDynamicNumberOfGCThreads ||
3420 workers()->active_workers() == workers()->total_workers(),
3962 if (!G1StressConcRegionFreeing) {
3963 append_secondary_free_list_if_not_empty_with_lock();
3964 }
3965
3966 assert(check_young_list_well_formed(), "young list should be well formed");
3967 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3968 "sanity check");
3969
3970 // Don't dynamically change the number of GC threads this early. A value of
3971 // 0 is used to indicate serial work. When parallel work is done,
3972 // it will be set.
3973
3974 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3975 IsGCActiveMark x;
3976
3977 gc_prologue(false);
3978 increment_total_collections(false /* full gc */);
3979 increment_gc_time_stamp();
3980
3981 verify_before_gc();
3982 check_bitmaps("GC Start");
3983
3984 COMPILER2_PRESENT(DerivedPointerTable::clear());
3985
3986 // Please see comment in g1CollectedHeap.hpp and
3987 // G1CollectedHeap::ref_processing_init() to see how
3988 // reference processing currently works in G1.
3989
3990 // Enable discovery in the STW reference processor
3991 ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
3992 true /*verify_no_refs*/);
3993
3994 {
3995 // We want to temporarily turn off discovery by the
3996 // CM ref processor, if necessary, and turn it back on
3997       // again later if we do. Using a scoped
3998 // NoRefDiscovery object will do this.
3999 NoRefDiscovery no_cm_discovery(ref_processor_cm());
4000
4001 // Forget the current alloc region (we might even choose it to be part
4312 break;
4313 }
4314
4315 // Prevent humongous PLAB sizes for two reasons:
4316   // * PLABs are allocated using similar paths to oops, but should
4317 // never be in a humongous region
4318 // * Allowing humongous PLABs needlessly churns the region free lists
4319 return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
4320 }
4321
4322 void G1CollectedHeap::init_mutator_alloc_region() {
4323 assert(_mutator_alloc_region.get() == NULL, "pre-condition");
4324 _mutator_alloc_region.init();
4325 }
4326
4327 void G1CollectedHeap::release_mutator_alloc_region() {
4328 _mutator_alloc_region.release();
4329 assert(_mutator_alloc_region.get() == NULL, "post-condition");
4330 }
4331
4332 void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
4333 assert_at_safepoint(true /* should_be_vm_thread */);
4334
4335 _survivor_gc_alloc_region.init();
4336 _old_gc_alloc_region.init();
4337 HeapRegion* retained_region = _retained_old_gc_alloc_region;
4338 _retained_old_gc_alloc_region = NULL;
4339
4340 // We will discard the current GC alloc region if:
4341 // a) it's in the collection set (it can happen!),
4342 // b) it's already full (no point in using it),
4343 // c) it's empty (this means that it was emptied during
4344 // a cleanup and it should be on the free list now), or
4345 // d) it's humongous (this means that it was emptied
4346 // during a cleanup and was added to the free list, but
4347 // has been subsequently used to allocate a humongous
4348 // object that may be less than the region size).
4349 if (retained_region != NULL &&
4350 !retained_region->in_collection_set() &&
4351 !(retained_region->top() == retained_region->end()) &&
4352 !retained_region->is_empty() &&
4353 !retained_region->isHumongous()) {
4354 retained_region->record_top_and_timestamp();
4355 // The retained region was added to the old region set when it was
4356     // retired. We have to remove it now, since we don't keep regions
4357     // we are allocating into in the region sets. We'll re-add it later, when
4358 // it's retired again.
4359 _old_set.remove(retained_region);
4360 bool during_im = g1_policy()->during_initial_mark_pause();
4361 retained_region->note_start_of_copying(during_im);
4362 _old_gc_alloc_region.set(retained_region);
4363 _hr_printer.reuse(retained_region);
4364 evacuation_info.set_alloc_regions_used_before(retained_region->used());
4365 }
4366 }
4367
4368 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
4369 evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
4370 _old_gc_alloc_region.count());
4371 _survivor_gc_alloc_region.release();
4372 // If we have an old GC alloc region to release, we'll save it in
4373   // _retained_old_gc_alloc_region. If we don't,
4374 // _retained_old_gc_alloc_region will become NULL. This is what we
4375 // want either way so no reason to check explicitly for either
4376 // condition.
4377 _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
4378
4379 if (ResizePLAB) {
4380 _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4381 _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4382 }
4383 }
4384
4385 void G1CollectedHeap::abandon_gc_alloc_regions() {
4386 assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
4387 assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
4570 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4571 assert(from_obj != to_obj, "should not be self-forwarded");
4572
4573 assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
4574 assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
4575
4576 // The object might be in the process of being copied by another
4577 // worker so we cannot trust that its to-space image is
4578 // well-formed. So we have to read its size from its from-space
4579 // image which we know should not be changing.
4580 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4581 }
4582
4583 template <class T>
4584 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4585 if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4586 _scanned_klass->record_modified_oops();
4587 }
4588 }
4589
4590 template <G1Barrier barrier, bool do_mark_object>
4591 template <class T>
4592 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4593 T heap_oop = oopDesc::load_heap_oop(p);
4594
4595 if (oopDesc::is_null(heap_oop)) {
4596 return;
4597 }
4598
4599 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4600
4601 assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4602
4603 if (_g1->in_cset_fast_test(obj)) {
4604 oop forwardee;
4605 if (obj->is_forwarded()) {
4606 forwardee = obj->forwardee();
4607 } else {
4608 forwardee = _par_scan_state->copy_to_survivor_space(obj);
4609 }
4610 assert(forwardee != NULL, "forwardee should not be NULL");
4611 oopDesc::encode_store_heap_oop(p, forwardee);
4612 if (do_mark_object && forwardee != obj) {
4613 // If the object is self-forwarded we don't need to explicitly
4614       // mark it; the evacuation failure protocol will do so.
4615 mark_forwarded_object(obj, forwardee);
4616 }
4617
4618 if (barrier == G1BarrierKlass) {
4619 do_klass_barrier(p, forwardee);
4620 }
4621 } else {
4622     // The object is not in the collection set. If we're a root scanning
4623 // closure during an initial mark pause (i.e. do_mark_object will
4624 // be true) then attempt to mark the object.
4625 if (do_mark_object) {
4626 mark_object(obj);
4627 }
4628 }
4629
4630 if (barrier == G1BarrierEvac) {
4631 _par_scan_state->update_rs(_from, p, _worker_id);
4632 }
4633 }
4634
4635 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
4636 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4637
4638 class G1ParEvacuateFollowersClosure : public VoidClosure {
4639 protected:
4640 G1CollectedHeap* _g1h;
4641 G1ParScanThreadState* _par_scan_state;
4642 RefToScanQueueSet* _queues;
4643 ParallelTaskTerminator* _terminator;
4644
4645 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
4646 RefToScanQueueSet* queues() { return _queues; }
4647 ParallelTaskTerminator* terminator() { return _terminator; }
4648
4649 public:
4650 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4651 G1ParScanThreadState* par_scan_state,
4652 RefToScanQueueSet* queues,
4653 ParallelTaskTerminator* terminator)
4654 : _g1h(g1h), _par_scan_state(par_scan_state),
4655 _queues(queues), _terminator(terminator) {}
4656
4729 RefToScanQueueSet* queues() { return _queues; }
4730
4731 RefToScanQueue *work_queue(int i) {
4732 return queues()->queue(i);
4733 }
4734
4735 ParallelTaskTerminator* terminator() { return &_terminator; }
4736
4737 virtual void set_for_termination(int active_workers) {
4738 // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4739 // in the young space (_par_seq_tasks) in the G1 heap
4740 // for SequentialSubTasksDone.
4741 // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
4742 // both of which need setting by set_n_termination().
4743 _g1h->SharedHeap::set_n_termination(active_workers);
4744 _g1h->set_n_termination(active_workers);
4745 terminator()->reset_for_reuse(active_workers);
4746 _n_workers = active_workers;
4747 }
4748
4749 void work(uint worker_id) {
4750 if (worker_id >= _n_workers) return; // no work needed this round
4751
4752 double start_time_ms = os::elapsedTime() * 1000.0;
4753 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4754
4755 {
4756 ResourceMark rm;
4757 HandleMark hm;
4758
4759 ReferenceProcessor* rp = _g1h->ref_processor_stw();
4760
4761 G1ParScanThreadState pss(_g1h, worker_id, rp);
4762 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4763
4764 pss.set_evac_failure_closure(&evac_failure_cl);
4765
4766 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp);
4767 G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp);
4768
4769 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4770 G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
4771
4772 bool only_young = _g1h->g1_policy()->gcs_are_young();
4773 G1KlassScanClosure scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
4774 G1KlassScanClosure only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
4775
4776 OopClosure* scan_root_cl = &only_scan_root_cl;
4777 G1KlassScanClosure* scan_klasses_cl = &only_scan_klasses_cl_s;
4778
4779 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4780 // We also need to mark copied objects.
4781 scan_root_cl = &scan_mark_root_cl;
4782 scan_klasses_cl = &scan_mark_klasses_cl_s;
4783 }
4784
4785 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
4786
4787 // Don't scan the scavengable methods in the code cache as part
4788 // of strong root scanning. The code roots that point into a
4789 // region in the collection set are scanned when we scan the
4790 // region's RSet.
4791 int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
4792
4793 pss.start_strong_roots();
4794 _g1h->g1_process_strong_roots(/* is scavenging */ true,
4795 SharedHeap::ScanningOption(so),
4796 scan_root_cl,
4797 &push_heap_rs_cl,
4798 scan_klasses_cl,
4799 worker_id);
4800 pss.end_strong_roots();
4801
4802 {
4803 double start = os::elapsedTime();
4804 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4805 evac.do_void();
4806 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4807 double term_ms = pss.term_time()*1000.0;
4808 _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4809 _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4810 }
4811 _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4812 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4813
4814 if (ParallelGCVerbose) {
4815 MutexLocker x(stats_lock());
4816 pss.print_termination_stats(worker_id);
4817 }
4818
4819 assert(pss.queue_is_empty(), "should be empty");
4820
4821 // Close the inner scope so that the ResourceMark and HandleMark
4822 // destructors are executed here and are included as part of the
4823 // "GC Worker Time".
4824 }
4825
4826 double end_time_ms = os::elapsedTime() * 1000.0;
4827 _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4828 }
4829 };
4830
4831 // *** Common G1 Evacuation Stuff
4832
4833 // This method is run in a GC worker.
4834
4835 void
4836 G1CollectedHeap::
4837 g1_process_strong_roots(bool is_scavenging,
4838 ScanningOption so,
4839 OopClosure* scan_non_heap_roots,
4840 OopsInHeapRegionClosure* scan_rs,
4841 G1KlassScanClosure* scan_klasses,
4842 uint worker_i) {
4843
4844 // First scan the strong roots
4845 double ext_roots_start = os::elapsedTime();
4846 double closure_app_time_sec = 0.0;
4847
4848 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4849
4850 process_strong_roots(false, // no scoping; this is parallel code
4851 so,
4852 &buf_scan_non_heap_roots,
4853 scan_klasses
4854 );
4855
4856 // Now the CM ref_processor roots.
4857 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4858 // We need to treat the discovered reference lists of the
4859 // concurrent mark ref processor as roots and keep entries
4860 // (which are added by the marking threads) on them live
4861 // until they can be processed at the end of marking.
4862 ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4863 }
4864
4865 // Finish up any enqueued closure apps (attributed as object copy time).
4866 buf_scan_non_heap_roots.done();
4867
4868 double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
4869
4870 g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4871
4872 double ext_root_time_ms =
4873 ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4874
4875 g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4876
4877 // During conc marking we have to filter the per-thread SATB buffers
4878   // to make sure we remove any oops that point into the CSet
4879   // (which will show up as implicitly live).
4880 double satb_filtering_ms = 0.0;
4881 if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
4882 if (mark_in_progress()) {
4883 double satb_filter_start = os::elapsedTime();
4884
4885 JavaThread::satb_mark_queue_set().filter_thread_buffers();
4886
4887 satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
4888 }
4889 }
4890 g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
4891
4892 // If this is an initial mark pause, and we're not scanning
4893 // the entire code cache, we need to mark the oops in the
4894 // strong code root lists for the regions that are not in
4895 // the collection set.
4896 // Note all threads participate in this set of root tasks.
4897 double mark_strong_code_roots_ms = 0.0;
4898 if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) {
4899 double mark_strong_roots_start = os::elapsedTime();
4900 mark_strong_code_roots(worker_i);
4901 mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
4902 }
4903 g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
4904
4905 // Now scan the complement of the collection set.
4906 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
4907 g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
4908
4909 _process_strong_tasks->all_tasks_completed();
4910 }
4911
4912 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4913 private:
4914 BoolObjectClosure* _is_alive;
4915 int _initial_string_table_size;
4916 int _initial_symbol_table_size;
4917
4918 bool _process_strings;
4919 int _strings_processed;
4920 int _strings_removed;
4921
4922 bool _process_symbols;
4923 int _symbols_processed;
4924 int _symbols_removed;
4925
4926 bool _do_in_parallel;
4927 public:
4928 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4929 AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
4930 _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
4931 _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
4932 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4933
4934 _initial_string_table_size = StringTable::the_table()->table_size();
4935 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4936 if (process_strings) {
4937 StringTable::clear_parallel_claimed_index();
4938 }
4939 if (process_symbols) {
4940 SymbolTable::clear_parallel_claimed_index();
4941 }
4942 }
4943
4944 ~G1StringSymbolTableUnlinkTask() {
4945 guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4946 err_msg("claim value %d after unlink less than initial string table size %d",
4947 StringTable::parallel_claimed_index(), _initial_string_table_size));
4948 guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4949 err_msg("claim value %d after unlink less than initial symbol table size %d",
4950 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
4951 }
4952
4953 void work(uint worker_id) {
4954 if (_do_in_parallel) {
4955 int strings_processed = 0;
4956 int strings_removed = 0;
4957 int symbols_processed = 0;
4958 int symbols_removed = 0;
4959 if (_process_strings) {
4960 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4961 Atomic::add(strings_processed, &_strings_processed);
4962 Atomic::add(strings_removed, &_strings_removed);
4963 }
4964 if (_process_symbols) {
4965 SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4966 Atomic::add(symbols_processed, &_symbols_processed);
4967 Atomic::add(symbols_removed, &_symbols_removed);
4968 }
4969 } else {
4970 if (_process_strings) {
4971 StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
4972 }
4973 if (_process_symbols) {
4974 SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
4975 }
4976 }
4977 }
4978
4979 size_t strings_processed() const { return (size_t)_strings_processed; }
4980 size_t strings_removed() const { return (size_t)_strings_removed; }
4981
4982 size_t symbols_processed() const { return (size_t)_symbols_processed; }
4983 size_t symbols_removed() const { return (size_t)_symbols_removed; }
4984 };
4985
4986 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
4987 bool process_strings, bool process_symbols) {
4988 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
4989 _g1h->workers()->active_workers() : 1);
4990
4991 G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
4992 if (G1CollectedHeap::use_parallel_gc_threads()) {
4993 set_par_threads(n_workers);
4994 workers()->run_task(&g1_unlink_task);
4995 set_par_threads(0);
4996 } else {
4997 g1_unlink_task.work(0);
4998 }
4999 if (G1TraceStringSymbolTableScrubbing) {
5000 gclog_or_tty->print_cr("Cleaned string and symbol table, "
5001 "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
5002 "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
5003 g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
5004 g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
5005 }
5006
5007 if (G1StringDedup::is_enabled()) {
5008 G1StringDedup::unlink(is_alive);
5009 }
5010 }
5011
5012 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5013 private:
5014 DirtyCardQueueSet* _queue;
5015 public:
5016 G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5017
5018 virtual void work(uint worker_id) {
5019 double start_time = os::elapsedTime();
5020
5021 RedirtyLoggedCardTableEntryClosure cl;
5022 if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
5023 _queue->par_apply_closure_to_all_completed_buffers(&cl);
5024 } else {
5577 workers()->set_active_workers(n_workers);
5578 set_par_threads(n_workers);
5579 } else {
5580 assert(n_par_threads() == 0,
5581 "Should be the original non-parallel value");
5582 n_workers = 1;
5583 }
5584
5585 G1ParTask g1_par_task(this, _task_queues);
5586
5587 init_for_evac_failure(NULL);
5588
5589 rem_set()->prepare_for_younger_refs_iterate(true);
5590
5591 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5592 double start_par_time_sec = os::elapsedTime();
5593 double end_par_time_sec;
5594
5595 {
5596 StrongRootsScope srs(this);
5597
5598 if (G1CollectedHeap::use_parallel_gc_threads()) {
5599 // The individual threads will set their evac-failure closures.
5600 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5601       // These tasks use SharedHeap::_process_strong_tasks
5602 assert(UseDynamicNumberOfGCThreads ||
5603 workers()->active_workers() == workers()->total_workers(),
5604 "If not dynamic should be using all the workers");
5605 workers()->run_task(&g1_par_task);
5606 } else {
5607 g1_par_task.set_for_termination(n_workers);
5608 g1_par_task.work(0);
5609 }
5610 end_par_time_sec = os::elapsedTime();
5611
5612 // Closing the inner scope will execute the destructor
5613 // for the StrongRootsScope object. We record the current
5614 // elapsed time before closing the scope so that time
5615 // taken for the SRS destructor is NOT included in the
5616 // reported parallel time.
6610 err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
6611 HR_FORMAT_PARAMS(hr)));
6612 hr->migrate_strong_code_roots();
6613 return false;
6614 }
6615 };
6616
6617 void G1CollectedHeap::migrate_strong_code_roots() {
6618 MigrateCodeRootsHeapRegionClosure cl;
6619 double migrate_start = os::elapsedTime();
6620 collection_set_iterate(&cl);
6621 double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
6622 g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
6623 }
6624
6625 void G1CollectedHeap::purge_code_root_memory() {
6626 double purge_start = os::elapsedTime();
6627 G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
6628 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
6629 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
6630 }
6631
6632 // Mark all the code roots that point into regions *not* in the
6633 // collection set.
6634 //
6635 // Note we do not want to use a "marking" CodeBlobToOopClosure while
6636 // walking the code roots lists of regions not in the collection
6637 // set. Suppose we have an nmethod (M) that points to objects in two
6638 // separate regions - one in the collection set (R1) and one not (R2).
6639 // Using a "marking" CodeBlobToOopClosure here would result in "marking"
6640 // nmethod M when walking the code roots for R1. When we come to scan
6641 // the code roots for R2, we would see that M is already marked and it
6642 // would be skipped and the objects in R2 that are referenced from M
6643 // would not be evacuated.
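//
// Schematically, with nmethod M, R1 in the collection set and R2 not:
//
//   M --> o1 in R1   (evacuated when R1's code roots are scanned)
//   M --> o2 in R2   (would be skipped if M had already been "marked")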
6644
6645 class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
6646
6647 class MarkStrongCodeRootOopClosure: public OopClosure {
6648 ConcurrentMark* _cm;
6649 HeapRegion* _hr;
6650 uint _worker_id;
6651
6652 template <class T> void do_oop_work(T* p) {
6653 T heap_oop = oopDesc::load_heap_oop(p);
6654 if (!oopDesc::is_null(heap_oop)) {
6655 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6656 // Only mark objects in the region (which is assumed
6657       // not to be in the collection set).
6658 if (_hr->is_in(obj)) {
6659 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
6660 }
6661 }
6662 }
6663
6664 public:
6665 MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
6666 _cm(cm), _hr(hr), _worker_id(worker_id) {
6667 assert(!_hr->in_collection_set(), "sanity");
6668 }
6669
6670 void do_oop(narrowOop* p) { do_oop_work(p); }
6671 void do_oop(oop* p) { do_oop_work(p); }
6672 };
6673
6674 MarkStrongCodeRootOopClosure _oop_cl;
6675
6676 public:
6677 MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
6678 _oop_cl(cm, hr, worker_id) {}
6679
6680 void do_code_blob(CodeBlob* cb) {
6681 nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
6682 if (nm != NULL) {
6683 nm->oops_do(&_oop_cl);
6684 }
6685 }
6686 };
6687
6688 class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
6689 G1CollectedHeap* _g1h;
6690 uint _worker_id;
6691
6692 public:
6693 MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
6694 _g1h(g1h), _worker_id(worker_id) {}
6695
6696 bool doHeapRegion(HeapRegion *hr) {
6697 HeapRegionRemSet* hrrs = hr->rem_set();
6698 if (hr->continuesHumongous()) {
6699 // Code roots should never be attached to a continuation of a humongous region
6700 assert(hrrs->strong_code_roots_list_length() == 0,
6701 err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
6702 " starting at "HR_FORMAT", but has "SIZE_FORMAT,
6703 HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
6704 hrrs->strong_code_roots_list_length()));
6705 return false;
6706 }
6707
6708 if (hr->in_collection_set()) {
6709 // Don't mark code roots into regions in the collection set here.
6710 // They will be marked when we scan them.
6711 return false;
6712 }
6713
6714 MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
6715 hr->strong_code_roots_do(&cb_cl);
6716 return false;
6717 }
6718 };
6719
6720 void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
6721 MarkStrongCodeRootsHRClosure cl(this, worker_id);
6722 if (G1CollectedHeap::use_parallel_gc_threads()) {
6723 heap_region_par_iterate_chunked(&cl,
6724 worker_id,
6725 workers()->active_workers(),
6726 HeapRegion::ParMarkRootClaimValue);
6727 } else {
6728 heap_region_iterate(&cl);
6729 }
6730 }
6731
6732 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
6733 G1CollectedHeap* _g1h;
6734
6735 public:
6736 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
6737 _g1h(g1h) {}
6738
6739 void do_code_blob(CodeBlob* cb) {
6740 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
6741 if (nm == NULL) {
6742 return;
6743 }
6744
6745 if (ScavengeRootsInCode) {
6746 _g1h->register_nmethod(nm);
6747 }
6748 }
6749 };
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #if !defined(__clang_major__) && defined(__GNUC__)
26 // FIXME, formats have issues. Disable this macro definition, compile, and study warnings for more information.
27 #define ATTRIBUTE_PRINTF(x,y)
28 #endif
29
30 #include "precompiled.hpp"
31 #include "classfile/metadataOnStackMark.hpp"
32 #include "classfile/stringTable.hpp"
33 #include "code/codeCache.hpp"
34 #include "code/icBuffer.hpp"
35 #include "gc_implementation/g1/bufferingOopClosure.hpp"
36 #include "gc_implementation/g1/concurrentG1Refine.hpp"
37 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
38 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
39 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
40 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
41 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
42 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
43 #include "gc_implementation/g1/g1EvacFailure.hpp"
44 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
45 #include "gc_implementation/g1/g1Log.hpp"
46 #include "gc_implementation/g1/g1MarkSweep.hpp"
47 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
48 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
49 #include "gc_implementation/g1/g1RemSet.inline.hpp"
50 #include "gc_implementation/g1/g1StringDedup.hpp"
51 #include "gc_implementation/g1/g1YCTypes.hpp"
75 // to-be-collected) are printed at "strategic" points before / during
76 // / after the collection --- this is useful for debugging
77 #define YOUNG_LIST_VERBOSE 0
78 // CURRENT STATUS
79 // This file is under construction. Search for "FIXME".
80
81 // INVARIANTS/NOTES
82 //
83 // All allocation activity covered by the G1CollectedHeap interface is
84 // serialized by acquiring the HeapLock. This happens in mem_allocate
85 // and allocate_new_tlab, which are the "entry" points to the
86 // allocation code from the rest of the JVM. (Note that this does not
87 // apply to TLAB allocation, which is not part of this interface: it
88 // is done by clients of this interface.)
89
90 // Notes on implementation of parallelism in different tasks.
91 //
92 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
93 // The number of GC workers is passed to heap_region_par_iterate_chunked().
94 // It does use run_task() which sets _n_workers in the task.
95 // G1ParTask executes g1_process_roots() ->
96 // SharedHeap::process_roots() which calls eventually to
97 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
98 // SequentialSubTasksDone. SharedHeap::process_roots() also
99 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
100 //
101
102 // Local to this file.
103
104 class RefineCardTableEntryClosure: public CardTableEntryClosure {
105 bool _concurrent;
106 public:
107 RefineCardTableEntryClosure() : _concurrent(true) { }
108
109 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
110 bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
111       // This path is executed concurrently by the refinement threads or
112       // the mutator threads, so we do not care if card_ptr contains
113       // references that point into the collection set.
114 assert(!oops_into_cset, "should be");
115
116 if (_concurrent && SuspendibleThreadSet::should_yield()) {
117 // Caller will actually yield.
118 return false;
3363 void work(uint worker_id) {
3364 HandleMark hm;
3365 VerifyRegionClosure blk(true, _vo);
3366 _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
3367 _g1h->workers()->active_workers(),
3368 HeapRegion::ParVerifyClaimValue);
3369 if (blk.failures()) {
3370 _failures = true;
3371 }
3372 }
3373 };
3374
3375 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3376 if (SafepointSynchronize::is_at_safepoint()) {
3377 assert(Thread::current()->is_VM_thread(),
3378 "Expected to be executed serially by the VM thread at this point");
3379
3380 if (!silent) { gclog_or_tty->print("Roots "); }
3381 VerifyRootsClosure rootsCl(vo);
3382 VerifyKlassClosure klassCl(this, &rootsCl);
3383 CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3384
3385 // We apply the relevant closures to all the oops in the
3386 // system dictionary, class loader data graph, the string table
3387 // and the nmethods in the code cache.
3388 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3389 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3390
3391 process_all_roots(true, // activate StrongRootsScope
3392 SO_AllCodeCache, // roots scanning options
3393 &rootsCl,
3394 &cldCl,
3395 &blobsCl);
3396
3397 bool failures = rootsCl.failures() || codeRootsCl.failures();
3398
3399 if (vo != VerifyOption_G1UseMarkWord) {
3400 // If we're verifying during a full GC then the region sets
3401 // will have been torn down at the start of the GC. Therefore
3402 // verifying the region sets will fail. So we only verify
3403 // the region sets when not in a full GC.
3404 if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3405 verify_region_sets();
3406 }
3407
3408 if (!silent) { gclog_or_tty->print("HeapRegions "); }
3409 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3410 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3411 "sanity check");
3412
3413 G1ParVerifyTask task(this, vo);
3414 assert(UseDynamicNumberOfGCThreads ||
3415 workers()->active_workers() == workers()->total_workers(),
3957 if (!G1StressConcRegionFreeing) {
3958 append_secondary_free_list_if_not_empty_with_lock();
3959 }
3960
3961 assert(check_young_list_well_formed(), "young list should be well formed");
3962 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3963 "sanity check");
3964
3965 // Don't dynamically change the number of GC threads this early. A value of
3966 // 0 is used to indicate serial work. When parallel work is done,
3967 // it will be set.
3968
3969 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3970 IsGCActiveMark x;
3971
3972 gc_prologue(false);
3973 increment_total_collections(false /* full gc */);
3974 increment_gc_time_stamp();
3975
3976 verify_before_gc();
3977
3978 check_bitmaps("GC Start");
3979
3980 COMPILER2_PRESENT(DerivedPointerTable::clear());
3981
3982 // Please see comment in g1CollectedHeap.hpp and
3983 // G1CollectedHeap::ref_processing_init() to see how
3984 // reference processing currently works in G1.
3985
3986 // Enable discovery in the STW reference processor
3987 ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
3988 true /*verify_no_refs*/);
3989
3990 {
3991 // We want to temporarily turn off discovery by the
3992 // CM ref processor, if necessary, and turn it back on
3993       // again later if we do. Using a scoped
3994 // NoRefDiscovery object will do this.
3995 NoRefDiscovery no_cm_discovery(ref_processor_cm());
3996
3997 // Forget the current alloc region (we might even choose it to be part
4308 break;
4309 }
4310
4311 // Prevent humongous PLAB sizes for two reasons:
4312   // * PLABs are allocated using similar paths to oops, but should
4313 // never be in a humongous region
4314 // * Allowing humongous PLABs needlessly churns the region free lists
4315 return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
4316 }
4317
4318 void G1CollectedHeap::init_mutator_alloc_region() {
4319 assert(_mutator_alloc_region.get() == NULL, "pre-condition");
4320 _mutator_alloc_region.init();
4321 }
4322
4323 void G1CollectedHeap::release_mutator_alloc_region() {
4324 _mutator_alloc_region.release();
4325 assert(_mutator_alloc_region.get() == NULL, "post-condition");
4326 }
4327
4328 void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
4329 HeapRegion* retained_region = _retained_old_gc_alloc_region;
4330 _retained_old_gc_alloc_region = NULL;
4331
4332 // We will discard the current GC alloc region if:
4333 // a) it's in the collection set (it can happen!),
4334 // b) it's already full (no point in using it),
4335 // c) it's empty (this means that it was emptied during
4336 // a cleanup and it should be on the free list now), or
4337 // d) it's humongous (this means that it was emptied
4338 // during a cleanup and was added to the free list, but
4339 // has been subsequently used to allocate a humongous
4340 // object that may be less than the region size).
4341 if (retained_region != NULL &&
4342 !retained_region->in_collection_set() &&
4343 !(retained_region->top() == retained_region->end()) &&
4344 !retained_region->is_empty() &&
4345 !retained_region->isHumongous()) {
4346 retained_region->record_top_and_timestamp();
4347 // The retained region was added to the old region set when it was
4348     // retired. We have to remove it now, since we don't keep regions
4349     // we are allocating into in the region sets. We'll re-add it later, when
4350 // it's retired again.
4351 _old_set.remove(retained_region);
4352 bool during_im = g1_policy()->during_initial_mark_pause();
4353 retained_region->note_start_of_copying(during_im);
4354 _old_gc_alloc_region.set(retained_region);
4355 _hr_printer.reuse(retained_region);
4356 evacuation_info.set_alloc_regions_used_before(retained_region->used());
4357 }
4358 }
4359
4360 void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
4361 assert_at_safepoint(true /* should_be_vm_thread */);
4362
4363 _survivor_gc_alloc_region.init();
4364 _old_gc_alloc_region.init();
4365
4366 use_retained_old_gc_alloc_region(evacuation_info);
4367 }
4368
4369 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
4370 evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
4371 _old_gc_alloc_region.count());
4372 _survivor_gc_alloc_region.release();
4373 // If we have an old GC alloc region to release, we'll save it in
4374   // _retained_old_gc_alloc_region. If we don't,
4375 // _retained_old_gc_alloc_region will become NULL. This is what we
4376 // want either way so no reason to check explicitly for either
4377 // condition.
4378 _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
4379
4380 if (ResizePLAB) {
4381 _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4382 _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4383 }
4384 }
4385
4386 void G1CollectedHeap::abandon_gc_alloc_regions() {
4387 assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
4388 assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
4571 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4572 assert(from_obj != to_obj, "should not be self-forwarded");
4573
4574 assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
4575 assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
4576
4577 // The object might be in the process of being copied by another
4578 // worker so we cannot trust that its to-space image is
4579 // well-formed. So we have to read its size from its from-space
4580 // image which we know should not be changing.
4581 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4582 }
4583
4584 template <class T>
4585 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4586 if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4587 _scanned_klass->record_modified_oops();
4588 }
4589 }
4590
4591 template <G1Barrier barrier, G1Mark do_mark_object>
4592 template <class T>
4593 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4594 T heap_oop = oopDesc::load_heap_oop(p);
4595
4596 if (oopDesc::is_null(heap_oop)) {
4597 return;
4598 }
4599
4600 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4601
4602 assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4603
4604 if (_g1->in_cset_fast_test(obj)) {
4605 oop forwardee;
4606 if (obj->is_forwarded()) {
4607 forwardee = obj->forwardee();
4608 } else {
4609 forwardee = _par_scan_state->copy_to_survivor_space(obj);
4610 }
4611 assert(forwardee != NULL, "forwardee should not be NULL");
4612 oopDesc::encode_store_heap_oop(p, forwardee);
4613 if (do_mark_object != G1MarkNone && forwardee != obj) {
4614 // If the object is self-forwarded we don't need to explicitly
4615       // mark it; the evacuation failure protocol will do so.
4616 mark_forwarded_object(obj, forwardee);
4617 }
4618
4619 if (barrier == G1BarrierKlass) {
4620 do_klass_barrier(p, forwardee);
4621 }
4622 } else {
4623     // The object is not in the collection set. If we're a root scanning
4624 // closure during an initial mark pause then attempt to mark the object.
4625 if (do_mark_object == G1MarkFromRoot) {
4626 mark_object(obj);
4627 }
4628 }
4629
4630 if (barrier == G1BarrierEvac) {
4631 _par_scan_state->update_rs(_from, p, _worker_id);
4632 }
4633 }
4634
4635 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
4636 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
4637
4638 class G1ParEvacuateFollowersClosure : public VoidClosure {
4639 protected:
4640 G1CollectedHeap* _g1h;
4641 G1ParScanThreadState* _par_scan_state;
4642 RefToScanQueueSet* _queues;
4643 ParallelTaskTerminator* _terminator;
4644
4645 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
4646 RefToScanQueueSet* queues() { return _queues; }
4647 ParallelTaskTerminator* terminator() { return _terminator; }
4648
4649 public:
4650 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4651 G1ParScanThreadState* par_scan_state,
4652 RefToScanQueueSet* queues,
4653 ParallelTaskTerminator* terminator)
4654 : _g1h(g1h), _par_scan_state(par_scan_state),
4655 _queues(queues), _terminator(terminator) {}
4656
4729 RefToScanQueueSet* queues() { return _queues; }
4730
4731 RefToScanQueue *work_queue(int i) {
4732 return queues()->queue(i);
4733 }
4734
4735 ParallelTaskTerminator* terminator() { return &_terminator; }
4736
4737 virtual void set_for_termination(int active_workers) {
4738 // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4739 // in the young space (_par_seq_tasks) in the G1 heap
4740 // for SequentialSubTasksDone.
4741 // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
4742 // both of which need setting by set_n_termination().
4743 _g1h->SharedHeap::set_n_termination(active_workers);
4744 _g1h->set_n_termination(active_workers);
4745 terminator()->reset_for_reuse(active_workers);
4746 _n_workers = active_workers;
4747 }
4748
4749 // Helps out with CLD processing.
4750 //
4751 // During InitialMark we need to:
4752 // 1) Scavenge all CLDs for the young GC.
4753 // 2) Mark all objects directly reachable from strong CLDs.
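// The G1Mark template parameter selects the marking behaviour; the strong
// and weak variants instantiated in work() below differ only in that
// parameter and in whether they claim CLDs.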
4754 template <G1Mark do_mark_object>
4755 class G1CLDClosure : public CLDClosure {
4756 G1ParCopyClosure<G1BarrierNone, do_mark_object>* _oop_closure;
4757 G1ParCopyClosure<G1BarrierKlass, do_mark_object> _oop_in_klass_closure;
4758 G1KlassScanClosure _klass_in_cld_closure;
4759 bool _claim;
4760
4761 public:
4762 G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4763 bool only_young, bool claim)
4764 : _oop_closure(oop_closure),
4765 _oop_in_klass_closure(oop_closure->g1(),
4766 oop_closure->pss(),
4767 oop_closure->rp()),
4768 _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
4769 _claim(claim) {
4770
4771 }
4772
4773 void do_cld(ClassLoaderData* cld) {
4774 cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
4775 }
4776 };
4777
4778 class G1CodeBlobClosure: public CodeBlobClosure {
4779 OopClosure* _f;
4780
4781 public:
4782 G1CodeBlobClosure(OopClosure* f) : _f(f) {}
4783 void do_code_blob(CodeBlob* blob) {
4784 nmethod* that = blob->as_nmethod_or_null();
4785 if (that != NULL) {
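        // test_set_oops_do_mark() claims the nmethod: only the first
        // worker to reach it applies the closure, so each nmethod's oops
        // are processed and its relocations fixed exactly once per pause.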
4786 if (!that->test_set_oops_do_mark()) {
4787 that->oops_do(_f);
4788 that->fix_oop_relocations();
4789 }
4790 }
4791 }
4792 };
4793
4794 void work(uint worker_id) {
4795 if (worker_id >= _n_workers) return; // no work needed this round
4796
4797 double start_time_ms = os::elapsedTime() * 1000.0;
4798 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4799
4800 {
4801 ResourceMark rm;
4802 HandleMark hm;
4803
4804 ReferenceProcessor* rp = _g1h->ref_processor_stw();
4805
4806 G1ParScanThreadState pss(_g1h, worker_id, rp);
4807 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4808
4809 pss.set_evac_failure_closure(&evac_failure_cl);
4810
4811 bool only_young = _g1h->g1_policy()->gcs_are_young();
4812
4813 // Non-IM young GC.
4814 G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
4815 G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
4816 only_young, // Only process dirty klasses.
4817 false); // No need to claim CLDs.
4818 // IM young GC.
4819 // Strong roots closures.
4820 G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, rp);
4821 G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
4822 false, // Process all klasses.
4823 true); // Need to claim CLDs.
4824 // Weak roots closures.
4825 G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4826 G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4827 false, // Process all klasses.
4828 true); // Need to claim CLDs.
4829
4830 G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
4831 G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
4832 // IM Weak code roots are handled later.
4833
4834 OopClosure* strong_root_cl;
4835 OopClosure* weak_root_cl;
4836 CLDClosure* strong_cld_cl;
4837 CLDClosure* weak_cld_cl;
4838 CodeBlobClosure* strong_code_cl;
4839
4840 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4841 // We also need to mark copied objects.
4842 strong_root_cl = &scan_mark_root_cl;
4843 weak_root_cl = &scan_mark_weak_root_cl;
4844 strong_cld_cl = &scan_mark_cld_cl;
4845 weak_cld_cl = &scan_mark_weak_cld_cl;
4846 strong_code_cl = &scan_mark_code_cl;
4847 } else {
4848 strong_root_cl = &scan_only_root_cl;
4849 weak_root_cl = &scan_only_root_cl;
4850 strong_cld_cl = &scan_only_cld_cl;
4851 weak_cld_cl = &scan_only_cld_cl;
4852 strong_code_cl = &scan_only_code_cl;
4853 }
4854
4855
4856 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
4857
4858 pss.start_strong_roots();
4859 _g1h->g1_process_roots(strong_root_cl,
4860 weak_root_cl,
4861 &push_heap_rs_cl,
4862 strong_cld_cl,
4863 weak_cld_cl,
4864 strong_code_cl,
4865 worker_id);
4866
4867 pss.end_strong_roots();
4868
4869 {
4870 double start = os::elapsedTime();
4871 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4872 evac.do_void();
4873 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4874 double term_ms = pss.term_time()*1000.0;
4875 _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4876 _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4877 }
4878 _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4879 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4880
4881 if (ParallelGCVerbose) {
4882 MutexLocker x(stats_lock());
4883 pss.print_termination_stats(worker_id);
4884 }
4885
4886 assert(pss.queue_is_empty(), "should be empty");
4887
4888 // Close the inner scope so that the ResourceMark and HandleMark
4889 // destructors are executed here and are included as part of the
4890 // "GC Worker Time".
4891 }
4892
4893 double end_time_ms = os::elapsedTime() * 1000.0;
4894 _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4895 }
4896 };
4897
4898 // *** Common G1 Evacuation Stuff
4899
4900 // This method is run in a GC worker.
4901
4902 void
4903 G1CollectedHeap::
4904 g1_process_roots(OopClosure* scan_non_heap_roots,
4905 OopClosure* scan_non_heap_weak_roots,
4906 OopsInHeapRegionClosure* scan_rs,
4907 CLDClosure* scan_strong_clds,
4908 CLDClosure* scan_weak_clds,
4909 CodeBlobClosure* scan_strong_code,
4910 uint worker_i) {
4911
4912 // First scan the shared roots.
4913 double ext_roots_start = os::elapsedTime();
4914 double closure_app_time_sec = 0.0;
4915
4916 bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
4917
4918 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4919 BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
4920
4921 process_roots(false, // no scoping; this is parallel code
4922 SharedHeap::SO_None,
4923 &buf_scan_non_heap_roots,
4924 &buf_scan_non_heap_weak_roots,
4925 scan_strong_clds,
4926 // Initial Mark handles the weak CLDs separately.
4927 (during_im ? NULL : scan_weak_clds),
4928 scan_strong_code);
4929
4930 // Now the CM ref_processor roots.
4931 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4932 // We need to treat the discovered reference lists of the
4933 // concurrent mark ref processor as roots and keep entries
4934 // (which are added by the marking threads) on them live
4935 // until they can be processed at the end of marking.
4936 ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4937 }
4938
4939 if (during_im) {
4940     // Barrier to make sure all workers have passed
4941     // the strong CLD and strong nmethod phases.
4942 active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
4943
4944 // Now take the complement of the strong CLDs.
4945 ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4946 }
4947
4948 // Finish up any enqueued closure apps (attributed as object copy time).
4949 buf_scan_non_heap_roots.done();
4950 buf_scan_non_heap_weak_roots.done();
4951
4952 double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4953 + buf_scan_non_heap_weak_roots.closure_app_seconds();
4954
4955 g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4956
4957 double ext_root_time_ms =
4958 ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4959
4960 g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4961
4962 // During conc marking we have to filter the per-thread SATB buffers
4963   // to make sure we remove any oops that point into the CSet
4964   // (which will show up as implicitly live).
4965 double satb_filtering_ms = 0.0;
4966 if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
4967 if (mark_in_progress()) {
4968 double satb_filter_start = os::elapsedTime();
4969
4970 JavaThread::satb_mark_queue_set().filter_thread_buffers();
4971
4972 satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
4973 }
4974 }
4975 g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
4976
4977 // Now scan the complement of the collection set.
4978 MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations);
4979
4980 g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4981
4982 _process_strong_tasks->all_tasks_completed();
4983 }
4984
4985 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4986 private:
4987 BoolObjectClosure* _is_alive;
4988 int _initial_string_table_size;
4989 int _initial_symbol_table_size;
4990
4991 bool _process_strings;
4992 int _strings_processed;
4993 int _strings_removed;
4994
4995 bool _process_symbols;
4996 int _symbols_processed;
4997 int _symbols_removed;
4998
4999 bool _do_in_parallel;
5000 public:
5001 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
5002 AbstractGangTask("String/Symbol Unlinking"),
5003 _is_alive(is_alive),
5004 _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
5005 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0),
5006 _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()) {
5007
5008 _initial_string_table_size = StringTable::the_table()->table_size();
5009 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
5010 if (process_strings) {
5011 StringTable::clear_parallel_claimed_index();
5012 }
5013 if (process_symbols) {
5014 SymbolTable::clear_parallel_claimed_index();
5015 }
5016 }
5017
5018 ~G1StringSymbolTableUnlinkTask() {
5019 guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
5020 err_msg("claim value %d after unlink less than initial string table size %d",
5021 StringTable::parallel_claimed_index(), _initial_string_table_size));
5022 guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
5023 err_msg("claim value %d after unlink less than initial symbol table size %d",
5024 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
5025
5026 if (G1TraceStringSymbolTableScrubbing) {
5027 gclog_or_tty->print_cr("Cleaned string and symbol table, "
5028 "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
5029 "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
5030 strings_processed(), strings_removed(),
5031 symbols_processed(), symbols_removed());
5032 }
5033 }
5034
5035 void work(uint worker_id) {
5036 if (_do_in_parallel) {
5037 int strings_processed = 0;
5038 int strings_removed = 0;
5039 int symbols_processed = 0;
5040 int symbols_removed = 0;
5041 if (_process_strings) {
5042 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
5043 Atomic::add(strings_processed, &_strings_processed);
5044 Atomic::add(strings_removed, &_strings_removed);
5045 }
5046 if (_process_symbols) {
5047 SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
5048 Atomic::add(symbols_processed, &_symbols_processed);
5049 Atomic::add(symbols_removed, &_symbols_removed);
5050 }
5051 } else {
5052 if (_process_strings) {
5053 StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
5054 }
5055 if (_process_symbols) {
5056 SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
5057 }
5058 }
5059 }
5060
5061 size_t strings_processed() const { return (size_t)_strings_processed; }
5062 size_t strings_removed() const { return (size_t)_strings_removed; }
5063
5064 size_t symbols_processed() const { return (size_t)_symbols_processed; }
5065 size_t symbols_removed() const { return (size_t)_symbols_removed; }
5066 };
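// A minimal usage sketch (it mirrors unlink_string_and_symbol_table()
// further down; the flag values here are illustrative only):
//
//   G1StringSymbolTableUnlinkTask task(is_alive, true /* strings */, true /* symbols */);
//   if (G1CollectedHeap::use_parallel_gc_threads()) {
//     set_par_threads(n_workers);
//     workers()->run_task(&task);   // each gang worker calls task.work(worker_id)
//     set_par_threads(0);
//   } else {
//     task.work(0);                 // serial fallback on the calling thread
//   }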
5067
5068 class G1CodeCacheUnloadingTask {
5069 private:
5070 static Monitor* _lock;
5071
5072 BoolObjectClosure* const _is_alive;
5073 const bool _unloading_occurred;
5074 const uint _num_workers;
5075
5076 // Variables used to claim nmethods.
5077 nmethod* _first_nmethod;
5078 volatile nmethod* _claimed_nmethod;
5079
5080 // The list of nmethods that need to be processed by the second pass.
5081 volatile nmethod* _postponed_list;
5082 volatile uint _num_entered_barrier;
5083
5084 public:
5085 G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
5086 _is_alive(is_alive),
5087 _unloading_occurred(unloading_occurred),
5088 _num_workers(num_workers),
5089 _first_nmethod(NULL),
5090 _claimed_nmethod(NULL),
5091 _postponed_list(NULL),
5092 _num_entered_barrier(0)
5093 {
5094 nmethod::increase_unloading_clock();
5095 _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
5096 _claimed_nmethod = (volatile nmethod*)_first_nmethod;
5097 }
5098
5099 ~G1CodeCacheUnloadingTask() {
5100 CodeCache::verify_clean_inline_caches();
5101
5102 CodeCache::set_needs_cache_clean(false);
5103 guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
5104
5105 CodeCache::verify_icholder_relocations();
5106 }
5107
5108 private:
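// Note: _postponed_list is maintained as a lock-free stack (a Treiber
// stack) threaded through each nmethod's unloading-next link. Pushes
// (add_to_postponed_list) and pops (claim_postponed_nmethod) both retry a
// compare-and-swap on the list head, so no lock is needed to hand nmethods
// from the first cleaning pass to the second.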
5109 void add_to_postponed_list(nmethod* nm) {
5110 nmethod* old;
5111 do {
5112 old = (nmethod*)_postponed_list;
5113 nm->set_unloading_next(old);
5114 } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
5115 }
5116
5117 void clean_nmethod(nmethod* nm) {
5118 bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
5119
5120 if (postponed) {
5121 // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
5122 add_to_postponed_list(nm);
5123 }
5124
5125 // Mark that this nmethod has been cleaned/unloaded.
5126 // After this call, it will be safe to ask if this nmethod was unloaded or not.
5127 nm->set_unloading_clock(nmethod::global_unloading_clock());
5128 }
5129
5130 void clean_nmethod_postponed(nmethod* nm) {
5131 nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
5132 }
5133
5134 static const int MaxClaimNmethods = 16;
5135
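// Workers claim nmethods in batches of up to MaxClaimNmethods by advancing
// the shared _claimed_nmethod cursor with a single compare-and-swap, which
// keeps contention on the cursor low. The batch starts *after* 'first':
// the nmethod at the cursor itself belongs to the previous claimant (and
// the very first nmethod is handled separately in work_first_pass()).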
5136 void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
5137 nmethod* first;
5138 nmethod* last;
5139
5140 do {
5141 *num_claimed_nmethods = 0;
5142
5143 first = last = (nmethod*)_claimed_nmethod;
5144
5145 if (first != NULL) {
5146 for (int i = 0; i < MaxClaimNmethods; i++) {
5147 last = CodeCache::alive_nmethod(CodeCache::next(last));
5148
5149 if (last == NULL) {
5150 break;
5151 }
5152
5153 claimed_nmethods[i] = last;
5154 (*num_claimed_nmethods)++;
5155 }
5156 }
5157
5158 } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
5159 }
5160
5161 nmethod* claim_postponed_nmethod() {
5162 nmethod* claim;
5163 nmethod* next;
5164
5165 do {
5166 claim = (nmethod*)_postponed_list;
5167 if (claim == NULL) {
5168 return NULL;
5169 }
5170
5171 next = claim->unloading_next();
5172
5173 } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
5174
5175 return claim;
5176 }
5177
5178 public:
5179 // Mark that we're done with the first pass of nmethod cleaning.
5180 void barrier_mark(uint worker_id) {
5181 MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
5182 _num_entered_barrier++;
5183 if (_num_entered_barrier == _num_workers) {
5184 ml.notify_all();
5185 }
5186 }
5187
5188 // See if we have to wait for the other workers to
5189 // finish their first-pass nmethod cleaning work.
5190 void barrier_wait(uint worker_id) {
5191 if (_num_entered_barrier < _num_workers) {
5192 MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
5193 while (_num_entered_barrier < _num_workers) {
5194 ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
5195 }
5196 }
5197 }
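// Together, barrier_mark() and barrier_wait() form a simple one-shot
// barrier: each worker bumps _num_entered_barrier when its first pass is
// done, and waiters block on the monitor until the count reaches
// _num_workers. G1ParallelCleaningTask::work() below deliberately schedules
// the string/symbol unlinking between the mark and the wait, so workers
// have useful work to do while the last first-pass workers finish.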
5198
5199 // Cleaning and unloading of nmethods. Some work has to be postponed
5200 // to the second pass, when we know which nmethods survive.
5201 void work_first_pass(uint worker_id) {
5202 // The first nmethod is claimed by the first worker.
5203 if (worker_id == 0 && _first_nmethod != NULL) {
5204 clean_nmethod(_first_nmethod);
5205 _first_nmethod = NULL;
5206 }
5207
5208 int num_claimed_nmethods;
5209 nmethod* claimed_nmethods[MaxClaimNmethods];
5210
5211 while (true) {
5212 claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
5213
5214 if (num_claimed_nmethods == 0) {
5215 break;
5216 }
5217
5218 for (int i = 0; i < num_claimed_nmethods; i++) {
5219 clean_nmethod(claimed_nmethods[i]);
5220 }
5221 }
5222 }
5223
5224 void work_second_pass(uint worker_id) {
5225 nmethod* nm;
5226 // Take care of postponed nmethods.
5227 while ((nm = claim_postponed_nmethod()) != NULL) {
5228 clean_nmethod_postponed(nm);
5229 }
5230 }
5231 };
5232
5233 Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
5234
5235 class G1KlassCleaningTask {
5236 BoolObjectClosure* _is_alive;
5237 volatile jint _clean_klass_tree_claimed;
5238 ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
5239
5240 public:
5241 G1KlassCleaningTask(BoolObjectClosure* is_alive) :
5242 _is_alive(is_alive),
5243 _clean_klass_tree_claimed(0),
5244 _klass_iterator() {
5245 }
5246
5247 private:
5248 bool claim_clean_klass_tree_task() {
5249 if (_clean_klass_tree_claimed) {
5250 return false;
5251 }
5252
5253 return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
5254 }
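// Note: the unsynchronized read of _clean_klass_tree_claimed above is just
// a fast-path filter; the cmpxchg from 0 to 1 is what actually selects the
// single worker that cleans the subklass tree.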
5255
5256 InstanceKlass* claim_next_klass() {
5257 Klass* klass;
5258 do {
5259 klass = _klass_iterator.next_klass();
5260 } while (klass != NULL && !klass->oop_is_instance());
5261
5262 return (InstanceKlass*)klass;
5263 }
5264
5265 public:
5266
5267 void clean_klass(InstanceKlass* ik) {
5268 ik->clean_implementors_list(_is_alive);
5269 ik->clean_method_data(_is_alive);
5270
5271 // G1 specific cleanup work that has
5272 // been moved here to be done in parallel.
5273 ik->clean_dependent_nmethods();
5274 }
5275
5276 void work() {
5277 ResourceMark rm;
5278
5279 // One worker will clean the subklass/sibling klass tree.
5280 if (claim_clean_klass_tree_task()) {
5281 Klass::clean_subklass_tree(_is_alive);
5282 }
5283
5284 // All workers will help clean the classes.
5285 InstanceKlass* klass;
5286 while ((klass = claim_next_klass()) != NULL) {
5287 clean_klass(klass);
5288 }
5289 }
5290 };
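// G1KlassCleaningTask is not an AbstractGangTask itself; a single instance
// is shared by all cleaning workers and driven from
// G1ParallelCleaningTask::work() below.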
5291
5292
5293 class G1ParallelCleaningTask : public AbstractGangTask {
5294 private:
5295 G1StringSymbolTableUnlinkTask _string_symbol_task;
5296 G1CodeCacheUnloadingTask _code_cache_task;
5297 G1KlassCleaningTask _klass_cleaning_task;
5298
5299 public:
5300 G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
5301 AbstractGangTask("Parallel Cleaning"),
5302 _string_symbol_task(is_alive, process_strings, process_symbols),
5303 _code_cache_task(num_workers, is_alive, unloading_occurred),
5304 _klass_cleaning_task(is_alive) {
5305 }
5306
5307 void work(uint worker_id) {
5308 // Do first pass of code cache cleaning.
5309 _code_cache_task.work_first_pass(worker_id);
5310
5311 // Mark that this worker has finished the first pass.
5312 _code_cache_task.barrier_mark(worker_id);
5313
5314 // Clean the Strings and Symbols.
5315 _string_symbol_task.work(worker_id);
5316
5317 // Wait for all workers to finish the first code cache cleaning pass.
5318 _code_cache_task.barrier_wait(worker_id);
5319
5320 // Do the second pass of code cache cleaning, which relies on
5321 // the liveness information gathered during the first pass.
5322 _code_cache_task.work_second_pass(worker_id);
5323
5324 // Clean all klasses that were not unloaded.
5325 _klass_cleaning_task.work();
5326 }
5327 };
5328
5329
5330 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
5331 bool process_strings,
5332 bool process_symbols,
5333 bool class_unloading_occurred) {
5334 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5335 workers()->active_workers() : 1);
5336
5337 G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
5338 n_workers, class_unloading_occurred);
5339 if (G1CollectedHeap::use_parallel_gc_threads()) {
5340 set_par_threads(n_workers);
5341 workers()->run_task(&g1_unlink_task);
5342 set_par_threads(0);
5343 } else {
5344 g1_unlink_task.work(0);
5345 }
5346 }
5347
5348 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
5349 bool process_strings, bool process_symbols) {
5350 {
5351 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5352 workers()->active_workers() : 1);
5353 G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5354 if (G1CollectedHeap::use_parallel_gc_threads()) {
5355 set_par_threads(n_workers);
5356 workers()->run_task(&g1_unlink_task);
5357 set_par_threads(0);
5358 } else {
5359 g1_unlink_task.work(0);
5360 }
5361 }
5362
5363 if (G1StringDedup::is_enabled()) {
5364 G1StringDedup::unlink(is_alive);
5365 }
5366 }
5367
5368 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5369 private:
5370 DirtyCardQueueSet* _queue;
5371 public:
5372 G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5373
5374 virtual void work(uint worker_id) {
5375 double start_time = os::elapsedTime();
5376
5377 RedirtyLoggedCardTableEntryClosure cl;
5378 if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
5379 _queue->par_apply_closure_to_all_completed_buffers(&cl);
5380 } else {
5933 workers()->set_active_workers(n_workers);
5934 set_par_threads(n_workers);
5935 } else {
5936 assert(n_par_threads() == 0,
5937 "Should be the original non-parallel value");
5938 n_workers = 1;
5939 }
5940
5941 G1ParTask g1_par_task(this, _task_queues);
5942
5943 init_for_evac_failure(NULL);
5944
5945 rem_set()->prepare_for_younger_refs_iterate(true);
5946
5947 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5948 double start_par_time_sec = os::elapsedTime();
5949 double end_par_time_sec;
5950
5951 {
5952 StrongRootsScope srs(this);
5953 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5954 if (g1_policy()->during_initial_mark_pause()) {
5955 ClassLoaderDataGraph::clear_claimed_marks();
5956 }
5957
5958 if (G1CollectedHeap::use_parallel_gc_threads()) {
5959 // The individual threads will set their evac-failure closures.
5960 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5961 // These tasks use SharedHeap::_process_strong_tasks
5962 assert(UseDynamicNumberOfGCThreads ||
5963 workers()->active_workers() == workers()->total_workers(),
5964 "If not dynamic should be using all the workers");
5965 workers()->run_task(&g1_par_task);
5966 } else {
5967 g1_par_task.set_for_termination(n_workers);
5968 g1_par_task.work(0);
5969 }
5970 end_par_time_sec = os::elapsedTime();
5971
5972 // Closing the inner scope will execute the destructor
5973 // for the StrongRootsScope object. We record the current
5974 // elapsed time before closing the scope so that time
5975 // taken for the SRS destructor is NOT included in the
5976 // reported parallel time.
6970 err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
6971 HR_FORMAT_PARAMS(hr)));
6972 hr->migrate_strong_code_roots();
6973 return false;
6974 }
6975 };
6976
6977 void G1CollectedHeap::migrate_strong_code_roots() {
6978 MigrateCodeRootsHeapRegionClosure cl;
6979 double migrate_start = os::elapsedTime();
6980 collection_set_iterate(&cl);
6981 double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
6982 g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
6983 }
6984
6985 void G1CollectedHeap::purge_code_root_memory() {
6986 double purge_start = os::elapsedTime();
6987 G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
6988 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
6989 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
6990 }
6991
6992 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
6993 G1CollectedHeap* _g1h;
6994
6995 public:
6996 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
6997 _g1h(g1h) {}
6998
6999 void do_code_blob(CodeBlob* cb) {
7000 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
7001 if (nm == NULL) {
7002 return;
7003 }
7004
7005 if (ScavengeRootsInCode) {
7006 _g1h->register_nmethod(nm);
7007 }
7008 }
7009 };