3103 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3104 } _oop_closure;
3105 public:
3106 VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
3107 void do_klass(Klass* k) {
3108 k->oops_do(&_oop_closure);
3109 }
3110 };
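// Sketch of assumed usage (the driving call site is outside this excerpt,
// so this is not verbatim source): a KlassClosure like the one above is
// handed to the class-loader data graph, which invokes do_klass() on every
// loaded Klass; do_klass() then applies the wrapped oop closure to that
// klass's oop fields.
//
//   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
//   ClassLoaderDataGraph::classes_do(&verify_klass_oops);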
3111
3112 void CMSCollector::verify_after_remark_work_2() {
3113 ResourceMark rm;
3114 HandleMark hm;
3115 GenCollectedHeap* gch = GenCollectedHeap::heap();
3116
3117 // Get a clear set of claim bits for the strong roots processing to work with.
3118 ClassLoaderDataGraph::clear_claimed_marks();
3119
3120 // Mark from roots one level into CMS
3121 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3122 markBitMap());
3123 CMKlassClosure klass_closure(&notOlder);
3124
3125 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3126 gch->gen_process_strong_roots(_cmsGen->level(),
3127 true, // younger gens are roots
3128 true, // activate StrongRootsScope
3129 SharedHeap::ScanningOption(roots_scanning_options()),
3130 &notOlder,
3131 NULL,
3132 &klass_closure);
3133
3134 // Now mark from the roots
3135 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3136 verification_mark_bm(), markBitMap(), verification_mark_stack());
3137 assert(_restart_addr == NULL, "Expected pre-condition");
3138 verification_mark_bm()->iterate(&markFromRootsClosure);
3139 while (_restart_addr != NULL) {
3140 // Deal with stack overflow by restarting at the indicated
3141 // address.
3142 HeapWord* ra = _restart_addr;
3143 markFromRootsClosure.reset(ra);
3723 {
3724 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3725 if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3726 // The parallel version.
3727 FlexibleWorkGang* workers = gch->workers();
3728 assert(workers != NULL, "Need parallel worker threads.");
3729 int n_workers = workers->active_workers();
3730 CMSParInitialMarkTask tsk(this, n_workers);
3731 gch->set_par_threads(n_workers);
3732 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3733 if (n_workers > 1) {
3734 GenCollectedHeap::StrongRootsScope srs(gch);
3735 workers->run_task(&tsk);
3736 } else {
3737 GenCollectedHeap::StrongRootsScope srs(gch);
3738 tsk.work(0);
3739 }
3740 gch->set_par_threads(0);
3741 } else {
3742 // The serial version.
3743 CMKlassClosure klass_closure(&notOlder);
3744 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3745 gch->gen_process_strong_roots(_cmsGen->level(),
3746 true, // younger gens are roots
3747 true, // activate StrongRootsScope
3748 SharedHeap::ScanningOption(roots_scanning_options()),
3749 &notOlder,
3750 NULL,
3751 &klass_closure);
3752 }
3753 }
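// Note that the two arms above differ only in how the task body is executed:
// via the work gang when n_workers > 1, or inline as tsk.work(0) on the
// calling thread. Both run under a StrongRootsScope, and both are bracketed
// by the set_par_threads() calls, so per-thread subtask claiming behaves the
// same either way.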
3754
3755 // Clear mod-union table; it will be dirtied in the prologue of
3756 // the CMS generation for each younger generation collection.
3757
3758 assert(_modUnionTable.isAllClear(),
3759 "Was cleared in most recent final checkpoint phase"
3760 " or no bits are set in the gc_prologue before the start of the next "
3761 "subsequent marking phase.");
3762
3763 assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
4185 Par_MarkFromRootsClosure cl(this, _collector, my_span,
4186 &_collector->_markBitMap,
4187 work_queue(i),
4188 &_collector->_markStack,
4189 _asynch);
4190 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4191 } // else nothing to do for this task
4192 } // else nothing to do for this task
4193 }
4194 // We'd be tempted to assert here that since there are no
4195 // more tasks left to claim in this space, the global_finger
4196 // must exceed space->top() and a fortiori space->end(). However,
4197 // that would not quite be correct because the bumping of
4198 // global_finger occurs strictly after the claiming of a task,
4199 // so by the time we reach here the global finger may not yet
4200 // have been bumped up by the thread that claimed the last
4201 // task.
4202 pst->all_tasks_completed();
4203 }
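// To make the caveat above concrete, one possible interleaving (illustrative
// only, not collector code):
//
//   Worker A                          Worker B
//   claims the last task              .
//   .                                 finds no unclaimed tasks
//   .                                 reads global_finger (pre-bump value)
//   bumps global_finger               .
//
// Worker B can thus reach all_tasks_completed() while global_finger still
// trails space->top().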
4204
4205 class Par_ConcMarkingClosure: public CMSOopClosure {
4206 private:
4207 CMSCollector* _collector;
4208 CMSConcMarkingTask* _task;
4209 MemRegion _span;
4210 CMSBitMap* _bit_map;
4211 CMSMarkStack* _overflow_stack;
4212 OopTaskQueue* _work_queue;
4213 protected:
4214 DO_OOP_WORK_DEFN
4215 public:
4216 Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4217 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
4218 CMSOopClosure(collector->ref_processor()),
4219 _collector(collector),
4220 _task(task),
4221 _span(collector->_span),
4222 _work_queue(work_queue),
4223 _bit_map(bit_map),
4224 _overflow_stack(overflow_stack)
4225 { }
4226 virtual void do_oop(oop* p);
4227 virtual void do_oop(narrowOop* p);
4228
4229 void trim_queue(size_t max);
4230 void handle_stack_overflow(HeapWord* lost);
4231 void do_yield_check() {
4232 if (_task->should_yield()) {
4233 _task->yield();
4234 }
4235 }
4236 };
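// A sketch of how a closure like this is driven during work stealing,
// assuming the usual taskqueue protocol (pop_local/steal/offer_termination);
// the real driver lives in CMSConcMarkingTask and may differ in detail:
//
//   Par_ConcMarkingClosure cl(collector, task, work_queue, bit_map, ovflw);
//   while (true) {
//     oop obj_to_scan;
//     if (work_queue->pop_local(obj_to_scan)) {
//       obj_to_scan->oop_iterate(&cl);        // scan a local grey object
//     } else if (task_queues->steal(i, seed, obj_to_scan)) {
//       obj_to_scan->oop_iterate(&cl);        // scan a stolen grey object
//     } else if (terminator->offer_termination()) {
//       break;                                // every queue has drained
//     }
//     cl.do_yield_check();                    // cooperate with yield requests
//   }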
4237
4238 // Grey object scanning during work stealing phase --
4969 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4970 "Should only be AbortablePreclean.");
4971 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4972 if (should_abort_preclean()) {
4973 break; // out of preclean loop
4974 } else {
4975 // Compute the next address at which preclean should pick up.
4976 lastAddr = next_card_start_after_block(stop_point);
4977 }
4978 }
4979 } else {
4980 break;
4981 }
4982 }
4983 verify_work_stacks_empty();
4984 verify_overflow_empty();
4985 return cumNumDirtyCards;
4986 }
4987
4988 class PrecleanKlassClosure : public KlassClosure {
4989 CMKlassClosure _cm_klass_closure;
4990 public:
4991 PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4992 void do_klass(Klass* k) {
4993 if (k->has_accumulated_modified_oops()) {
4994 k->clear_accumulated_modified_oops();
4995
4996 _cm_klass_closure.do_klass(k);
4997 }
4998 }
4999 };
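// The closure above consumes the "accumulated modified oops" state: a Klass
// whose oop fields changed since the flag was last cleared is rescanned via
// the wrapped closure, and the flag is reset so that remark only revisits
// klasses dirtied after this preclean pass. Presumably it is driven over all
// klasses like the other klass closures here (assumed usage; the call site
// is outside this excerpt):
//
//   PrecleanKlassClosure preclean_klass_closure(cl);
//   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);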
5000
5001 // The freelist lock is needed to prevent asserts; is it really needed?
5002 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
5003
5004 cl->set_freelistLock(freelistLock);
5005
5006 CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
5007
5008 // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
5009 // SSS: Should we check at suitable intervals whether precleaning should be aborted?
5207 _collectorState = Sweeping;
5208 // Call isAllClear() under bitMapLock
5209 assert(_modUnionTable.isAllClear(),
5210 "Should be clear by end of the final marking");
5211 assert(_ct->klass_rem_set()->mod_union_is_clear(),
5212 "Should be clear by end of the final marking");
5213 if (UseAdaptiveSizePolicy) {
5214 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5215 }
5216 }
5217
5218 void CMSParInitialMarkTask::work(uint worker_id) {
5219 elapsedTimer _timer;
5220 ResourceMark rm;
5221 HandleMark hm;
5222
5223 // ---------- scan from roots --------------
5224 _timer.start();
5225 GenCollectedHeap* gch = GenCollectedHeap::heap();
5226 Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5227 CMKlassClosure klass_closure(&par_mri_cl);
5228
5229 // ---------- young gen roots --------------
5230 {
5231 work_on_young_gen_roots(worker_id, &par_mri_cl);
5232 _timer.stop();
5233 if (PrintCMSStatistics != 0) {
5234 gclog_or_tty->print_cr(
5235 "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5236 worker_id, _timer.seconds());
5237 }
5238 }
5239
5240 // ---------- remaining roots --------------
5241 _timer.reset();
5242 _timer.start();
5243 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5244 false, // yg was scanned above
5245 false, // this is parallel code
5246 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5247 &par_mri_cl,
5281
5282 OopTaskQueueSet* task_queues() { return _task_queues; }
5283
5284 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5285
5286 ParallelTaskTerminator* terminator() { return &_term; }
5287 int n_workers() { return _n_workers; }
5288
5289 void work(uint worker_id);
5290
5291 private:
5292 // ... of dirty cards in old space
5293 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5294 Par_MarkRefsIntoAndScanClosure* cl);
5295
5296 // ... work stealing for the above
5297 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5298 };
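// Note: the int* seed threaded through do_work_steal() is the per-worker
// random seed that the taskqueue steal() uses to pick a victim queue, so
// each worker probes potential victims in its own pseudo-random order.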
5299
5300 class RemarkKlassClosure : public KlassClosure {
5301 CMKlassClosure _cm_klass_closure;
5302 public:
5303 RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5304 void do_klass(Klass* k) {
5305 // Check if we have modified any oops in the Klass during the concurrent marking.
5306 if (k->has_accumulated_modified_oops()) {
5307 k->clear_accumulated_modified_oops();
5308
5309 // We could have transferred the current modified marks to the accumulated marks,
5310 // as we do from the Card Table to the Mod Union Table. But it's not really necessary.
5311 } else if (k->has_modified_oops()) {
5312 // Don't clear anything, this info is needed by the next young collection.
5313 } else {
5314 // No modified oops in the Klass.
5315 return;
5316 }
5317
5318 // The klass has modified fields, need to scan the klass.
5319 _cm_klass_closure.do_klass(k);
5320 }
5321 };
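// Summarizing the three cases above: accumulated-modified oops => clear the
// flag and rescan; currently-modified oops only => rescan but leave the flag
// for the next young collection; neither => return without scanning. Only
// the first two cases reach _cm_klass_closure.do_klass(k).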
7718 _finger = addr + obj->size();
7719 assert(_finger > addr, "we just incremented it above");
7720 // Note: the finger doesn't advance while we drain
7721 // the stack below.
7722 bool res = _mark_stack->push(obj);
7723 assert(res, "Empty non-zero size stack should have space for single push");
7724 while (!_mark_stack->isEmpty()) {
7725 oop new_oop = _mark_stack->pop();
7726 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7727 // now scan this oop's oops
7728 new_oop->oop_iterate(&_pam_verify_closure);
7729 }
7730 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7731 return true;
7732 }
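// The pop/scan loop above computes a transitive closure: _pam_verify_closure
// pushes any newly discovered grey objects back onto the same _mark_stack,
// so the loop terminates only once verification marking reaches a fixed
// point.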
7733
7734 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7735 CMSCollector* collector, MemRegion span,
7736 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7737 CMSMarkStack* mark_stack):
7738 CMSOopClosure(collector->ref_processor()),
7739 _collector(collector),
7740 _span(span),
7741 _verification_bm(verification_bm),
7742 _cms_bm(cms_bm),
7743 _mark_stack(mark_stack)
7744 { }
7745
7746 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7747 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7748
7749 // Upon stack overflow, we discard (part of) the stack,
7750 // remembering the least address amongst those discarded
7751 // in CMSCollector's _restart_addr.
7752 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7753 // Remember the least grey address discarded
7754 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7755 _collector->lower_restart_addr(ra);
7756 _mark_stack->reset(); // discard stack contents
7757 _mark_stack->expand(); // expand the stack if possible
7758 }
7771 }
7772
7773 if (!_mark_stack->push(obj)) { // stack overflow
7774 if (PrintCMSStatistics != 0) {
7775 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7776 SIZE_FORMAT, _mark_stack->capacity());
7777 }
7778 assert(_mark_stack->isFull(), "Else push should have succeeded");
7779 handle_stack_overflow(addr);
7780 }
7781 // anything including and to the right of _finger
7782 // will be scanned as we iterate over the remainder of the
7783 // bit map
7784 }
7785 }
7786
7787 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7788 MemRegion span,
7789 CMSBitMap* bitMap, CMSMarkStack* markStack,
7790 HeapWord* finger, MarkFromRootsClosure* parent) :
7791 CMSOopClosure(collector->ref_processor()),
7792 _collector(collector),
7793 _span(span),
7794 _bitMap(bitMap),
7795 _markStack(markStack),
7796 _finger(finger),
7797 _parent(parent)
7798 { }
7799
7800 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7801 MemRegion span,
7802 CMSBitMap* bit_map,
7803 OopTaskQueue* work_queue,
7804 CMSMarkStack* overflow_stack,
7805 HeapWord* finger,
7806 HeapWord** global_finger_addr,
7807 Par_MarkFromRootsClosure* parent) :
7808 CMSOopClosure(collector->ref_processor()),
7809 _collector(collector),
7810 _whole_span(collector->_span),
7811 _span(span),
7812 _bit_map(bit_map),
7813 _work_queue(work_queue),
7814 _overflow_stack(overflow_stack),
7815 _finger(finger),
7816 _global_finger_addr(global_finger_addr),
7817 _parent(parent)
7818 { }
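// Compared with the serial PushOrMarkClosure above, the parallel variant
// carries a per-worker span and work queue plus the address of the shared
// global finger, and it spills into a common overflow stack rather than the
// serial mark stack.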
7819
7820 // Assumes thread-safe access by callers, who are
7821 // responsible for mutual exclusion.
7822 void CMSCollector::lower_restart_addr(HeapWord* low) {
7823 assert(_span.contains(low), "Out of bounds addr");
7824 if (_restart_addr == NULL) {
7825 _restart_addr = low;
7826 } else {
7827 _restart_addr = MIN2(_restart_addr, low);
7828 }
7837 _collector->lower_restart_addr(ra);
7838 _markStack->reset(); // discard stack contents
7839 _markStack->expand(); // expand the stack if possible
7840 }
7841
7842 // Upon stack overflow, we discard (part of) the stack,
7843 // remembering the least address amongst those discarded
7844 // in CMSCollector's _restart_addr.
7845 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7846 // We need to do this under a mutex to prevent other
7847 // workers from interfering with the work done below.
7848 MutexLockerEx ml(_overflow_stack->par_lock(),
7849 Mutex::_no_safepoint_check_flag);
7850 // Remember the least grey address discarded
7851 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7852 _collector->lower_restart_addr(ra);
7853 _overflow_stack->reset(); // discard stack contents
7854 _overflow_stack->expand(); // expand the stack if possible
7855 }
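// Overflow is benign in both the serial and parallel variants: the least
// discarded address funnels into _restart_addr via lower_restart_addr(), and
// the marking driver (cf. the while (_restart_addr != NULL) restart loop
// earlier in this file) re-traverses from that address, re-greying whatever
// was dropped here.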
7856
7857 void CMKlassClosure::do_klass(Klass* k) {
7858 assert(_oop_closure != NULL, "Not initialized?");
7859 k->oops_do(_oop_closure);
7860 }
7861
7862 void PushOrMarkClosure::do_oop(oop obj) {
7863 // Ignore mark word because we are running concurrently with mutators.
7864 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7865 HeapWord* addr = (HeapWord*)obj;
7866 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7867 // Oop lies in _span and isn't yet grey or black
7868 _bitMap->mark(addr); // now grey
7869 if (addr < _finger) {
7870 // the bit map iteration has already either passed, or
7871 // sampled, this bit in the bit map; we'll need to
7872 // use the marking stack to scan this oop's oops.
7873 bool simulate_overflow = false;
7874 NOT_PRODUCT(
7875 if (CMSMarkStackOverflowALot &&
7876 _collector->simulate_overflow()) {
7877 // simulate a stack overflow
7878 simulate_overflow = true;
7879 }
7880 )
7881 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7939 // it may have been emptied since.
7940 assert(simulate_overflow ||
7941 _work_queue->size() == _work_queue->max_elems(),
7942 "Else push should have succeeded");
7943 handle_stack_overflow(addr);
7944 }
7945 do_yield_check();
7946 }
7947 }
7948
7949 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7950 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7951
7952 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7953 MemRegion span,
7954 ReferenceProcessor* rp,
7955 CMSBitMap* bit_map,
7956 CMSBitMap* mod_union_table,
7957 CMSMarkStack* mark_stack,
7958 bool concurrent_precleaning):
7959 CMSOopClosure(rp),
7960 _collector(collector),
7961 _span(span),
7962 _bit_map(bit_map),
7963 _mod_union_table(mod_union_table),
7964 _mark_stack(mark_stack),
7965 _concurrent_precleaning(concurrent_precleaning)
7966 {
7967 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7968 }
7969
7970 // Grey object rescan during pre-cleaning and second checkpoint phases --
7971 // the non-parallel version (the parallel version appears further below).
7972 void PushAndMarkClosure::do_oop(oop obj) {
7973 // Ignore mark word verification. If during concurrent precleaning,
7974 // the object monitor may be locked. If during the checkpoint
7975 // phases, the object may already have been reached by a different
7976 // path and may be at the end of the global overflow list (so
7977 // the mark word may be NULL).
7978 assert(obj->is_oop_or_null(true /* ignore mark word */),
7979 "expected an oop or NULL");
8012 _mod_union_table->mark_range(redirty_range);
8013 } else {
8014 _mod_union_table->mark(addr);
8015 }
8016 _collector->_ser_pmc_preclean_ovflw++;
8017 } else {
8018 // During the remark phase, we need to remember this oop
8019 // in the overflow list.
8020 _collector->push_on_overflow_list(obj);
8021 _collector->_ser_pmc_remark_ovflw++;
8022 }
8023 }
8024 }
8025 }
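// Two different overflow fallbacks, matching the two phases this closure
// serves: during concurrent precleaning the object's address is recorded in
// the mod union table, so the later remark pass will rescan it anyway;
// during remark itself there is no later pass to fall back on, so the oop
// must be remembered on the global overflow list.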
8026
8027 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
8028 MemRegion span,
8029 ReferenceProcessor* rp,
8030 CMSBitMap* bit_map,
8031 OopTaskQueue* work_queue):
8032 CMSOopClosure(rp),
8033 _collector(collector),
8034 _span(span),
8035 _bit_map(bit_map),
8036 _work_queue(work_queue)
8037 {
8038 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
8039 }
8040
8041 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
8042 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
8043
8044 // Grey object rescan during second checkpoint phase --
8045 // the parallel version.
8046 void Par_PushAndMarkClosure::do_oop(oop obj) {
8047 // In the assert below, we ignore the mark word because
8048 // this oop may point to an already visited object that is
8049 // on the overflow stack (in which case the mark word has
8050 // been hijacked for chaining into the overflow stack --
8051 // if this is the last object in the overflow stack then
8052 // its mark word will be NULL). Because this object may
3103 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3104 } _oop_closure;
3105 public:
3106 VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
3107 void do_klass(Klass* k) {
3108 k->oops_do(&_oop_closure);
3109 }
3110 };
3111
3112 void CMSCollector::verify_after_remark_work_2() {
3113 ResourceMark rm;
3114 HandleMark hm;
3115 GenCollectedHeap* gch = GenCollectedHeap::heap();
3116
3117 // Get a clear set of claim bits for the strong roots processing to work with.
3118 ClassLoaderDataGraph::clear_claimed_marks();
3119
3120 // Mark from roots one level into CMS
3121 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3122 markBitMap());
3123 KlassToOopClosure klass_closure(&notOlder);
3124
3125 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3126 gch->gen_process_strong_roots(_cmsGen->level(),
3127 true, // younger gens are roots
3128 true, // activate StrongRootsScope
3129 SharedHeap::ScanningOption(roots_scanning_options()),
3130 &notOlder,
3131 NULL,
3132 &klass_closure);
3133
3134 // Now mark from the roots
3135 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3136 verification_mark_bm(), markBitMap(), verification_mark_stack());
3137 assert(_restart_addr == NULL, "Expected pre-condition");
3138 verification_mark_bm()->iterate(&markFromRootsClosure);
3139 while (_restart_addr != NULL) {
3140 // Deal with stack overflow by restarting at the indicated
3141 // address.
3142 HeapWord* ra = _restart_addr;
3143 markFromRootsClosure.reset(ra);
3723 {
3724 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3725 if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3726 // The parallel version.
3727 FlexibleWorkGang* workers = gch->workers();
3728 assert(workers != NULL, "Need parallel worker threads.");
3729 int n_workers = workers->active_workers();
3730 CMSParInitialMarkTask tsk(this, n_workers);
3731 gch->set_par_threads(n_workers);
3732 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3733 if (n_workers > 1) {
3734 GenCollectedHeap::StrongRootsScope srs(gch);
3735 workers->run_task(&tsk);
3736 } else {
3737 GenCollectedHeap::StrongRootsScope srs(gch);
3738 tsk.work(0);
3739 }
3740 gch->set_par_threads(0);
3741 } else {
3742 // The serial version.
3743 KlassToOopClosure klass_closure(&notOlder);
3744 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3745 gch->gen_process_strong_roots(_cmsGen->level(),
3746 true, // younger gens are roots
3747 true, // activate StrongRootsScope
3748 SharedHeap::ScanningOption(roots_scanning_options()),
3749 &notOlder,
3750 NULL,
3751 &klass_closure);
3752 }
3753 }
3754
3755 // Clear mod-union table; it will be dirtied in the prologue of
3756 // the CMS generation for each younger generation collection.
3757
3758 assert(_modUnionTable.isAllClear(),
3759 "Was cleared in most recent final checkpoint phase"
3760 " or no bits are set in the gc_prologue before the start of the next "
3761 "subsequent marking phase.");
3762
3763 assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
4185 Par_MarkFromRootsClosure cl(this, _collector, my_span,
4186 &_collector->_markBitMap,
4187 work_queue(i),
4188 &_collector->_markStack,
4189 _asynch);
4190 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4191 } // else nothing to do for this task
4192 } // else nothing to do for this task
4193 }
4194 // We'd be tempted to assert here that since there are no
4195 // more tasks left to claim in this space, the global_finger
4196 // must exceed space->top() and a fortiori space->end(). However,
4197 // that would not quite be correct because the bumping of
4198 // global_finger occurs strictly after the claiming of a task,
4199 // so by the time we reach here the global finger may not yet
4200 // have been bumped up by the thread that claimed the last
4201 // task.
4202 pst->all_tasks_completed();
4203 }
4204
4205 class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
4206 private:
4207 CMSCollector* _collector;
4208 CMSConcMarkingTask* _task;
4209 MemRegion _span;
4210 CMSBitMap* _bit_map;
4211 CMSMarkStack* _overflow_stack;
4212 OopTaskQueue* _work_queue;
4213 protected:
4214 DO_OOP_WORK_DEFN
4215 public:
4216 Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4217 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
4218 MetadataAwareOopClosure(collector->ref_processor()),
4219 _collector(collector),
4220 _task(task),
4221 _span(collector->_span),
4222 _work_queue(work_queue),
4223 _bit_map(bit_map),
4224 _overflow_stack(overflow_stack)
4225 { }
4226 virtual void do_oop(oop* p);
4227 virtual void do_oop(narrowOop* p);
4228
4229 void trim_queue(size_t max);
4230 void handle_stack_overflow(HeapWord* lost);
4231 void do_yield_check() {
4232 if (_task->should_yield()) {
4233 _task->yield();
4234 }
4235 }
4236 };
4237
4238 // Grey object scanning during work stealing phase --
4969 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4970 "Should only be AbortablePreclean.");
4971 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4972 if (should_abort_preclean()) {
4973 break; // out of preclean loop
4974 } else {
4975 // Compute the next address at which preclean should pick up.
4976 lastAddr = next_card_start_after_block(stop_point);
4977 }
4978 }
4979 } else {
4980 break;
4981 }
4982 }
4983 verify_work_stacks_empty();
4984 verify_overflow_empty();
4985 return cumNumDirtyCards;
4986 }
4987
4988 class PrecleanKlassClosure : public KlassClosure {
4989 KlassToOopClosure _cm_klass_closure;
4990 public:
4991 PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4992 void do_klass(Klass* k) {
4993 if (k->has_accumulated_modified_oops()) {
4994 k->clear_accumulated_modified_oops();
4995
4996 _cm_klass_closure.do_klass(k);
4997 }
4998 }
4999 };
5000
5001 // The freelist lock is needed to prevent asserts; is it really needed?
5002 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
5003
5004 cl->set_freelistLock(freelistLock);
5005
5006 CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
5007
5008 // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
5009 // SSS: Should we check at suitable intervals whether precleaning should be aborted?
5207 _collectorState = Sweeping;
5208 // Call isAllClear() under bitMapLock
5209 assert(_modUnionTable.isAllClear(),
5210 "Should be clear by end of the final marking");
5211 assert(_ct->klass_rem_set()->mod_union_is_clear(),
5212 "Should be clear by end of the final marking");
5213 if (UseAdaptiveSizePolicy) {
5214 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5215 }
5216 }
5217
5218 void CMSParInitialMarkTask::work(uint worker_id) {
5219 elapsedTimer _timer;
5220 ResourceMark rm;
5221 HandleMark hm;
5222
5223 // ---------- scan from roots --------------
5224 _timer.start();
5225 GenCollectedHeap* gch = GenCollectedHeap::heap();
5226 Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5227 KlassToOopClosure klass_closure(&par_mri_cl);
5228
5229 // ---------- young gen roots --------------
5230 {
5231 work_on_young_gen_roots(worker_id, &par_mri_cl);
5232 _timer.stop();
5233 if (PrintCMSStatistics != 0) {
5234 gclog_or_tty->print_cr(
5235 "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5236 worker_id, _timer.seconds());
5237 }
5238 }
5239
5240 // ---------- remaining roots --------------
5241 _timer.reset();
5242 _timer.start();
5243 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5244 false, // yg was scanned above
5245 false, // this is parallel code
5246 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5247 &par_mri_cl,
5281
5282 OopTaskQueueSet* task_queues() { return _task_queues; }
5283
5284 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5285
5286 ParallelTaskTerminator* terminator() { return &_term; }
5287 int n_workers() { return _n_workers; }
5288
5289 void work(uint worker_id);
5290
5291 private:
5292 // ... of dirty cards in old space
5293 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5294 Par_MarkRefsIntoAndScanClosure* cl);
5295
5296 // ... work stealing for the above
5297 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5298 };
5299
5300 class RemarkKlassClosure : public KlassClosure {
5301 KlassToOopClosure _cm_klass_closure;
5302 public:
5303 RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5304 void do_klass(Klass* k) {
5305 // Check if we have modified any oops in the Klass during the concurrent marking.
5306 if (k->has_accumulated_modified_oops()) {
5307 k->clear_accumulated_modified_oops();
5308
5309 // We could have transferred the current modified marks to the accumulated marks,
5310 // as we do from the Card Table to the Mod Union Table. But it's not really necessary.
5311 } else if (k->has_modified_oops()) {
5312 // Don't clear anything, this info is needed by the next young collection.
5313 } else {
5314 // No modified oops in the Klass.
5315 return;
5316 }
5317
5318 // The klass has modified fields, need to scan the klass.
5319 _cm_klass_closure.do_klass(k);
5320 }
5321 };
7718 _finger = addr + obj->size();
7719 assert(_finger > addr, "we just incremented it above");
7720 // Note: the finger doesn't advance while we drain
7721 // the stack below.
7722 bool res = _mark_stack->push(obj);
7723 assert(res, "Empty non-zero size stack should have space for single push");
7724 while (!_mark_stack->isEmpty()) {
7725 oop new_oop = _mark_stack->pop();
7726 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7727 // now scan this oop's oops
7728 new_oop->oop_iterate(&_pam_verify_closure);
7729 }
7730 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7731 return true;
7732 }
7733
7734 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7735 CMSCollector* collector, MemRegion span,
7736 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7737 CMSMarkStack* mark_stack):
7738 MetadataAwareOopClosure(collector->ref_processor()),
7739 _collector(collector),
7740 _span(span),
7741 _verification_bm(verification_bm),
7742 _cms_bm(cms_bm),
7743 _mark_stack(mark_stack)
7744 { }
7745
7746 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7747 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7748
7749 // Upon stack overflow, we discard (part of) the stack,
7750 // remembering the least address amongst those discarded
7751 // in CMSCollector's _restart_addr.
7752 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7753 // Remember the least grey address discarded
7754 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7755 _collector->lower_restart_addr(ra);
7756 _mark_stack->reset(); // discard stack contents
7757 _mark_stack->expand(); // expand the stack if possible
7758 }
7771 }
7772
7773 if (!_mark_stack->push(obj)) { // stack overflow
7774 if (PrintCMSStatistics != 0) {
7775 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7776 SIZE_FORMAT, _mark_stack->capacity());
7777 }
7778 assert(_mark_stack->isFull(), "Else push should have succeeded");
7779 handle_stack_overflow(addr);
7780 }
7781 // anything including and to the right of _finger
7782 // will be scanned as we iterate over the remainder of the
7783 // bit map
7784 }
7785 }
7786
7787 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7788 MemRegion span,
7789 CMSBitMap* bitMap, CMSMarkStack* markStack,
7790 HeapWord* finger, MarkFromRootsClosure* parent) :
7791 MetadataAwareOopClosure(collector->ref_processor()),
7792 _collector(collector),
7793 _span(span),
7794 _bitMap(bitMap),
7795 _markStack(markStack),
7796 _finger(finger),
7797 _parent(parent)
7798 { }
7799
7800 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7801 MemRegion span,
7802 CMSBitMap* bit_map,
7803 OopTaskQueue* work_queue,
7804 CMSMarkStack* overflow_stack,
7805 HeapWord* finger,
7806 HeapWord** global_finger_addr,
7807 Par_MarkFromRootsClosure* parent) :
7808 MetadataAwareOopClosure(collector->ref_processor()),
7809 _collector(collector),
7810 _whole_span(collector->_span),
7811 _span(span),
7812 _bit_map(bit_map),
7813 _work_queue(work_queue),
7814 _overflow_stack(overflow_stack),
7815 _finger(finger),
7816 _global_finger_addr(global_finger_addr),
7817 _parent(parent)
7818 { }
7819
7820 // Assumes thread-safe access by callers, who are
7821 // responsible for mutual exclusion.
7822 void CMSCollector::lower_restart_addr(HeapWord* low) {
7823 assert(_span.contains(low), "Out of bounds addr");
7824 if (_restart_addr == NULL) {
7825 _restart_addr = low;
7826 } else {
7827 _restart_addr = MIN2(_restart_addr, low);
7828 }
7837 _collector->lower_restart_addr(ra);
7838 _markStack->reset(); // discard stack contents
7839 _markStack->expand(); // expand the stack if possible
7840 }
7841
7842 // Upon stack overflow, we discard (part of) the stack,
7843 // remembering the least address amongst those discarded
7844 // in CMSCollector's _restart_addr.
7845 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7846 // We need to do this under a mutex to prevent other
7847 // workers from interfering with the work done below.
7848 MutexLockerEx ml(_overflow_stack->par_lock(),
7849 Mutex::_no_safepoint_check_flag);
7850 // Remember the least grey address discarded
7851 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7852 _collector->lower_restart_addr(ra);
7853 _overflow_stack->reset(); // discard stack contents
7854 _overflow_stack->expand(); // expand the stack if possible
7855 }
7856
7857 void PushOrMarkClosure::do_oop(oop obj) {
7858 // Ignore mark word because we are running concurrently with mutators.
7859 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7860 HeapWord* addr = (HeapWord*)obj;
7861 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7862 // Oop lies in _span and isn't yet grey or black
7863 _bitMap->mark(addr); // now grey
7864 if (addr < _finger) {
7865 // the bit map iteration has already either passed, or
7866 // sampled, this bit in the bit map; we'll need to
7867 // use the marking stack to scan this oop's oops.
7868 bool simulate_overflow = false;
7869 NOT_PRODUCT(
7870 if (CMSMarkStackOverflowALot &&
7871 _collector->simulate_overflow()) {
7872 // simulate a stack overflow
7873 simulate_overflow = true;
7874 }
7875 )
7876 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7934 // it may have been emptied since.
7935 assert(simulate_overflow ||
7936 _work_queue->size() == _work_queue->max_elems(),
7937 "Else push should have succeeded");
7938 handle_stack_overflow(addr);
7939 }
7940 do_yield_check();
7941 }
7942 }
7943
7944 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7945 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7946
7947 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7948 MemRegion span,
7949 ReferenceProcessor* rp,
7950 CMSBitMap* bit_map,
7951 CMSBitMap* mod_union_table,
7952 CMSMarkStack* mark_stack,
7953 bool concurrent_precleaning):
7954 MetadataAwareOopClosure(rp),
7955 _collector(collector),
7956 _span(span),
7957 _bit_map(bit_map),
7958 _mod_union_table(mod_union_table),
7959 _mark_stack(mark_stack),
7960 _concurrent_precleaning(concurrent_precleaning)
7961 {
7962 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7963 }
7964
7965 // Grey object rescan during pre-cleaning and second checkpoint phases --
7966 // the non-parallel version (the parallel version appears further below).
7967 void PushAndMarkClosure::do_oop(oop obj) {
7968 // Ignore mark word verification. If during concurrent precleaning,
7969 // the object monitor may be locked. If during the checkpoint
7970 // phases, the object may already have been reached by a different
7971 // path and may be at the end of the global overflow list (so
7972 // the mark word may be NULL).
7973 assert(obj->is_oop_or_null(true /* ignore mark word */),
7974 "expected an oop or NULL");
8007 _mod_union_table->mark_range(redirty_range);
8008 } else {
8009 _mod_union_table->mark(addr);
8010 }
8011 _collector->_ser_pmc_preclean_ovflw++;
8012 } else {
8013 // During the remark phase, we need to remember this oop
8014 // in the overflow list.
8015 _collector->push_on_overflow_list(obj);
8016 _collector->_ser_pmc_remark_ovflw++;
8017 }
8018 }
8019 }
8020 }
8021
8022 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
8023 MemRegion span,
8024 ReferenceProcessor* rp,
8025 CMSBitMap* bit_map,
8026 OopTaskQueue* work_queue):
8027 MetadataAwareOopClosure(rp),
8028 _collector(collector),
8029 _span(span),
8030 _bit_map(bit_map),
8031 _work_queue(work_queue)
8032 {
8033 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
8034 }
8035
8036 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
8037 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
8038
8039 // Grey object rescan during second checkpoint phase --
8040 // the parallel version.
8041 void Par_PushAndMarkClosure::do_oop(oop obj) {
8042 // In the assert below, we ignore the mark word because
8043 // this oop may point to an already visited object that is
8044 // on the overflow stack (in which case the mark word has
8045 // been hijacked for chaining into the overflow stack --
8046 // if this is the last object in the overflow stack then
8047 // its mark word will be NULL). Because this object may