923 assert_lock_strong(freelistLock());
924
925 #ifndef PRODUCT
926 if (GenCollectedHeap::heap()->promotion_should_fail()) {
927 return NULL;
928 }
929 #endif // #ifndef PRODUCT
930
931 oop res = _cmsSpace->promote(obj, obj_size);
932 if (res == NULL) {
933 // expand and retry
934 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
935 expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
936 // Since this is the old generation, we don't try to promote
937 // into a more senior generation.
938 res = _cmsSpace->promote(obj, obj_size);
939 }
940 if (res != NULL) {
941 // See comment in allocate() about when objects should
942 // be allocated live.
943 assert(obj->is_oop(), "Will dereference klass pointer below");
944 collector()->promoted(false, // Not parallel
945 (HeapWord*)res, obj->is_objArray(), obj_size);
946 // promotion counters
947 NOT_PRODUCT(
948 _numObjectsPromoted++;
949 _numWordsPromoted +=
950 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
951 )
952 }
953 return res;
954 }
955
956
957 // IMPORTANT: Notes on object size recognition in CMS.
958 // ---------------------------------------------------
959 // A block of storage in the CMS generation is always in
960 // one of three states: a free block (FREE), an allocated
961 // object (OBJECT) whose size() method reports the correct size,
962 // or an intermediate state (TRANSIENT) in which its size cannot
963 // be accurately determined.
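// A minimal sketch (not part of this file) of the three-way classification
// described above. BlockState and classify_block are hypothetical names;
// FreeChunk::is_free() and klass_or_null_acquire() are the real queries
// used by the scanning and sweeping closures later in this file.
enum BlockState { FREE, OBJECT, TRANSIENT };

static BlockState classify_block(HeapWord* addr) {
  if (((FreeChunk*)addr)->is_free()) {
    return FREE;       // a free block on (or destined for) the free lists
  }
  if (oop(addr)->klass_or_null_acquire() != NULL) {
    return OBJECT;     // initialized: size() reports the correct size
  }
  return TRANSIENT;    // allocated but not yet published: size unreadable
}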
1046
1047 if (UseCompressedClassPointers) {
1048 // Copy gap missed by (aligned) header size calculation below
1049 obj->set_klass_gap(old->klass_gap());
1050 }
1051 if (word_sz > (size_t)oopDesc::header_size()) {
1052 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1053 obj_ptr + oopDesc::header_size(),
1054 word_sz - oopDesc::header_size());
1055 }
1056
1057 // Now we can track the promoted object, if necessary. We take care
1058 // to delay the transition from uninitialized to full object
1059 // (i.e., insertion of klass pointer) until after, so that it
1060 // atomically becomes a promoted object.
1061 if (promoInfo->tracking()) {
1062 promoInfo->track((PromotedObject*)obj, old->klass());
1063 }
1064 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1065 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1066 assert(old->is_oop(), "Will use and dereference old klass ptr below");
1067
1068 // Finally, install the klass pointer (this should be volatile).
1069 OrderAccess::storestore();
1070 obj->set_klass(old->klass());
1071 // We should now be able to calculate the right size for this object
1072 assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1073
1074 collector()->promoted(true, // parallel
1075 obj_ptr, old->is_objArray(), word_sz);
1076
1077 NOT_PRODUCT(
1078 Atomic::inc_ptr(&_numObjectsPromoted);
1079 Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1080 )
1081
1082 return obj;
1083 }
1084
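// A minimal sketch (not part of this file) of the reader side that pairs
// with the OrderAccess::storestore() above: once a scanner observes a
// non-NULL klass via an acquire load, the payload copied before the
// barrier is also visible, so size() can be trusted. is_published is a
// hypothetical helper; klass_or_null_acquire() is the real query used by
// the preclean and sweep closures later in this file.
static bool is_published(oop p) {
  // acquire-load of the klass word: NULL means the block is still TRANSIENT
  return p->klass_or_null_acquire() != NULL;
}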
1085 void
1086 ConcurrentMarkSweepGeneration::
1087 par_promote_alloc_done(int thread_num) {
1088 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1089 ps->lab.retire(thread_num);
1090 }
1091
1092 void
3331 virtual void do_oop(narrowOop* p);
3332
3333 void trim_queue(size_t max);
3334 void handle_stack_overflow(HeapWord* lost);
3335 void do_yield_check() {
3336 if (_task->should_yield()) {
3337 _task->yield();
3338 }
3339 }
3340 };
3341
3342 DO_OOP_WORK_IMPL(ParConcMarkingClosure)
3343
3344 // Grey object scanning during work stealing phase --
3345 // the salient assumption here is that any references
3346 // that are in these stolen objects being scanned must
3347 // already have been initialized (else they would not have
3348 // been published), so we do not need to check for
3349 // uninitialized objects before pushing here.
3350 void ParConcMarkingClosure::do_oop(oop obj) {
3351 assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
3352 HeapWord* addr = (HeapWord*)obj;
3353 // Check if oop points into the CMS generation
3354 // and is not marked
3355 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3356 // a white object ...
3357 // If we manage to "claim" the object, by being the
3358 // first thread to mark it, then we push it on our
3359 // marking stack
3360 if (_bit_map->par_mark(addr)) { // ... now grey
3361 // push on work queue (grey set)
3362 bool simulate_overflow = false;
3363 NOT_PRODUCT(
3364 if (CMSMarkStackOverflowALot &&
3365 _collector->simulate_overflow()) {
3366 // simulate a stack overflow
3367 simulate_overflow = true;
3368 }
3369 )
3370 if (simulate_overflow ||
3371 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3372 // stack overflow
3373 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
3374 // We cannot assert that the overflow stack is full because
3375 // it may have been emptied since.
3376 assert(simulate_overflow ||
3377 _work_queue->size() == _work_queue->max_elems(),
3378 "Else push should have succeeded");
3379 handle_stack_overflow(addr);
3380 }
3381 } // Else, some other thread got there first
3382 do_yield_check();
3383 }
3384 }
3385
3386 void ParConcMarkingClosure::do_oop(oop* p) { ParConcMarkingClosure::do_oop_work(p); }
3387 void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
3388
3389 void ParConcMarkingClosure::trim_queue(size_t max) {
3390 while (_work_queue->size() > max) {
3391 oop new_oop;
3392 if (_work_queue->pop_local(new_oop)) {
3393 assert(new_oop->is_oop(), "Should be an oop");
3394 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3395 assert(_span.contains((HeapWord*)new_oop), "Not in span");
3396 new_oop->oop_iterate(this); // do_oop() above
3397 do_yield_check();
3398 }
3399 }
3400 }
3401
3402 // Upon stack overflow, we discard (part of) the stack,
3403 // remembering the least address amongst those discarded
3404 // in CMSCollector's _restart_address.
3405 void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3406 // We need to do this under a mutex to prevent other
3407 // workers from interfering with the work done below.
3408 MutexLockerEx ml(_overflow_stack->par_lock(),
3409 Mutex::_no_safepoint_check_flag);
3410 // Remember the least grey address discarded
3411 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3412 _collector->lower_restart_addr(ra);
3413 _overflow_stack->reset(); // discard stack contents
3414 _overflow_stack->expand(); // expand the stack if possible
3415 }
3416
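// A minimal sketch (not part of this file) of how marking recovers from
// the truncation above: rescan from the recorded restart address until no
// further overflow is reported. take_restart_address() and
// mark_bit_map_range() are hypothetical names for the corresponding
// CMSCollector machinery.
static void mark_until_no_restart(CMSCollector* c, MemRegion span) {
  HeapWord* start = span.start();
  do {
    mark_bit_map_range(c, start, span.end()); // may record a restart address
    start = c->take_restart_address();        // least discarded grey address,
  } while (start != NULL);                    // or NULL if nothing was lost
}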
3417
3418 void CMSConcMarkingTask::do_work_steal(int i) {
3419 OopTaskQueue* work_q = work_queue(i);
3420 oop obj_to_scan;
3421 CMSBitMap* bm = &(_collector->_markBitMap);
3422 CMSMarkStack* ovflw = &(_collector->_markStack);
3423 int* seed = _collector->hash_seed(i);
3424 ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3425 while (true) {
3426 cl.trim_queue(0);
3427 assert(work_q->size() == 0, "Should have been emptied above");
3428 if (get_work_from_overflow_stack(ovflw, work_q)) {
3429 // Can't assert below because the work obtained from the
3430 // overflow stack may already have been stolen from us.
3431 // assert(work_q->size() > 0, "Work from overflow stack");
3432 continue;
3433 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3434 assert(obj_to_scan->is_oop(), "Should be an oop");
3435 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3436 obj_to_scan->oop_iterate(&cl);
3437 } else if (terminator()->offer_termination(&_term_term)) {
3438 assert(work_q->size() == 0, "Impossible!");
3439 break;
3440 } else if (yielding() || should_yield()) {
3441 yield();
3442 }
3443 }
3444 }
3445
3446 // This is run by the CMS (coordinator) thread.
3447 void CMSConcMarkingTask::coordinator_yield() {
3448 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3449 "CMS thread should hold CMS token");
3450 // First give up the locks, then yield, then re-lock
3451 // We should probably use a constructor/destructor idiom to
3452 // do this unlock/lock or modify the MutexUnlocker class to
3453 // serve our purpose. XXX
3454 assert_lock_strong(_bit_map_lock);
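// A minimal sketch (not part of this file) of the constructor/destructor
// idiom suggested by the XXX comment above: a scoped helper that gives up
// the lock on entry and re-takes it on exit, so the unlock/yield/re-lock
// sequence cannot be left half-done. ScopedBitMapUnlocker is a
// hypothetical name; the Mutex calls are the ones used in this file.
class ScopedBitMapUnlocker : public StackObj {
  Mutex* _lock;
 public:
  ScopedBitMapUnlocker(Mutex* lock) : _lock(lock) {
    _lock->unlock();                        // give up the lock ...
  }
  ~ScopedBitMapUnlocker() {
    _lock->lock_without_safepoint_check();  // ... and re-take it on exit
  }
};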
4505 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4506 start = space->bottom();
4507 end = space->top();
4508 } else if (nth_task == 0) {
4509 start = space->bottom();
4510 end = chunk_array[nth_task];
4511 } else if (nth_task < (uint)chunk_top) {
4512 assert(nth_task >= 1, "Control point invariant");
4513 start = chunk_array[nth_task - 1];
4514 end = chunk_array[nth_task];
4515 } else {
4516 assert(nth_task == (uint)chunk_top, "Control point invariant");
4517 start = chunk_array[chunk_top - 1];
4518 end = space->top();
4519 }
4520 MemRegion mr(start, end);
4521 // Verify that mr is in space
4522 assert(mr.is_empty() || space->used_region().contains(mr),
4523 "Should be in space");
4524 // Verify that "start" is an object boundary
4525 assert(mr.is_empty() || oop(mr.start())->is_oop(),
4526 "Should be an oop");
4527 space->par_oop_iterate(mr, cl);
4528 }
4529 pst->all_tasks_completed();
4530 }
4531 }
4532
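// Worked example of the task-to-region mapping above (boundary values are
// illustrative): with chunk_array = { c0, c1 } and chunk_top == 2, the
// claimed tasks scan
//   task 0: [space->bottom(), c0)
//   task 1: [c0, c1)
//   task 2: [c1, space->top())
// so the tasks tile the used region exactly once, and each internal
// boundary is an object start recorded by the eden sampling code below.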
4533 void
4534 CMSParRemarkTask::do_dirty_card_rescan_tasks(
4535 CompactibleFreeListSpace* sp, int i,
4536 ParMarkRefsIntoAndScanClosure* cl) {
4537 // Until all tasks completed:
4538 // . claim an unclaimed task
4539 // . compute region boundaries corresponding to task claimed
4540 // . transfer dirty bits ct->mut for that region
4541 // . apply rescan closure to dirty mut bits for that region
4542
4543 ResourceMark rm;
4544 HandleMark hm;
4545
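// A minimal sketch (not part of this file) of the loop outlined above;
// claim_task(), region_for_task(), and ct_to_mut_transfer() are
// hypothetical stand-ins for the sub-task claiming protocol, the chunk
// arithmetic, and the card-table-to-mod-union-table bit transfer. sp and
// cl are this function's real parameters.
uint nth_task = 0;
while (claim_task(&nth_task)) {                  // . claim an unclaimed task
  MemRegion mr = region_for_task(sp, nth_task);  // . compute region boundaries
  ct_to_mut_transfer(mr);                        // . transfer dirty bits ct->mut
  rescan_dirty_mut_bits(mr, cl);                 // . apply rescan closure to mut
}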
4638 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4639 // only affects the number of attempts made to get work from the
4640 // overflow list and does not affect the number of workers. Just
4641 // pass ParallelGCThreads so this behavior is unchanged.
4642 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4643 work_q,
4644 ParallelGCThreads)) {
4645 // found something in global overflow list;
4646 // not yet ready to go stealing work from others.
4647 // We'd like to assert(work_q->size() != 0, ...)
4648 // because we just took work from the overflow list,
4649 // but of course we can't since all of that could have
4650 // been already stolen from us.
4651 // "He giveth and He taketh away."
4652 continue;
4653 }
4654 // Verify that we have no work before we resort to stealing
4655 assert(work_q->size() == 0, "Have work, shouldn't steal");
4656 // Try to steal from other queues that have work
4657 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4658 NOT_PRODUCT(num_steals++;)
4659 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4660 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4661 // Do scanning work
4662 obj_to_scan->oop_iterate(cl);
4663 // Loop around, finish this work, and try to steal some more
4664 } else if (terminator()->offer_termination()) {
4665 break; // nirvana from the infinite cycle
4666 }
4667 }
4668 log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
4669 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4670 "Else our work is not yet done");
4671 }
4672
4673 // Record object boundaries in _eden_chunk_array by sampling the eden
4674 // top in the slow-path eden object allocation code path and recording
4675 // the boundaries there, if CMSEdenChunksRecordAlways is true. If
4676 // CMSEdenChunksRecordAlways is false, we rely instead on the
4677 // asynchronous sampling in sample_eden(), which is active during
4678 // part of the preclean phase.
4679 void CMSCollector::sample_eden_chunk() {
5118 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5119 // only affects the number of attempts made to get work from the
5120 // overflow list and does not affect the number of workers. Just
5121 // pass ParallelGCThreads so this behavior is unchanged.
5122 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5123 work_q,
5124 ParallelGCThreads)) {
5125 // Found something in global overflow list;
5126 // not yet ready to go stealing work from others.
5127 // We'd like to assert(work_q->size() != 0, ...)
5128 // because we just took work from the overflow list,
5129 // but of course we can't, since all of that might have
5130 // been already stolen from us.
5131 continue;
5132 }
5133 // Verify that we have no work before we resort to stealing
5134 assert(work_q->size() == 0, "Have work, shouldn't steal");
5135 // Try to steal from other queues that have work
5136 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5137 NOT_PRODUCT(num_steals++;)
5138 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5139 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5140 // Do scanning work
5141 obj_to_scan->oop_iterate(keep_alive);
5142 // Loop around, finish this work, and try to steal some more
5143 } else if (terminator()->offer_termination()) {
5144 break; // nirvana from the infinite cycle
5145 }
5146 }
5147 log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
5148 }
5149
5150 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5151 {
5152 GenCollectedHeap* gch = GenCollectedHeap::heap();
5153 WorkGang* workers = gch->workers();
5154 assert(workers != NULL, "Need parallel worker threads.");
5155 CMSRefProcTaskProxy rp_task(task, &_collector,
5156 _collector.ref_processor()->span(),
5157 _collector.markBitMap(),
5158 workers, _collector.task_queues());
5808 // XXX: there seems to be a lot of code duplication here;
5809 // should refactor and consolidate common code.
5810
5811 // This closure is used to mark refs into the CMS generation in
5812 // the CMS bit map. Called at the first checkpoint. This closure
5813 // assumes that we do not need to re-mark dirty cards; if the CMS
5814 // generation on which this is used is not an oldest
5815 // generation then this will lose younger_gen cards!
5816
5817 MarkRefsIntoClosure::MarkRefsIntoClosure(
5818 MemRegion span, CMSBitMap* bitMap):
5819 _span(span),
5820 _bitMap(bitMap)
5821 {
5822 assert(ref_processor() == NULL, "deliberately left NULL");
5823 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5824 }
5825
5826 void MarkRefsIntoClosure::do_oop(oop obj) {
5827 // if p points into _span, then mark corresponding bit in _markBitMap
5828 assert(obj->is_oop(), "expected an oop");
5829 HeapWord* addr = (HeapWord*)obj;
5830 if (_span.contains(addr)) {
5831 // this should be made more efficient
5832 _bitMap->mark(addr);
5833 }
5834 }
5835
5836 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
5837 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
5838
5839 ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
5840 MemRegion span, CMSBitMap* bitMap):
5841 _span(span),
5842 _bitMap(bitMap)
5843 {
5844 assert(ref_processor() == NULL, "deliberately left NULL");
5845 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5846 }
5847
5848 void ParMarkRefsIntoClosure::do_oop(oop obj) {
5849 // if p points into _span, then mark corresponding bit in _markBitMap
5850 assert(obj->is_oop(), "expected an oop");
5851 HeapWord* addr = (HeapWord*)obj;
5852 if (_span.contains(addr)) {
5853 // this should be made more efficient
5854 _bitMap->par_mark(addr);
5855 }
5856 }
5857
5858 void ParMarkRefsIntoClosure::do_oop(oop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
5859 void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
5860
5861 // A variant of the above, used for CMS marking verification.
5862 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5863 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5864 _span(span),
5865 _verification_bm(verification_bm),
5866 _cms_bm(cms_bm)
5867 {
5868 assert(ref_processor() == NULL, "deliberately left NULL");
5869 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5870 }
5871
5872 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5873 // if p points into _span, then mark corresponding bit in _markBitMap
5874 assert(obj->is_oop(), "expected an oop");
5875 HeapWord* addr = (HeapWord*)obj;
5876 if (_span.contains(addr)) {
5877 _verification_bm->mark(addr);
5878 if (!_cms_bm->isMarked(addr)) {
5879 Log(gc, verify) log;
5880 ResourceMark rm;
5881 LogStream ls(log.error());
5882 oop(addr)->print_on(&ls);
5883 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5884 fatal("... aborting");
5885 }
5886 }
5887 }
5888
5889 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5890 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5891
5892 //////////////////////////////////////////////////
5893 // MarkRefsIntoAndScanClosure
5894 //////////////////////////////////////////////////
5908 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
5909 mark_stack, concurrent_precleaning),
5910 _yield(should_yield),
5911 _concurrent_precleaning(concurrent_precleaning),
5912 _freelistLock(NULL)
5913 {
5914 // FIXME: Should initialize in base class constructor.
5915 assert(rp != NULL, "ref_processor shouldn't be NULL");
5916 set_ref_processor_internal(rp);
5917 }
5918
5919 // This closure is used to mark refs into the CMS generation at the
5920 // second (final) checkpoint, and to scan and transitively follow
5921 // the unmarked oops. It is also used during the concurrent precleaning
5922 // phase while scanning objects on dirty cards in the CMS generation.
5923 // The marks are made in the marking bit map and the marking stack is
5924 // used for keeping the (newly) grey objects during the scan.
5925 // The parallel version (Par_...) appears further below.
5926 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
5927 if (obj != NULL) {
5928 assert(obj->is_oop(), "expected an oop");
5929 HeapWord* addr = (HeapWord*)obj;
5930 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
5931 assert(_collector->overflow_list_is_empty(),
5932 "overflow list should be empty");
5933 if (_span.contains(addr) &&
5934 !_bit_map->isMarked(addr)) {
5935 // mark bit map (object is now grey)
5936 _bit_map->mark(addr);
5937 // push on marking stack (stack should be empty), and drain the
5938 // stack by applying this closure to the oops in the oops popped
5939 // from the stack (i.e. blacken the grey objects)
5940 bool res = _mark_stack->push(obj);
5941 assert(res, "Should have space to push on empty stack");
5942 do {
5943 oop new_oop = _mark_stack->pop();
5944 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
5945 assert(_bit_map->isMarked((HeapWord*)new_oop),
5946 "only grey objects on this stack");
5947 // iterate over the oops in this oop, marking and pushing
5948 // the ones in CMS heap (i.e. in _span).
5949 new_oop->oop_iterate(&_pushAndMarkClosure);
5950 // check if it's time to yield
5951 do_yield_check();
5952 } while (!_mark_stack->isEmpty() ||
5953 (!_concurrent_precleaning && take_from_overflow_list()));
5954 // if marking stack is empty, and we are not doing this
5955 // during precleaning, then check the overflow list
5956 }
5957 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
5958 assert(_collector->overflow_list_is_empty(),
5959 "overflow list was drained above");
5960
5961 assert(_collector->no_preserved_marks(),
5962 "All preserved marks should have been restored above");
5963 }
5964 }
6006 _low_water_mark(MIN2((work_queue->max_elems()/4),
6007 ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6008 _parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6009 {
6010 // FIXME: Should initialize in base class constructor.
6011 assert(rp != NULL, "ref_processor shouldn't be NULL");
6012 set_ref_processor_internal(rp);
6013 }
6014
6015 // This closure is used to mark refs into the CMS generation at the
6016 // second (final) checkpoint, and to scan and transitively follow
6017 // the unmarked oops. The marks are made in the marking bit map and
6018 // the work_queue is used for keeping the (newly) grey objects during
6019 // the scan phase whence they are also available for stealing by parallel
6020 // threads. Since the marking bit map is shared, updates are
6021 // synchronized (via CAS).
6022 void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
6023 if (obj != NULL) {
6024 // Ignore mark word because this could be an already marked oop
6025 // that may be chained at the end of the overflow list.
6026 assert(obj->is_oop(true), "expected an oop");
6027 HeapWord* addr = (HeapWord*)obj;
6028 if (_span.contains(addr) &&
6029 !_bit_map->isMarked(addr)) {
6030 // mark bit map (object will become grey):
6031 // It is possible for several threads to be
6032 // trying to "claim" this object concurrently;
6033 // the unique thread that succeeds in marking the
6034 // object first will do the subsequent push on
6035 // to the work queue (or overflow list).
6036 if (_bit_map->par_mark(addr)) {
6037 // push on work_queue (which may not be empty), and trim the
6038 // queue to an appropriate length by applying this closure to
6039 // the oops in the oops popped from the stack (i.e. blacken the
6040 // grey objects)
6041 bool res = _work_queue->push(obj);
6042 assert(res, "Low water mark should be less than capacity?");
6043 trim_queue(_low_water_mark);
6044 } // Else, another thread claimed the object
6045 }
6046 }
6052 // This closure is used to rescan the marked objects on the dirty cards
6053 // in the mod union table and the card table proper.
6054 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6055 oop p, MemRegion mr) {
6056
6057 size_t size = 0;
6058 HeapWord* addr = (HeapWord*)p;
6059 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6060 assert(_span.contains(addr), "we are scanning the CMS generation");
6061 // check if it's time to yield
6062 if (do_yield_check()) {
6063 // We yielded for some foreground stop-world work,
6064 // and we have been asked to abort this ongoing preclean cycle.
6065 return 0;
6066 }
6067 if (_bitMap->isMarked(addr)) {
6068 // it's marked; is it potentially uninitialized?
6069 if (p->klass_or_null_acquire() != NULL) {
6070 // an initialized object; ignore mark word in verification below
6071 // since we are running concurrent with mutators
6072 assert(p->is_oop(true), "should be an oop");
6073 if (p->is_objArray()) {
6074 // objArrays are precisely marked; restrict scanning
6075 // to dirty cards only.
6076 size = CompactibleFreeListSpace::adjustObjectSize(
6077 p->oop_iterate_size(_scanningClosure, mr));
6078 } else {
6079 // A non-array may have been imprecisely marked; we need
6080 // to scan object in its entirety.
6081 size = CompactibleFreeListSpace::adjustObjectSize(
6082 p->oop_iterate_size(_scanningClosure));
6083 }
6084 #ifdef ASSERT
6085 size_t direct_size =
6086 CompactibleFreeListSpace::adjustObjectSize(p->size());
6087 assert(size == direct_size, "Inconsistency in size");
6088 assert(size >= 3, "Necessary for Printezis marks to work");
6089 HeapWord* start_pbit = addr + 1;
6090 HeapWord* end_pbit = addr + size - 1;
6091 assert(_bitMap->isMarked(start_pbit) == _bitMap->isMarked(end_pbit),
6092 "inconsistent Printezis mark");
6101 } else {
6102 // An uninitialized object.
6103 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6104 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6105 size = pointer_delta(nextOneAddr + 1, addr);
6106 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6107 "alignment problem");
6108 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6109 // will dirty the card when the klass pointer is installed in the
6110 // object (signaling the completion of initialization).
6111 }
6112 } else {
6113 // Either a not yet marked object or an uninitialized object
6114 if (p->klass_or_null_acquire() == NULL) {
6115 // An uninitialized object, skip to the next card, since
6116 // we may not be able to read its P-bits yet.
6117 assert(size == 0, "Initial value");
6118 } else {
6119 // An object not (yet) reached by marking: we merely need to
6120 // compute its size so as to go look at the next block.
6121 assert(p->is_oop(true), "should be an oop");
6122 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6123 }
6124 }
6125 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6126 return size;
6127 }
6128
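// A minimal sketch (not part of this file) of the "Printezis mark" size
// encoding relied on above: for a block whose header is not yet
// initialized, CMS marks its second word (addr + 1) and its last word
// (addr + size - 1) in the marking bit map. Since size >= 3, a scanner
// that cannot call size() recovers the extent by searching from addr + 2.
// printezis_size is a hypothetical name; the two bit-map calls are real.
static size_t printezis_size(CMSBitMap* bm, HeapWord* addr) {
  assert(bm->isMarked(addr + 1), "missing Printezis start bit");
  HeapWord* last = bm->getNextMarkedWordAddress(addr + 2);
  return pointer_delta(last + 1, addr);  // one past the last word, minus start
}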
6129 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6130 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6131 "CMS thread should hold CMS token");
6132 assert_lock_strong(_freelistLock);
6133 assert_lock_strong(_bitMap->lock());
6134 // relinquish the free list lock and the bitMap lock
6135 _bitMap->lock()->unlock();
6136 _freelistLock->unlock();
6137 ConcurrentMarkSweepThread::desynchronize(true);
6138 _collector->stopTimer();
6139 _collector->incrementYields();
6140
6141 // See the comment in coordinator_yield()
6148 ConcurrentMarkSweepThread::synchronize(true);
6149 _freelistLock->lock_without_safepoint_check();
6150 _bitMap->lock()->lock_without_safepoint_check();
6151 _collector->startTimer();
6152 }
6153
6154
6155 //////////////////////////////////////////////////////////////////
6156 // SurvivorSpacePrecleanClosure
6157 //////////////////////////////////////////////////////////////////
6158 // This (single-threaded) closure is used to preclean the oops in
6159 // the survivor spaces.
6160 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6161
6162 HeapWord* addr = (HeapWord*)p;
6163 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6164 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6165 assert(p->klass_or_null() != NULL, "object should be initialized");
6166 // an initialized object; ignore mark word in verification below
6167 // since we are running concurrent with mutators
6168 assert(p->is_oop(true), "should be an oop");
6169 // Note that we do not yield while we iterate over
6170 // the interior oops of p, pushing the relevant ones
6171 // on our marking stack.
6172 size_t size = p->oop_iterate_size(_scanning_closure);
6173 do_yield_check();
6174 // Observe that below, we do not abandon the preclean
6175 // phase as soon as we should; rather we empty the
6176 // marking stack before returning. This is to satisfy
6177 // some existing assertions. In general, it may be a
6178 // good idea to abort immediately and complete the marking
6179 // from the grey objects at a later time.
6180 while (!_mark_stack->isEmpty()) {
6181 oop new_oop = _mark_stack->pop();
6182 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6183 assert(_bit_map->isMarked((HeapWord*)new_oop),
6184 "only grey objects on this stack");
6185 // iterate over the oops in this oop, marking and pushing
6186 // the ones in CMS heap (i.e. in _span).
6187 new_oop->oop_iterate(_scanning_closure);
6188 // check if it's time to yield
6189 do_yield_check();
6190 }
6191 unsigned int after_count =
6192 GenCollectedHeap::heap()->total_collections();
6193 bool abort = (_before_count != after_count) ||
6194 _collector->should_abort_preclean();
6195 return abort ? 0 : size;
6196 }
6197
6198 void SurvivorSpacePrecleanClosure::do_yield_work() {
6199 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6200 "CMS thread should hold CMS token");
6201 assert_lock_strong(_bit_map->lock());
6202 // Relinquish the bit map lock
6206 _collector->incrementYields();
6207
6208 // See the comment in coordinator_yield()
6209 for (unsigned i = 0; i < CMSYieldSleepCount &&
6210 ConcurrentMarkSweepThread::should_yield() &&
6211 !CMSCollector::foregroundGCIsActive(); ++i) {
6212 os::sleep(Thread::current(), 1, false);
6213 }
6214
6215 ConcurrentMarkSweepThread::synchronize(true);
6216 _bit_map->lock()->lock_without_safepoint_check();
6217 _collector->startTimer();
6218 }
6219
6220 // This closure is used to rescan the marked objects on the dirty cards
6221 // in the mod union table and the card table proper. In the parallel
6222 // case, although the bitMap is shared, we do a single read so the
6223 // isMarked() query is "safe".
6224 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6225 // Ignore mark word because we are running concurrent with mutators
6226 assert(p->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
6227 HeapWord* addr = (HeapWord*)p;
6228 assert(_span.contains(addr), "we are scanning the CMS generation");
6229 bool is_obj_array = false;
6230 #ifdef ASSERT
6231 if (!_parallel) {
6232 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6233 assert(_collector->overflow_list_is_empty(),
6234 "overflow list should be empty");
6235
6236 }
6237 #endif // ASSERT
6238 if (_bit_map->isMarked(addr)) {
6239 // Obj arrays are precisely marked, non-arrays are not;
6240 // so we scan objArrays precisely and non-arrays in their
6241 // entirety.
6242 if (p->is_objArray()) {
6243 is_obj_array = true;
6244 if (_parallel) {
6245 p->oop_iterate(_par_scan_closure, mr);
6246 } else {
6359 // See the comment in coordinator_yield()
6360 for (unsigned i = 0; i < CMSYieldSleepCount &&
6361 ConcurrentMarkSweepThread::should_yield() &&
6362 !CMSCollector::foregroundGCIsActive(); ++i) {
6363 os::sleep(Thread::current(), 1, false);
6364 }
6365
6366 ConcurrentMarkSweepThread::synchronize(true);
6367 _bitMap->lock()->lock_without_safepoint_check();
6368 _collector->startTimer();
6369 }
6370
6371 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6372 assert(_bitMap->isMarked(ptr), "expected bit to be set");
6373 assert(_markStack->isEmpty(),
6374 "should drain stack to limit stack usage");
6375 // convert ptr to an oop preparatory to scanning
6376 oop obj = oop(ptr);
6377 // Ignore mark word in verification below, since we
6378 // may be running concurrent with mutators.
6379 assert(obj->is_oop(true), "should be an oop");
6380 assert(_finger <= ptr, "_finger runneth ahead");
6381 // advance the finger to right end of this object
6382 _finger = ptr + obj->size();
6383 assert(_finger > ptr, "we just incremented it above");
6384 // On large heaps, it may take us some time to get through
6385 // the marking phase. During
6386 // this time it's possible that a lot of mutations have
6387 // accumulated in the card table and the mod union table --
6388 // these mutation records are redundant until we have
6389 // actually traced into the corresponding card.
6390 // Here, we check whether advancing the finger would make
6391 // us cross into a new card, and if so clear corresponding
6392 // cards in the MUT (preclean them in the card-table in the
6393 // future).
6394
6395 DEBUG_ONLY(if (!_verifying) {)
6396 // The clean-on-enter optimization is disabled by default,
6397 // until we fix 6178663.
6398 if (CMSCleanOnEnter && (_finger > _threshold)) {
6399 // [_threshold, _finger) represents the interval
6406 assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
6407 "_threshold should always be card-aligned");
6408 _threshold = align_up(_finger, CardTableModRefBS::card_size);
6409 MemRegion mr(old_threshold, _threshold);
6410 assert(!mr.is_empty(), "Control point invariant");
6411 assert(_span.contains(mr), "Should clear within span");
6412 _mut->clear_range(mr);
6413 }
6414 DEBUG_ONLY(})
6415 // Note: the finger doesn't advance while we drain
6416 // the stack below.
6417 PushOrMarkClosure pushOrMarkClosure(_collector,
6418 _span, _bitMap, _markStack,
6419 _finger, this);
6420 bool res = _markStack->push(obj);
6421 assert(res, "Empty non-zero size stack should have space for single push");
6422 while (!_markStack->isEmpty()) {
6423 oop new_oop = _markStack->pop();
6424 // Skip verifying header mark word below because we are
6425 // running concurrent with mutators.
6426 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6427 // now scan this oop's oops
6428 new_oop->oop_iterate(&pushOrMarkClosure);
6429 do_yield_check();
6430 }
6431 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6432 }
6433
6434 ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
6435 CMSCollector* collector, MemRegion span,
6436 CMSBitMap* bit_map,
6437 OopTaskQueue* work_queue,
6438 CMSMarkStack* overflow_stack):
6439 _collector(collector),
6440 _whole_span(collector->_span),
6441 _span(span),
6442 _bit_map(bit_map),
6443 _mut(&collector->_modUnionTable),
6444 _work_queue(work_queue),
6445 _overflow_stack(overflow_stack),
6446 _skip_bits(0),
6472 if (p->klass_or_null_acquire() == NULL) {
6473 // in the case of Clean-on-Enter optimization, redirty card
6474 // and avoid clearing card by increasing the threshold.
6475 return true;
6476 }
6477 }
6478 scan_oops_in_oop(addr);
6479 return true;
6480 }
6481
6482 void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6483 assert(_bit_map->isMarked(ptr), "expected bit to be set");
6484 // Should we assert that our work queue is empty or
6485 // below some drain limit?
6486 assert(_work_queue->size() == 0,
6487 "should drain stack to limit stack usage");
6488 // convert ptr to an oop preparatory to scanning
6489 oop obj = oop(ptr);
6490 // Ignore mark word in verification below, since we
6491 // may be running concurrent with mutators.
6492 assert(obj->is_oop(true), "should be an oop");
6493 assert(_finger <= ptr, "_finger runneth ahead");
6494 // advance the finger to right end of this object
6495 _finger = ptr + obj->size();
6496 assert(_finger > ptr, "we just incremented it above");
6497 // On large heaps, it may take us some time to get through
6498 // the marking phase. During
6499 // this time it's possible that a lot of mutations have
6500 // accumulated in the card table and the mod union table --
6501 // these mutation records are redundant until we have
6502 // actually traced into the corresponding card.
6503 // Here, we check whether advancing the finger would make
6504 // us cross into a new card, and if so clear corresponding
6505 // cards in the MUT (preclean them in the card-table in the
6506 // future).
6507
6508 // The clean-on-enter optimization is disabled by default,
6509 // until we fix 6178663.
6510 if (CMSCleanOnEnter && (_finger > _threshold)) {
6511 // [_threshold, _finger) represents the interval
6512 // of cards to be cleared in MUT (or precleaned in card table).
6533 _overflow_stack,
6534 _finger,
6535 gfa, this);
6536 bool res = _work_queue->push(obj); // overflow could occur here
6537 assert(res, "Will hold once we use workqueues");
6538 while (true) {
6539 oop new_oop;
6540 if (!_work_queue->pop_local(new_oop)) {
6541 // We emptied our work_queue; check if there's stuff that can
6542 // be gotten from the overflow stack.
6543 if (CMSConcMarkingTask::get_work_from_overflow_stack(
6544 _overflow_stack, _work_queue)) {
6545 do_yield_check();
6546 continue;
6547 } else { // done
6548 break;
6549 }
6550 }
6551 // Skip verifying header mark word below because we are
6552 // running concurrent with mutators.
6553 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6554 // now scan this oop's oops
6555 new_oop->oop_iterate(&pushOrMarkClosure);
6556 do_yield_check();
6557 }
6558 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6559 }
6560
6561 // Yield in response to a request from VM Thread or
6562 // from mutators.
6563 void ParMarkFromRootsClosure::do_yield_work() {
6564 assert(_task != NULL, "sanity");
6565 _task->yield();
6566 }
6567
6568 // A variant of the above used for verifying CMS marking work.
6569 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6570 MemRegion span,
6571 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6572 CMSMarkStack* mark_stack):
6573 _collector(collector),
6587 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6588 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6589 assert(_span.contains(addr), "Out of bounds _finger?");
6590 _finger = addr;
6591 }
6592
6593 // Should revisit to see if this should be restructured for
6594 // greater efficiency.
6595 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6596 // convert offset into a HeapWord*
6597 HeapWord* addr = _verification_bm->startWord() + offset;
6598 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
6599 "address out of range");
6600 assert(_verification_bm->isMarked(addr), "tautology");
6601 assert(_cms_bm->isMarked(addr), "tautology");
6602
6603 assert(_mark_stack->isEmpty(),
6604 "should drain stack to limit stack usage");
6605 // convert addr to an oop preparatory to scanning
6606 oop obj = oop(addr);
6607 assert(obj->is_oop(), "should be an oop");
6608 assert(_finger <= addr, "_finger runneth ahead");
6609 // advance the finger to right end of this object
6610 _finger = addr + obj->size();
6611 assert(_finger > addr, "we just incremented it above");
6612 // Note: the finger doesn't advance while we drain
6613 // the stack below.
6614 bool res = _mark_stack->push(obj);
6615 assert(res, "Empty non-zero size stack should have space for single push");
6616 while (!_mark_stack->isEmpty()) {
6617 oop new_oop = _mark_stack->pop();
6618 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6619 // now scan this oop's oops
6620 new_oop->oop_iterate(&_pam_verify_closure);
6621 }
6622 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6623 return true;
6624 }
6625
6626 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6627 CMSCollector* collector, MemRegion span,
6628 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6629 CMSMarkStack* mark_stack):
6630 MetadataAwareOopClosure(collector->ref_processor()),
6631 _collector(collector),
6632 _span(span),
6633 _verification_bm(verification_bm),
6634 _cms_bm(cms_bm),
6635 _mark_stack(mark_stack)
6636 { }
6637
6638 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6639 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6640
6641 // Upon stack overflow, we discard (part of) the stack,
6642 // remembering the least address amongst those discarded
6643 // in CMSCollector's _restart_address.
6644 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6645 // Remember the least grey address discarded
6646 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6647 _collector->lower_restart_addr(ra);
6648 _mark_stack->reset(); // discard stack contents
6649 _mark_stack->expand(); // expand the stack if possible
6650 }
6651
6652 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6653 assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6654 HeapWord* addr = (HeapWord*)obj;
6655 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6656 // Oop lies in _span and isn't yet grey or black
6657 _verification_bm->mark(addr); // now grey
6658 if (!_cms_bm->isMarked(addr)) {
6659 Log(gc, verify) log;
6660 ResourceMark rm;
6661 LogStream ls(log.error());
6662 oop(addr)->print_on(&ls);
6663 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6664 fatal("... aborting");
6665 }
6666
6667 if (!_mark_stack->push(obj)) { // stack overflow
6668 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
6669 assert(_mark_stack->isFull(), "Else push should have succeeded");
6670 handle_stack_overflow(addr);
6671 }
6672 // anything including and to the right of _finger
6673 // will be scanned as we iterate over the remainder of the
6730 _markStack->expand(); // expand the stack if possible
6731 }
6732
6733 // Upon stack overflow, we discard (part of) the stack,
6734 // remembering the least address amongst those discarded
6735 // in CMSCollector's _restart_address.
6736 void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6737 // We need to do this under a mutex to prevent other
6738 // workers from interfering with the work done below.
6739 MutexLockerEx ml(_overflow_stack->par_lock(),
6740 Mutex::_no_safepoint_check_flag);
6741 // Remember the least grey address discarded
6742 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6743 _collector->lower_restart_addr(ra);
6744 _overflow_stack->reset(); // discard stack contents
6745 _overflow_stack->expand(); // expand the stack if possible
6746 }
6747
6748 void PushOrMarkClosure::do_oop(oop obj) {
6749 // Ignore mark word because we are running concurrent with mutators.
6750 assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6751 HeapWord* addr = (HeapWord*)obj;
6752 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
6753 // Oop lies in _span and isn't yet grey or black
6754 _bitMap->mark(addr); // now grey
6755 if (addr < _finger) {
6756 // the bit map iteration has already either passed, or
6757 // sampled, this bit in the bit map; we'll need to
6758 // use the marking stack to scan this oop's oops.
6759 bool simulate_overflow = false;
6760 NOT_PRODUCT(
6761 if (CMSMarkStackOverflowALot &&
6762 _collector->simulate_overflow()) {
6763 // simulate a stack overflow
6764 simulate_overflow = true;
6765 }
6766 )
6767 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
6768 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
6769 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
6770 handle_stack_overflow(addr);
6771 }
6772 }
6773 // anything including and to the right of _finger
6774 // will be scanned as we iterate over the remainder of the
6775 // bit map
6776 do_yield_check();
6777 }
6778 }
6779
6780 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
6781 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
6782
6783 void ParPushOrMarkClosure::do_oop(oop obj) {
6784 // Ignore mark word because we are running concurrent with mutators.
6785 assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6786 HeapWord* addr = (HeapWord*)obj;
6787 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
6788 // Oop lies in _span and isn't yet grey or black
6789 // We read the global_finger (volatile read) strictly after marking oop
6790 bool res = _bit_map->par_mark(addr); // now grey
6791 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
6792 // Should we push this marked oop on our stack?
6793 // -- if someone else marked it, nothing to do
6794 // -- if target oop is above global finger nothing to do
6795 // -- if target oop is in chunk and above local finger
6796 // then nothing to do
6797 // -- else push on work queue
6798 if ( !res // someone else marked it, they will deal with it
6799 || (addr >= *gfa) // will be scanned in a later task
6800 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
6801 return;
6802 }
6803 // the bit map iteration has already either passed, or
6804 // sampled, this bit in the bit map; we'll need to
6805 // use the marking stack to scan this oop's oops.
6838 bool concurrent_precleaning):
6839 MetadataAwareOopClosure(rp),
6840 _collector(collector),
6841 _span(span),
6842 _bit_map(bit_map),
6843 _mod_union_table(mod_union_table),
6844 _mark_stack(mark_stack),
6845 _concurrent_precleaning(concurrent_precleaning)
6846 {
6847 assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6848 }
6849
6850 // Grey object rescan during pre-cleaning and second checkpoint phases --
6851 // the non-parallel version (the parallel version appears further below.)
6852 void PushAndMarkClosure::do_oop(oop obj) {
6853 // Ignore mark word verification. If during concurrent precleaning,
6854 // the object monitor may be locked. If during the checkpoint
6855 // phases, the object may already have been reached by a different
6856 // path and may be at the end of the global overflow list (so
6857 // the mark word may be NULL).
6858 assert(obj->is_oop_or_null(true /* ignore mark word */),
6859 "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6860 HeapWord* addr = (HeapWord*)obj;
6861 // Check if oop points into the CMS generation
6862 // and is not marked
6863 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6864 // a white object ...
6865 _bit_map->mark(addr); // ... now grey
6866 // push on the marking stack (grey set)
6867 bool simulate_overflow = false;
6868 NOT_PRODUCT(
6869 if (CMSMarkStackOverflowALot &&
6870 _collector->simulate_overflow()) {
6871 // simulate a stack overflow
6872 simulate_overflow = true;
6873 }
6874 )
6875 if (simulate_overflow || !_mark_stack->push(obj)) {
6876 if (_concurrent_precleaning) {
6877 // During precleaning we can just dirty the appropriate card(s)
6878 // in the mod union table, thus ensuring that the object remains
6917 assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6918 }
6919
6920 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
6921 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
6922
6923 // Grey object rescan during second checkpoint phase --
6924 // the parallel version.
6925 void ParPushAndMarkClosure::do_oop(oop obj) {
6926 // In the assert below, we ignore the mark word because
6927 // this oop may point to an already visited object that is
6928 // on the overflow stack (in which case the mark word has
6929 // been hijacked for chaining into the overflow stack --
6930 // if this is the last object in the overflow stack then
6931 // its mark word will be NULL). Because this object may
6932 // have been subsequently popped off the global overflow
6933 // stack, and the mark word possibly restored to the prototypical
6934 // value, by the time we get to examine this failing assert in
6935 // the debugger, is_oop_or_null(false) may subsequently start
6936 // to hold.
6937 assert(obj->is_oop_or_null(true),
6938 "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6939 HeapWord* addr = (HeapWord*)obj;
6940 // Check if oop points into the CMS generation
6941 // and is not marked
6942 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6943 // a white object ...
6944 // If we manage to "claim" the object, by being the
6945 // first thread to mark it, then we push it on our
6946 // marking stack
6947 if (_bit_map->par_mark(addr)) { // ... now grey
6948 // push on work queue (grey set)
6949 bool simulate_overflow = false;
6950 NOT_PRODUCT(
6951 if (CMSMarkStackOverflowALot &&
6952 _collector->par_simulate_overflow()) {
6953 // simulate a stack overflow
6954 simulate_overflow = true;
6955 }
6956 )
6957 if (simulate_overflow || !_work_queue->push(obj)) {
7308 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7309 do_post_free_or_garbage_chunk(fc, size);
7310
7311 assert(_limit >= addr + size,
7312 "A freshly garbage chunk can't possibly straddle over _limit");
7313 if (inFreeRange()) lookahead_and_flush(fc, size);
7314 return size;
7315 }
7316
7317 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7318 HeapWord* addr = (HeapWord*) fc;
7319 // The sweeper has just found a live object. Return any accumulated
7320 // left hand chunk to the free lists.
7321 if (inFreeRange()) {
7322 assert(freeFinger() < addr, "freeFinger points too high");
7323 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7324 }
7325
7326 // This object is live: we'd normally expect this to be
7327 // an oop, and like to assert the following:
7328 // assert(oop(addr)->is_oop(), "live block should be an oop");
7329 // However, as we commented above, this may be an object whose
7330 // header hasn't yet been initialized.
7331 size_t size;
7332 assert(_bitMap->isMarked(addr), "Tautology for this control point");
7333 if (_bitMap->isMarked(addr + 1)) {
7334 // Determine the size from the bit map, rather than trying to
7335 // compute it from the object header.
7336 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7337 size = pointer_delta(nextOneAddr + 1, addr);
7338 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7339 "alignment problem");
7340
7341 #ifdef ASSERT
7342 if (oop(addr)->klass_or_null_acquire() != NULL) {
7343 // Ignore mark word because we are running concurrent with mutators
7344 assert(oop(addr)->is_oop(true), "live block should be an oop");
7345 assert(size ==
7346 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7347 "P-mark and computed size do not agree");
7348 }
7349 #endif
7350
7351 } else {
7352 // This should be an initialized object that's alive.
7353 assert(oop(addr)->klass_or_null_acquire() != NULL,
7354 "Should be an initialized object");
7355 // Ignore mark word because we are running concurrent with mutators
7356 assert(oop(addr)->is_oop(true), "live block should be an oop");
7357 // Verify that the bit map has no bits marked between
7358 // addr and purported end of this block.
7359 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7360 assert(size >= 3, "Necessary for Printezis marks to work");
7361 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7362 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7363 }
7364 return size;
7365 }
7366
7367 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7368 size_t chunkSize) {
7369 // do_post_free_or_garbage_chunk() should only be called in the case
7370 // of the adaptive free list allocator.
7371 const bool fcInFreeLists = fc->is_free();
7372 assert((HeapWord*)fc <= _limit, "sweep invariant");
7373 if (CMSTestInFreeList && fcInFreeLists) {
7374 assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7375 }
7376
7644 // In general, during recursive tracing, several threads
7645 // may be concurrently getting here; the first one to
7646 // "tag" it, claims it.
7647 if (_bit_map->par_mark(addr)) {
7648 bool res = _work_queue->push(obj);
7649 assert(res, "Low water mark should be much less than capacity");
7650 // Do a recursive trim in the hope that this will keep
7651 // stack usage lower, but leave some oops for potential stealers
7652 trim_queue(_low_water_mark);
7653 } // Else, another thread got there first
7654 }
7655 }
7656
7657 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
7658 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
7659
7660 void CMSParKeepAliveClosure::trim_queue(uint max) {
7661 while (_work_queue->size() > max) {
7662 oop new_oop;
7663 if (_work_queue->pop_local(new_oop)) {
7664 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7665 assert(_bit_map->isMarked((HeapWord*)new_oop),
7666 "no white objects on this stack!");
7667 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7668 // iterate over the oops in this oop, marking and pushing
7669 // the ones in CMS heap (i.e. in _span).
7670 new_oop->oop_iterate(&_mark_and_push);
7671 }
7672 }
7673 }
7674
7675 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
7676 CMSCollector* collector,
7677 MemRegion span, CMSBitMap* bit_map,
7678 OopTaskQueue* work_queue):
7679 _collector(collector),
7680 _span(span),
7681 _bit_map(bit_map),
7682 _work_queue(work_queue) { }
7683
7684 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
7724 return "Par Spooling Space";
7725 case _adaptive_size_policy:
7726 return "Ergonomics";
7727 default:
7728 return "unknown";
7729 }
7730 }
7731
7732 void CMSDrainMarkingStackClosure::do_void() {
7733 // the max number to take from overflow list at a time
7734 const size_t num = _mark_stack->capacity()/4;
7735 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
7736 "Overflow list should be NULL during concurrent phases");
7737 while (!_mark_stack->isEmpty() ||
7738 // if stack is empty, check the overflow list
7739 _collector->take_from_overflow_list(num, _mark_stack)) {
7740 oop obj = _mark_stack->pop();
7741 HeapWord* addr = (HeapWord*)obj;
7742 assert(_span.contains(addr), "Should be within span");
7743 assert(_bit_map->isMarked(addr), "Should be marked");
7744 assert(obj->is_oop(), "Should be an oop");
7745 obj->oop_iterate(_keep_alive);
7746 }
7747 }
7748
7749 void CMSParDrainMarkingStackClosure::do_void() {
7750 // drain queue
7751 trim_queue(0);
7752 }
7753
7754 // Trim our work_queue so its length is below max at return
7755 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
7756 while (_work_queue->size() > max) {
7757 oop new_oop;
7758 if (_work_queue->pop_local(new_oop)) {
7759 assert(new_oop->is_oop(), "Expected an oop");
7760 assert(_bit_map->isMarked((HeapWord*)new_oop),
7761 "no white objects on this stack!");
7762 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7763 // iterate over the oops in this oop, marking and pushing
7764 // the ones in CMS heap (i.e. in _span).
7765 new_oop->oop_iterate(&_mark_and_push);
7766 }
7767 }
7768 }
7769
7770 ////////////////////////////////////////////////////////////////////
7771 // Support for Marking Stack Overflow list handling and related code
7772 ////////////////////////////////////////////////////////////////////
7773 // Much of the following code is similar in shape and spirit to the
7774 // code used in ParNewGC. We should try and share that code
7775 // as much as possible in the future.
7776
7777 #ifndef PRODUCT
7778 // Debugging support for CMSStackOverflowALot
7779
7790 return false;
7791 }
7792 }
7793
7794 bool CMSCollector::par_simulate_overflow() {
7795 return simulate_overflow();
7796 }
7797 #endif
7798
7799 // Single-threaded
7800 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
7801 assert(stack->isEmpty(), "Expected precondition");
7802 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
7803 size_t i = num;
7804 oop cur = _overflow_list;
7805 const markOop proto = markOopDesc::prototype();
7806 NOT_PRODUCT(ssize_t n = 0;)
7807 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
7808 next = oop(cur->mark());
7809 cur->set_mark(proto); // until proven otherwise
7810 assert(cur->is_oop(), "Should be an oop");
7811 bool res = stack->push(cur);
7812 assert(res, "Bit off more than can chew?");
7813 NOT_PRODUCT(n++;)
7814 }
7815 _overflow_list = cur;
7816 #ifndef PRODUCT
7817 assert(_num_par_pushes >= n, "Too many pops?");
7818 _num_par_pushes -= n;
7819 #endif
7820 return !stack->isEmpty();
7821 }
7822
7823 #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
7824 // (MT-safe) Get a prefix of at most "num" from the list.
7825 // The overflow list is chained through the mark word of
7826 // each object in the list. We fetch the entire list,
7827 // break off a prefix of the right size and return the
7828 // remainder. If other threads try to take objects from
7829 // the overflow list at that time, they will wait for
7830 // some time to see if data becomes available. If (and
7934 suffix_tail->set_mark(markOop(cur_overflow_list));
7935 } else { // cur_overflow_list == BUSY
7936 suffix_tail->set_mark(NULL);
7937 }
7938 // ... and try to place spliced list back on overflow_list ...
7939 observed_overflow_list =
7940 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
7941 } while (cur_overflow_list != observed_overflow_list);
7942 // ... until we have succeeded in doing so.
7943 }
7944 }
7945
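// A minimal sketch (not part of this file) of the detach step of the
// MT-safe protocol above: a taker first swaps the BUSY sentinel into
// _overflow_list so that concurrent takers seeing BUSY know to wait, then
// (as in the tail of par_take_from_overflow_list above) splits off a
// prefix and CASes the suffix back. detach_overflow_list is a
// hypothetical name.
oop CMSCollector::detach_overflow_list() {
  oop observed = _overflow_list;
  oop claimed;
  do {
    while (observed == BUSY) {       // another taker holds the whole list;
      observed = _overflow_list;     // spin (the real code sleeps and retries)
    }
    if (observed == NULL) {
      return NULL;                   // nothing to take
    }
    claimed = observed;
    observed = (oop)Atomic::cmpxchg_ptr(BUSY, &_overflow_list, claimed);
  } while (observed != claimed);     // retry if the list changed under us
  return claimed;                    // the entire list, now ours to split
}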
7946 // Push the prefix elements on work_q
7947 assert(prefix != NULL, "control point invariant");
7948 const markOop proto = markOopDesc::prototype();
7949 oop next;
7950 NOT_PRODUCT(ssize_t n = 0;)
7951 for (cur = prefix; cur != NULL; cur = next) {
7952 next = oop(cur->mark());
7953 cur->set_mark(proto); // until proven otherwise
7954 assert(cur->is_oop(), "Should be an oop");
7955 bool res = work_q->push(cur);
7956 assert(res, "Bit off more than we can chew?");
7957 NOT_PRODUCT(n++;)
7958 }
7959 #ifndef PRODUCT
7960 assert(_num_par_pushes >= n, "Too many pops?");
7961 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
7962 #endif
7963 return true;
7964 }
7965
7966 // Single-threaded
7967 void CMSCollector::push_on_overflow_list(oop p) {
7968 NOT_PRODUCT(_num_par_pushes++;)
7969 assert(p->is_oop(), "Not an oop");
7970 preserve_mark_if_necessary(p);
7971 p->set_mark((markOop)_overflow_list);
7972 _overflow_list = p;
7973 }
7974
7975 // Multi-threaded; use CAS to prepend to overflow list
7976 void CMSCollector::par_push_on_overflow_list(oop p) {
7977 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
7978 assert(p->is_oop(), "Not an oop");
7979 par_preserve_mark_if_necessary(p);
7980 oop observed_overflow_list = _overflow_list;
7981 oop cur_overflow_list;
7982 do {
7983 cur_overflow_list = observed_overflow_list;
7984 if (cur_overflow_list != BUSY) {
7985 p->set_mark(markOop(cur_overflow_list));
7986 } else {
7987 p->set_mark(NULL);
7988 }
7989 observed_overflow_list =
7990 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
7991 } while (cur_overflow_list != observed_overflow_list);
7992 }
7993 #undef BUSY
7994
7995 // Single threaded
7996 // General Note on GrowableArray: pushes may silently fail
7997 // because we are (temporarily) out of C-heap for expanding
7998 // the stack. The problem is quite ubiquitous and affects
8045 // until all work's been completed. Because we
8046 // expect the preserved oop stack (set) to be small,
8047 // it's probably fine to do this single-threaded.
8048 // We can explore cleverer concurrent/overlapped/parallel
8049 // processing of preserved marks if we feel the
8050 // need for this in the future. Stack overflow should
8051 // be so rare in practice and, when it happens, its
8052 // effect on performance so great that this will
8053 // likely just be in the noise anyway.
8054 void CMSCollector::restore_preserved_marks_if_any() {
8055 assert(SafepointSynchronize::is_at_safepoint(),
8056 "world should be stopped");
8057 assert(Thread::current()->is_ConcurrentGC_thread() ||
8058 Thread::current()->is_VM_thread(),
8059 "should be single-threaded");
8060 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8061 "bijection");
8062
8063 while (!_preserved_oop_stack.is_empty()) {
8064 oop p = _preserved_oop_stack.pop();
8065 assert(p->is_oop(), "Should be an oop");
8066 assert(_span.contains(p), "oop should be in _span");
8067 assert(p->mark() == markOopDesc::prototype(),
8068 "Set when taken from overflow list");
8069 markOop m = _preserved_mark_stack.pop();
8070 p->set_mark(m);
8071 }
8072 assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8073 "stacks were cleared above");
8074 }
8075
8076 #ifndef PRODUCT
8077 bool CMSCollector::no_preserved_marks() const {
8078 return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8079 }
8080 #endif
8081
8082 // Transfer some number of overflown objects to usual marking
8083 // stack. Return true if some objects were transferred.
8084 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8085 size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
|
3343
3344 // Grey object scanning during work stealing phase --
3345 // the salient assumption here is that any references
3346 // that are in these stolen objects being scanned must
3347 // already have been initialized (else they would not have
3348 // been published), so we do not need to check for
3349 // uninitialized objects before pushing here.
3350 void ParConcMarkingClosure::do_oop(oop obj) {
3351 assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
3352 HeapWord* addr = (HeapWord*)obj;
3353 // Check if oop points into the CMS generation
3354 // and is not marked
3355 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3356 // a white object ...
3357 // If we manage to "claim" the object, by being the
3358 // first thread to mark it, then we push it on our
3359 // marking stack
3360 if (_bit_map->par_mark(addr)) { // ... now grey
3361 // push on work queue (grey set)
3362 bool simulate_overflow = false;
3363 NOT_PRODUCT(
3364 if (CMSMarkStackOverflowALot &&
3365 _collector->simulate_overflow()) {
3366 // simulate a stack overflow
3367 simulate_overflow = true;
3368 }
3369 )
3370 if (simulate_overflow ||
3371 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3372 // stack overflow
3373 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
3374 // We cannot assert that the overflow stack is full because
3375 // it may have been emptied since.
3376 assert(simulate_overflow ||
3377 _work_queue->size() == _work_queue->max_elems(),
3378 "Else push should have succeeded");
3379 handle_stack_overflow(addr);
3380 }
3381 } // Else, some other thread got there first
3382 do_yield_check();
3383 }
3384 }
3385
3386 void ParConcMarkingClosure::do_oop(oop* p) { ParConcMarkingClosure::do_oop_work(p); }
3387 void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
3388
3389 void ParConcMarkingClosure::trim_queue(size_t max) {
3390 while (_work_queue->size() > max) {
3391 oop new_oop;
3392 if (_work_queue->pop_local(new_oop)) {
3393 assert(oopDesc::is_oop(new_oop), "Should be an oop");
3394 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3395 assert(_span.contains((HeapWord*)new_oop), "Not in span");
3396 new_oop->oop_iterate(this); // do_oop() above
3397 do_yield_check();
3398 }
3399 }
3400 }
3401
3402 // Upon stack overflow, we discard (part of) the stack,
3403 // remembering the least address amongst those discarded
3404 // in CMSCollector's _restart_address.
3405 void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3406 // We need to do this under a mutex to prevent other
3407 // workers from interfering with the work done below.
3408 MutexLockerEx ml(_overflow_stack->par_lock(),
3409 Mutex::_no_safepoint_check_flag);
3410 // Remember the least grey address discarded
3411 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3412 _collector->lower_restart_addr(ra);
3413 _overflow_stack->reset(); // discard stack contents
3414 _overflow_stack->expand(); // expand the stack if possible
3415 }
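// [Editorial note] The restart address recorded above via lower_restart_addr()
// is consulted after the overflow has been handled: marking is resumed from
// the lowest discarded address, so grey objects dropped along with the stack
// contents are revisited rather than lost.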
3416
3417
3418 void CMSConcMarkingTask::do_work_steal(int i) {
3419 OopTaskQueue* work_q = work_queue(i);
3420 oop obj_to_scan;
3421 CMSBitMap* bm = &(_collector->_markBitMap);
3422 CMSMarkStack* ovflw = &(_collector->_markStack);
3423 int* seed = _collector->hash_seed(i);
3424 ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3425 while (true) {
3426 cl.trim_queue(0);
3427 assert(work_q->size() == 0, "Should have been emptied above");
3428 if (get_work_from_overflow_stack(ovflw, work_q)) {
3429 // Can't assert below because the work obtained from the
3430 // overflow stack may already have been stolen from us.
3431 // assert(work_q->size() > 0, "Work from overflow stack");
3432 continue;
3433 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3434 assert(oopDesc::is_oop(obj_to_scan), "Should be an oop");
3435 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3436 obj_to_scan->oop_iterate(&cl);
3437 } else if (terminator()->offer_termination(&_term_term)) {
3438 assert(work_q->size() == 0, "Impossible!");
3439 break;
3440 } else if (yielding() || should_yield()) {
3441 yield();
3442 }
3443 }
3444 }
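// [Editorial note] Termination in the loop above is cooperative: a worker
// offers termination only after finding no local work, no overflow work, and
// nothing to steal, and offer_termination() succeeds only once all workers
// have done likewise, so no in-flight work can be abandoned.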
3445
3446 // This is run by the CMS (coordinator) thread.
3447 void CMSConcMarkingTask::coordinator_yield() {
3448 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3449 "CMS thread should hold CMS token");
3450 // First give up the locks, then yield, then re-lock
3451 // We should probably use a constructor/destructor idiom to
3452 // do this unlock/lock or modify the MutexUnlocker class to
3453 // serve our purpose. XXX
3454 assert_lock_strong(_bit_map_lock);
4505 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4506 start = space->bottom();
4507 end = space->top();
4508 } else if (nth_task == 0) {
4509 start = space->bottom();
4510 end = chunk_array[nth_task];
4511 } else if (nth_task < (uint)chunk_top) {
4512 assert(nth_task >= 1, "Control point invariant");
4513 start = chunk_array[nth_task - 1];
4514 end = chunk_array[nth_task];
4515 } else {
4516 assert(nth_task == (uint)chunk_top, "Control point invariant");
4517 start = chunk_array[chunk_top - 1];
4518 end = space->top();
4519 }
4520 MemRegion mr(start, end);
4521 // Verify that mr is in space
4522 assert(mr.is_empty() || space->used_region().contains(mr),
4523 "Should be in space");
4524 // Verify that "start" is an object boundary
4525 assert(mr.is_empty() || oopDesc::is_oop(oop(mr.start())),
4526 "Should be an oop");
4527 space->par_oop_iterate(mr, cl);
4528 }
4529 pst->all_tasks_completed();
4530 }
4531 }
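// [Editorial note] A worked example of the task-to-region mapping above,
// assuming chunk_top == 2 (two recorded boundaries, so three tasks):
//   task 0 scans [bottom, chunk_array[0])
//   task 1 scans [chunk_array[0], chunk_array[1])
//   task 2 scans [chunk_array[1], top)
// Each boundary serves as one task's exclusive end and the next task's
// start, so the chunks tile the space without gaps or overlaps.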
4532
4533 void
4534 CMSParRemarkTask::do_dirty_card_rescan_tasks(
4535 CompactibleFreeListSpace* sp, int i,
4536 ParMarkRefsIntoAndScanClosure* cl) {
4537 // Until all tasks completed:
4538 // . claim an unclaimed task
4539 // . compute region boundaries corresponding to task claimed
4540 // . transfer dirty bits ct->mut for that region
4541 // . apply rescanclosure to dirty mut bits for that region
4542
4543 ResourceMark rm;
4544 HandleMark hm;
4545
4639 // only affects the number of attempts made to get work from the
4640 // overflow list and does not affect the number of workers. Just
4641 // pass ParallelGCThreads so this behavior is unchanged.
4642 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4643 work_q,
4644 ParallelGCThreads)) {
4645 // found something in global overflow list;
4646 // not yet ready to go stealing work from others.
4647 // We'd like to assert(work_q->size() != 0, ...)
4648 // because we just took work from the overflow list,
4649 // but of course we can't since all of that could have
4650 // been already stolen from us.
4651 // "He giveth and He taketh away."
4652 continue;
4653 }
4654 // Verify that we have no work before we resort to stealing
4655 assert(work_q->size() == 0, "Have work, shouldn't steal");
4656 // Try to steal from other queues that have work
4657 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4658 NOT_PRODUCT(num_steals++;)
4659 assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
4660 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4661 // Do scanning work
4662 obj_to_scan->oop_iterate(cl);
4663 // Loop around, finish this work, and try to steal some more
4664 } else if (terminator()->offer_termination()) {
4665 break; // nirvana from the infinite cycle
4666 }
4667 }
4668 log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
4669 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4670 "Else our work is not yet done");
4671 }
4672
4673 // Record object boundaries in _eden_chunk_array by sampling the eden
4674 // top in the slow-path eden object allocation code path, recording
4675 // the boundaries, if CMSEdenChunksRecordAlways is true. If
4676 // CMSEdenChunksRecordAlways is false, we instead use the asynchronous
4677 // sampling in sample_eden() that is active during part of the
4678 // preclean phase.
4679 void CMSCollector::sample_eden_chunk() {
5118 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5119 // only affects the number of attempts made to get work from the
5120 // overflow list and does not affect the number of workers. Just
5121 // pass ParallelGCThreads so this behavior is unchanged.
5122 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5123 work_q,
5124 ParallelGCThreads)) {
5125 // Found something in global overflow list;
5126 // not yet ready to go stealing work from others.
5127 // We'd like to assert(work_q->size() != 0, ...)
5128 // because we just took work from the overflow list,
5129 // but of course we can't, since all of that might have
5130 // been already stolen from us.
5131 continue;
5132 }
5133 // Verify that we have no work before we resort to stealing
5134 assert(work_q->size() == 0, "Have work, shouldn't steal");
5135 // Try to steal from other queues that have work
5136 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5137 NOT_PRODUCT(num_steals++;)
5138 assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
5139 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5140 // Do scanning work
5141 obj_to_scan->oop_iterate(keep_alive);
5142 // Loop around, finish this work, and try to steal some more
5143 } else if (terminator()->offer_termination()) {
5144 break; // nirvana from the infinite cycle
5145 }
5146 }
5147 log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
5148 }
5149
5150 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5151 {
5152 GenCollectedHeap* gch = GenCollectedHeap::heap();
5153 WorkGang* workers = gch->workers();
5154 assert(workers != NULL, "Need parallel worker threads.");
5155 CMSRefProcTaskProxy rp_task(task, &_collector,
5156 _collector.ref_processor()->span(),
5157 _collector.markBitMap(),
5158 workers, _collector.task_queues());
5808 // XXX: there seems to be a lot of code duplication here;
5809 // should refactor and consolidate common code.
5810
5811 // This closure is used to mark refs into the CMS generation in
5812 // the CMS bit map. Called at the first checkpoint. This closure
5813 // assumes that we do not need to re-mark dirty cards; if the CMS
5814 // generation on which this is used is not the oldest
5815 // generation, then this will lose younger_gen cards!
5816
5817 MarkRefsIntoClosure::MarkRefsIntoClosure(
5818 MemRegion span, CMSBitMap* bitMap):
5819 _span(span),
5820 _bitMap(bitMap)
5821 {
5822 assert(ref_processor() == NULL, "deliberately left NULL");
5823 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5824 }
5825
5826 void MarkRefsIntoClosure::do_oop(oop obj) {
5827 // if obj points into _span, then mark the corresponding bit in _bitMap
5828 assert(oopDesc::is_oop(obj), "expected an oop");
5829 HeapWord* addr = (HeapWord*)obj;
5830 if (_span.contains(addr)) {
5831 // this should be made more efficient
5832 _bitMap->mark(addr);
5833 }
5834 }
5835
5836 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
5837 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
5838
5839 ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
5840 MemRegion span, CMSBitMap* bitMap):
5841 _span(span),
5842 _bitMap(bitMap)
5843 {
5844 assert(ref_processor() == NULL, "deliberately left NULL");
5845 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5846 }
5847
5848 void ParMarkRefsIntoClosure::do_oop(oop obj) {
5850 // if obj points into _span, then mark the corresponding bit in _bitMap
5850 assert(oopDesc::is_oop(obj), "expected an oop");
5851 HeapWord* addr = (HeapWord*)obj;
5852 if (_span.contains(addr)) {
5853 // this should be made more efficient
5854 _bitMap->par_mark(addr);
5855 }
5856 }
5857
5858 void ParMarkRefsIntoClosure::do_oop(oop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
5859 void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
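// [Editorial note] The serial MarkRefsIntoClosure above can set bits with a
// plain _bitMap->mark(); this parallel variant must use par_mark(), which
// updates the shared bit map atomically, since several GC worker threads may
// try to mark bits in the same word concurrently.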
5860
5861 // A variant of the above, used for CMS marking verification.
5862 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5863 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5864 _span(span),
5865 _verification_bm(verification_bm),
5866 _cms_bm(cms_bm)
5867 {
5868 assert(ref_processor() == NULL, "deliberately left NULL");
5869 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5870 }
5871
5872 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5874 // if obj points into _span, then mark the corresponding bit in _verification_bm
5874 assert(oopDesc::is_oop(obj), "expected an oop");
5875 HeapWord* addr = (HeapWord*)obj;
5876 if (_span.contains(addr)) {
5877 _verification_bm->mark(addr);
5878 if (!_cms_bm->isMarked(addr)) {
5879 Log(gc, verify) log;
5880 ResourceMark rm;
5881 LogStream ls(log.error());
5882 oop(addr)->print_on(&ls);
5883 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5884 fatal("... aborting");
5885 }
5886 }
5887 }
5888
5889 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5890 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5891
5892 //////////////////////////////////////////////////
5893 // MarkRefsIntoAndScanClosure
5894 //////////////////////////////////////////////////
5908 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
5909 mark_stack, concurrent_precleaning),
5910 _yield(should_yield),
5911 _concurrent_precleaning(concurrent_precleaning),
5912 _freelistLock(NULL)
5913 {
5914 // FIXME: Should initialize in base class constructor.
5915 assert(rp != NULL, "ref_processor shouldn't be NULL");
5916 set_ref_processor_internal(rp);
5917 }
5918
5919 // This closure is used to mark refs into the CMS generation at the
5920 // second (final) checkpoint, and to scan and transitively follow
5921 // the unmarked oops. It is also used during the concurrent precleaning
5922 // phase while scanning objects on dirty cards in the CMS generation.
5923 // The marks are made in the marking bit map and the marking stack is
5924 // used for keeping the (newly) grey objects during the scan.
5925 // The parallel version (ParMarkRefsIntoAndScanClosure) appears further below.
5926 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
5927 if (obj != NULL) {
5928 assert(oopDesc::is_oop(obj), "expected an oop");
5929 HeapWord* addr = (HeapWord*)obj;
5930 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
5931 assert(_collector->overflow_list_is_empty(),
5932 "overflow list should be empty");
5933 if (_span.contains(addr) &&
5934 !_bit_map->isMarked(addr)) {
5935 // mark bit map (object is now grey)
5936 _bit_map->mark(addr);
5937 // push on marking stack (stack should be empty), and drain the
5938 // stack by applying this closure to the oops contained in the
5939 // objects popped from the stack (i.e. blacken the grey objects)
5940 bool res = _mark_stack->push(obj);
5941 assert(res, "Should have space to push on empty stack");
5942 do {
5943 oop new_oop = _mark_stack->pop();
5944 assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
5945 assert(_bit_map->isMarked((HeapWord*)new_oop),
5946 "only grey objects on this stack");
5947 // iterate over the oops in this oop, marking and pushing
5948 // the ones in CMS heap (i.e. in _span).
5949 new_oop->oop_iterate(&_pushAndMarkClosure);
5950 // check if it's time to yield
5951 do_yield_check();
5952 } while (!_mark_stack->isEmpty() ||
5953 (!_concurrent_precleaning && take_from_overflow_list()));
5954 // if marking stack is empty, and we are not doing this
5955 // during precleaning, then check the overflow list
5956 }
5957 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
5958 assert(_collector->overflow_list_is_empty(),
5959 "overflow list was drained above");
5960
5961 assert(_collector->no_preserved_marks(),
5962 "All preserved marks should have been restored above");
5963 }
5964 }
6006 _low_water_mark(MIN2((work_queue->max_elems()/4),
6007 ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6008 _parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6009 {
6010 // FIXME: Should initialize in base class constructor.
6011 assert(rp != NULL, "ref_processor shouldn't be NULL");
6012 set_ref_processor_internal(rp);
6013 }
6014
6015 // This closure is used to mark refs into the CMS generation at the
6016 // second (final) checkpoint, and to scan and transitively follow
6017 // the unmarked oops. The marks are made in the marking bit map and
6018 // the work_queue is used for keeping the (newly) grey objects during
6019 // the scan phase whence they are also available for stealing by parallel
6020 // threads. Since the marking bit map is shared, updates are
6021 // synchronized (via CAS).
6022 void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
6023 if (obj != NULL) {
6024 // Ignore mark word because this could be an already marked oop
6025 // that may be chained at the end of the overflow list.
6026 assert(oopDesc::is_oop(obj, true), "expected an oop");
6027 HeapWord* addr = (HeapWord*)obj;
6028 if (_span.contains(addr) &&
6029 !_bit_map->isMarked(addr)) {
6030 // mark bit map (object will become grey):
6031 // It is possible for several threads to be
6032 // trying to "claim" this object concurrently;
6033 // the unique thread that succeeds in marking the
6034 // object first will do the subsequent push on
6035 // to the work queue (or overflow list).
6036 if (_bit_map->par_mark(addr)) {
6037 // push on work_queue (which may not be empty), and trim the
6038 // queue to an appropriate length by applying this closure to
6039 // the oops contained in the objects popped from the queue
6040 // (i.e. blacken the grey objects)
6041 bool res = _work_queue->push(obj);
6042 assert(res, "Low water mark should be less than capacity?");
6043 trim_queue(_low_water_mark);
6044 } // Else, another thread claimed the object
6045 }
6046 }
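// [Editorial note] The push-then-trim protocol above keeps the local queue
// near _low_water_mark instead of draining it completely: eager processing
// bounds queue growth, while the retained tail leaves oops visible to idle
// workers looking to steal (compare the "leave some oops for potential
// stealers" comment further below).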
6052 // This closure is used to rescan the marked objects on the dirty cards
6053 // in the mod union table and the card table proper.
6054 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6055 oop p, MemRegion mr) {
6056
6057 size_t size = 0;
6058 HeapWord* addr = (HeapWord*)p;
6059 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6060 assert(_span.contains(addr), "we are scanning the CMS generation");
6061 // check if it's time to yield
6062 if (do_yield_check()) {
6063 // We yielded for some foreground stop-world work,
6064 // and we have been asked to abort this ongoing preclean cycle.
6065 return 0;
6066 }
6067 if (_bitMap->isMarked(addr)) {
6068 // it's marked; is it potentially uninitialized?
6069 if (p->klass_or_null_acquire() != NULL) {
6070 // an initialized object; ignore mark word in verification below
6071 // since we are running concurrent with mutators
6072 assert(oopDesc::is_oop(p, true), "should be an oop");
6073 if (p->is_objArray()) {
6074 // objArrays are precisely marked; restrict scanning
6075 // to dirty cards only.
6076 size = CompactibleFreeListSpace::adjustObjectSize(
6077 p->oop_iterate_size(_scanningClosure, mr));
6078 } else {
6079 // A non-array may have been imprecisely marked; we need
6080 // to scan the object in its entirety.
6081 size = CompactibleFreeListSpace::adjustObjectSize(
6082 p->oop_iterate_size(_scanningClosure));
6083 }
6084 #ifdef ASSERT
6085 size_t direct_size =
6086 CompactibleFreeListSpace::adjustObjectSize(p->size());
6087 assert(size == direct_size, "Inconsistency in size");
6088 assert(size >= 3, "Necessary for Printezis marks to work");
6089 HeapWord* start_pbit = addr + 1;
6090 HeapWord* end_pbit = addr + size - 1;
6091 assert(_bitMap->isMarked(start_pbit) == _bitMap->isMarked(end_pbit),
6092 "inconsistent Printezis mark");
6101 } else {
6102 // An uninitialized object.
6103 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6104 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6105 size = pointer_delta(nextOneAddr + 1, addr);
6106 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6107 "alignment problem");
6108 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6109 // will dirty the card when the klass pointer is installed in the
6110 // object (signaling the completion of initialization).
6111 }
6112 } else {
6113 // Either a not yet marked object or an uninitialized object
6114 if (p->klass_or_null_acquire() == NULL) {
6115 // An uninitialized object, skip to the next card, since
6116 // we may not be able to read its P-bits yet.
6117 assert(size == 0, "Initial value");
6118 } else {
6119 // An object not (yet) reached by marking: we merely need to
6120 // compute its size so as to go look at the next block.
6121 assert(oopDesc::is_oop(p, true), "should be an oop");
6122 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6123 }
6124 }
6125 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6126 return size;
6127 }
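// [Editorial note] A worked example of the P-bit ("Printezis mark") size
// encoding read above, for an uninitialized block at addr spanning 5 words:
// besides the mark bit at addr, the marker sets P-bits at addr+1 and at
// addr+4 (the last word). getNextMarkedWordAddress(addr + 2) then finds
// addr+4, and size = pointer_delta(addr+4 + 1, addr) = 5 words, computed
// entirely from the bit map without reading the uninitialized header. This
// is also why size >= 3 is asserted above: a smaller block could not carry
// two distinct interior P-bits.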
6128
6129 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6130 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6131 "CMS thread should hold CMS token");
6132 assert_lock_strong(_freelistLock);
6133 assert_lock_strong(_bitMap->lock());
6134 // relinquish the freelistLock and the bitMap's lock()
6135 _bitMap->lock()->unlock();
6136 _freelistLock->unlock();
6137 ConcurrentMarkSweepThread::desynchronize(true);
6138 _collector->stopTimer();
6139 _collector->incrementYields();
6140
6141 // See the comment in coordinator_yield()
6148 ConcurrentMarkSweepThread::synchronize(true);
6149 _freelistLock->lock_without_safepoint_check();
6150 _bitMap->lock()->lock_without_safepoint_check();
6151 _collector->startTimer();
6152 }
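// [Editorial sketch] The XXX note in coordinator_yield() suggests replacing
// manual unlock/yield/relock sequences like the one above with a
// constructor/destructor idiom. A minimal sketch, in portable C++ with
// std::mutex purely for illustration (the HotSpot version would take a
// Mutex* and relock without a safepoint check):
//
//   #include <mutex>
//
//   class ScopedUnlock {
//     std::mutex& _m;
//    public:
//     explicit ScopedUnlock(std::mutex& m) : _m(m) { _m.unlock(); } // caller holds m
//     ~ScopedUnlock() { _m.lock(); }  // reacquired on every exit path
//     ScopedUnlock(const ScopedUnlock&) = delete;
//     ScopedUnlock& operator=(const ScopedUnlock&) = delete;
//   };
//
// A yield site then reads { ScopedUnlock su(m); /* sleep or yield */ } and
// early returns can no longer leak the unlocked state.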
6153
6154
6155 //////////////////////////////////////////////////////////////////
6156 // SurvivorSpacePrecleanClosure
6157 //////////////////////////////////////////////////////////////////
6158 // This (single-threaded) closure is used to preclean the oops in
6159 // the survivor spaces.
6160 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6161
6162 HeapWord* addr = (HeapWord*)p;
6163 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6164 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6165 assert(p->klass_or_null() != NULL, "object should be initialized");
6166 // an initialized object; ignore mark word in verification below
6167 // since we are running concurrent with mutators
6168 assert(oopDesc::is_oop(p, true), "should be an oop");
6169 // Note that we do not yield while we iterate over
6170 // the interior oops of p, pushing the relevant ones
6171 // on our marking stack.
6172 size_t size = p->oop_iterate_size(_scanning_closure);
6173 do_yield_check();
6174 // Observe that below, we do not abandon the preclean
6175 // phase as soon as we should; rather we empty the
6176 // marking stack before returning. This is to satisfy
6177 // some existing assertions. In general, it may be a
6178 // good idea to abort immediately and complete the marking
6179 // from the grey objects at a later time.
6180 while (!_mark_stack->isEmpty()) {
6181 oop new_oop = _mark_stack->pop();
6182 assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
6183 assert(_bit_map->isMarked((HeapWord*)new_oop),
6184 "only grey objects on this stack");
6185 // iterate over the oops in this oop, marking and pushing
6186 // the ones in CMS heap (i.e. in _span).
6187 new_oop->oop_iterate(_scanning_closure);
6188 // check if it's time to yield
6189 do_yield_check();
6190 }
6191 unsigned int after_count =
6192 GenCollectedHeap::heap()->total_collections();
6193 bool abort = (_before_count != after_count) ||
6194 _collector->should_abort_preclean();
6195 return abort ? 0 : size;
6196 }
6197
6198 void SurvivorSpacePrecleanClosure::do_yield_work() {
6199 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6200 "CMS thread should hold CMS token");
6201 assert_lock_strong(_bit_map->lock());
6202 // Relinquish the bit map lock
6206 _collector->incrementYields();
6207
6208 // See the comment in coordinator_yield()
6209 for (unsigned i = 0; i < CMSYieldSleepCount &&
6210 ConcurrentMarkSweepThread::should_yield() &&
6211 !CMSCollector::foregroundGCIsActive(); ++i) {
6212 os::sleep(Thread::current(), 1, false);
6213 }
6214
6215 ConcurrentMarkSweepThread::synchronize(true);
6216 _bit_map->lock()->lock_without_safepoint_check();
6217 _collector->startTimer();
6218 }
6219
6220 // This closure is used to rescan the marked objects on the dirty cards
6221 // in the mod union table and the card table proper. In the parallel
6222 // case, although the bitMap is shared, we do a single read so the
6223 // isMarked() query is "safe".
6224 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6225 // Ignore mark word because we are running concurrent with mutators
6226 assert(oopDesc::is_oop_or_null(p, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
6227 HeapWord* addr = (HeapWord*)p;
6228 assert(_span.contains(addr), "we are scanning the CMS generation");
6229 bool is_obj_array = false;
6230 #ifdef ASSERT
6231 if (!_parallel) {
6232 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6233 assert(_collector->overflow_list_is_empty(),
6234 "overflow list should be empty");
6235
6236 }
6237 #endif // ASSERT
6238 if (_bit_map->isMarked(addr)) {
6239 // Obj arrays are precisely marked, non-arrays are not;
6240 // so we scan objArrays precisely and non-arrays in their
6241 // entirety.
6242 if (p->is_objArray()) {
6243 is_obj_array = true;
6244 if (_parallel) {
6245 p->oop_iterate(_par_scan_closure, mr);
6246 } else {
6359 // See the comment in coordinator_yield()
6360 for (unsigned i = 0; i < CMSYieldSleepCount &&
6361 ConcurrentMarkSweepThread::should_yield() &&
6362 !CMSCollector::foregroundGCIsActive(); ++i) {
6363 os::sleep(Thread::current(), 1, false);
6364 }
6365
6366 ConcurrentMarkSweepThread::synchronize(true);
6367 _bitMap->lock()->lock_without_safepoint_check();
6368 _collector->startTimer();
6369 }
6370
6371 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6372 assert(_bitMap->isMarked(ptr), "expected bit to be set");
6373 assert(_markStack->isEmpty(),
6374 "should drain stack to limit stack usage");
6375 // convert ptr to an oop preparatory to scanning
6376 oop obj = oop(ptr);
6377 // Ignore mark word in verification below, since we
6378 // may be running concurrent with mutators.
6379 assert(oopDesc::is_oop(obj, true), "should be an oop");
6380 assert(_finger <= ptr, "_finger runneth ahead");
6381 // advance the finger to right end of this object
6382 _finger = ptr + obj->size();
6383 assert(_finger > ptr, "we just incremented it above");
6384 // On large heaps, it may take us some time to get through
6385 // the marking phase. During
6386 // this time it's possible that a lot of mutations have
6387 // accumulated in the card table and the mod union table --
6388 // these mutation records are redundant until we have
6389 // actually traced into the corresponding card.
6390 // Here, we check whether advancing the finger would make
6391 // us cross into a new card, and if so clear corresponding
6392 // cards in the MUT (preclean them in the card-table in the
6393 // future).
6394
6395 DEBUG_ONLY(if (!_verifying) {)
6396 // The clean-on-enter optimization is disabled by default,
6397 // until we fix 6178663.
6398 if (CMSCleanOnEnter && (_finger > _threshold)) {
6399 // [_threshold, _finger) represents the interval
6406 assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
6407 "_threshold should always be card-aligned");
6408 _threshold = align_up(_finger, CardTableModRefBS::card_size);
6409 MemRegion mr(old_threshold, _threshold);
6410 assert(!mr.is_empty(), "Control point invariant");
6411 assert(_span.contains(mr), "Should clear within span");
6412 _mut->clear_range(mr);
6413 }
6414 DEBUG_ONLY(})
6415 // Note: the finger doesn't advance while we drain
6416 // the stack below.
6417 PushOrMarkClosure pushOrMarkClosure(_collector,
6418 _span, _bitMap, _markStack,
6419 _finger, this);
6420 bool res = _markStack->push(obj);
6421 assert(res, "Empty non-zero size stack should have space for single push");
6422 while (!_markStack->isEmpty()) {
6423 oop new_oop = _markStack->pop();
6424 // Skip verifying header mark word below because we are
6425 // running concurrent with mutators.
6426 assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop");
6427 // now scan this oop's oops
6428 new_oop->oop_iterate(&pushOrMarkClosure);
6429 do_yield_check();
6430 }
6431 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6432 }
6433
6434 ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
6435 CMSCollector* collector, MemRegion span,
6436 CMSBitMap* bit_map,
6437 OopTaskQueue* work_queue,
6438 CMSMarkStack* overflow_stack):
6439 _collector(collector),
6440 _whole_span(collector->_span),
6441 _span(span),
6442 _bit_map(bit_map),
6443 _mut(&collector->_modUnionTable),
6444 _work_queue(work_queue),
6445 _overflow_stack(overflow_stack),
6446 _skip_bits(0),
6472 if (p->klass_or_null_acquire() == NULL) {
6473 // in the case of Clean-on-Enter optimization, redirty card
6474 // and avoid clearing card by increasing the threshold.
6475 return true;
6476 }
6477 }
6478 scan_oops_in_oop(addr);
6479 return true;
6480 }
6481
6482 void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6483 assert(_bit_map->isMarked(ptr), "expected bit to be set");
6484 // Should we assert that our work queue is empty or
6485 // below some drain limit?
6486 assert(_work_queue->size() == 0,
6487 "should drain stack to limit stack usage");
6488 // convert ptr to an oop preparatory to scanning
6489 oop obj = oop(ptr);
6490 // Ignore mark word in verification below, since we
6491 // may be running concurrent with mutators.
6492 assert(oopDesc::is_oop(obj, true), "should be an oop");
6493 assert(_finger <= ptr, "_finger runneth ahead");
6494 // advance the finger to right end of this object
6495 _finger = ptr + obj->size();
6496 assert(_finger > ptr, "we just incremented it above");
6497 // On large heaps, it may take us some time to get through
6498 // the marking phase. During
6499 // this time it's possible that a lot of mutations have
6500 // accumulated in the card table and the mod union table --
6501 // these mutation records are redundant until we have
6502 // actually traced into the corresponding card.
6503 // Here, we check whether advancing the finger would make
6504 // us cross into a new card, and if so clear corresponding
6505 // cards in the MUT (preclean them in the card-table in the
6506 // future).
6507
6508 // The clean-on-enter optimization is disabled by default,
6509 // until we fix 6178663.
6510 if (CMSCleanOnEnter && (_finger > _threshold)) {
6511 // [_threshold, _finger) represents the interval
6512 // of cards to be cleared in MUT (or precleaned in card table).
6533 _overflow_stack,
6534 _finger,
6535 gfa, this);
6536 bool res = _work_queue->push(obj); // overflow could occur here
6537 assert(res, "Will hold once we use workqueues");
6538 while (true) {
6539 oop new_oop;
6540 if (!_work_queue->pop_local(new_oop)) {
6541 // We emptied our work_queue; check if there's stuff that can
6542 // be gotten from the overflow stack.
6543 if (CMSConcMarkingTask::get_work_from_overflow_stack(
6544 _overflow_stack, _work_queue)) {
6545 do_yield_check();
6546 continue;
6547 } else { // done
6548 break;
6549 }
6550 }
6551 // Skip verifying header mark word below because we are
6552 // running concurrent with mutators.
6553 assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop");
6554 // now scan this oop's oops
6555 new_oop->oop_iterate(&pushOrMarkClosure);
6556 do_yield_check();
6557 }
6558 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6559 }
6560
6561 // Yield in response to a request from VM Thread or
6562 // from mutators.
6563 void ParMarkFromRootsClosure::do_yield_work() {
6564 assert(_task != NULL, "sanity");
6565 _task->yield();
6566 }
6567
6568 // A variant of the above used for verifying CMS marking work.
6569 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6570 MemRegion span,
6571 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6572 CMSMarkStack* mark_stack):
6573 _collector(collector),
6587 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6588 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6589 assert(_span.contains(addr), "Out of bounds _finger?");
6590 _finger = addr;
6591 }
6592
6593 // Should revisit to see if this should be restructured for
6594 // greater efficiency.
6595 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6596 // convert offset into a HeapWord*
6597 HeapWord* addr = _verification_bm->startWord() + offset;
6598 assert(addr >= _verification_bm->startWord() && addr < _verification_bm->endWord(),
6599 "address out of range");
6600 assert(_verification_bm->isMarked(addr), "tautology");
6601 assert(_cms_bm->isMarked(addr), "tautology");
6602
6603 assert(_mark_stack->isEmpty(),
6604 "should drain stack to limit stack usage");
6605 // convert addr to an oop preparatory to scanning
6606 oop obj = oop(addr);
6607 assert(oopDesc::is_oop(obj), "should be an oop");
6608 assert(_finger <= addr, "_finger runneth ahead");
6609 // advance the finger to right end of this object
6610 _finger = addr + obj->size();
6611 assert(_finger > addr, "we just incremented it above");
6612 // Note: the finger doesn't advance while we drain
6613 // the stack below.
6614 bool res = _mark_stack->push(obj);
6615 assert(res, "Empty non-zero size stack should have space for single push");
6616 while (!_mark_stack->isEmpty()) {
6617 oop new_oop = _mark_stack->pop();
6618 assert(oopDesc::is_oop(new_oop), "Oops! expected to pop an oop");
6619 // now scan this oop's oops
6620 new_oop->oop_iterate(&_pam_verify_closure);
6621 }
6622 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6623 return true;
6624 }
6625
6626 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6627 CMSCollector* collector, MemRegion span,
6628 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6629 CMSMarkStack* mark_stack):
6630 MetadataAwareOopClosure(collector->ref_processor()),
6631 _collector(collector),
6632 _span(span),
6633 _verification_bm(verification_bm),
6634 _cms_bm(cms_bm),
6635 _mark_stack(mark_stack)
6636 { }
6637
6638 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6639 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6640
6641 // Upon stack overflow, we discard (part of) the stack,
6642 // remembering the least address amongst those discarded
6643 // in CMSCollector's _restart_address.
6644 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6645 // Remember the least grey address discarded
6646 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6647 _collector->lower_restart_addr(ra);
6648 _mark_stack->reset(); // discard stack contents
6649 _mark_stack->expand(); // expand the stack if possible
6650 }
6651
6652 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6653 assert(oopDesc::is_oop_or_null(obj), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6654 HeapWord* addr = (HeapWord*)obj;
6655 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6656 // Oop lies in _span and isn't yet grey or black
6657 _verification_bm->mark(addr); // now grey
6658 if (!_cms_bm->isMarked(addr)) {
6659 Log(gc, verify) log;
6660 ResourceMark rm;
6661 LogStream ls(log.error());
6662 oop(addr)->print_on(&ls);
6663 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6664 fatal("... aborting");
6665 }
6666
6667 if (!_mark_stack->push(obj)) { // stack overflow
6668 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
6669 assert(_mark_stack->isFull(), "Else push should have succeeded");
6670 handle_stack_overflow(addr);
6671 }
6672 // anything including and to the right of _finger
6673 // will be scanned as we iterate over the remainder of the
6730 _markStack->expand(); // expand the stack if possible
6731 }
6732
6733 // Upon stack overflow, we discard (part of) the stack,
6734 // remembering the least address amongst those discarded
6735 // in CMSCollector's _restart_address.
6736 void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6737 // We need to do this under a mutex to prevent other
6738 // workers from interfering with the work done below.
6739 MutexLockerEx ml(_overflow_stack->par_lock(),
6740 Mutex::_no_safepoint_check_flag);
6741 // Remember the least grey address discarded
6742 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6743 _collector->lower_restart_addr(ra);
6744 _overflow_stack->reset(); // discard stack contents
6745 _overflow_stack->expand(); // expand the stack if possible
6746 }
6747
6748 void PushOrMarkClosure::do_oop(oop obj) {
6749 // Ignore mark word because we are running concurrent with mutators.
6750 assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6751 HeapWord* addr = (HeapWord*)obj;
6752 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
6753 // Oop lies in _span and isn't yet grey or black
6754 _bitMap->mark(addr); // now grey
6755 if (addr < _finger) {
6756 // the bit map iteration has already either passed, or
6757 // sampled, this bit in the bit map; we'll need to
6758 // use the marking stack to scan this oop's oops.
6759 bool simulate_overflow = false;
6760 NOT_PRODUCT(
6761 if (CMSMarkStackOverflowALot &&
6762 _collector->simulate_overflow()) {
6763 // simulate a stack overflow
6764 simulate_overflow = true;
6765 }
6766 )
6767 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
6768 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
6769 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
6770 handle_stack_overflow(addr);
6771 }
6772 }
6773 // anything including and to the right of _finger
6774 // will be scanned as we iterate over the remainder of the
6775 // bit map
6776 do_yield_check();
6777 }
6778 }
6779
6780 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
6781 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
6782
6783 void ParPushOrMarkClosure::do_oop(oop obj) {
6784 // Ignore mark word because we are running concurrent with mutators.
6785 assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6786 HeapWord* addr = (HeapWord*)obj;
6787 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
6788 // Oop lies in _span and isn't yet grey or black
6789 // We read the global_finger (volatile read) strictly after marking oop
6790 bool res = _bit_map->par_mark(addr); // now grey
6791 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
6792 // Should we push this marked oop on our stack?
6793 // -- if someone else marked it, nothing to do
6794 // -- if target oop is above global finger nothing to do
6795 // -- if target oop is in chunk and above local finger
6796 // then nothing to do
6797 // -- else push on work queue
6798 if ( !res // someone else marked it, they will deal with it
6799 || (addr >= *gfa) // will be scanned in a later task
6800 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
6801 return;
6802 }
6803 // the bit map iteration has already either passed, or
6804 // sampled, this bit in the bit map; we'll need to
6805 // use the marking stack to scan this oop's oops.
6838 bool concurrent_precleaning):
6839 MetadataAwareOopClosure(rp),
6840 _collector(collector),
6841 _span(span),
6842 _bit_map(bit_map),
6843 _mod_union_table(mod_union_table),
6844 _mark_stack(mark_stack),
6845 _concurrent_precleaning(concurrent_precleaning)
6846 {
6847 assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6848 }
6849
6850 // Grey object rescan during pre-cleaning and second checkpoint phases --
6851 // the non-parallel version (the parallel version appears further below.)
6852 void PushAndMarkClosure::do_oop(oop obj) {
6853 // Ignore mark word verification. If during concurrent precleaning,
6854 // the object monitor may be locked. If during the checkpoint
6855 // phases, the object may already have been reached by a different
6856 // path and may be at the end of the global overflow list (so
6857 // the mark word may be NULL).
6858 assert(oopDesc::is_oop_or_null(obj, true /* ignore mark word */),
6859 "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6860 HeapWord* addr = (HeapWord*)obj;
6861 // Check if oop points into the CMS generation
6862 // and is not marked
6863 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6864 // a white object ...
6865 _bit_map->mark(addr); // ... now grey
6866 // push on the marking stack (grey set)
6867 bool simulate_overflow = false;
6868 NOT_PRODUCT(
6869 if (CMSMarkStackOverflowALot &&
6870 _collector->simulate_overflow()) {
6871 // simulate a stack overflow
6872 simulate_overflow = true;
6873 }
6874 )
6875 if (simulate_overflow || !_mark_stack->push(obj)) {
6876 if (_concurrent_precleaning) {
6877 // During precleaning we can just dirty the appropriate card(s)
6878 // in the mod union table, thus ensuring that the object remains
6917 assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6918 }
6919
6920 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
6921 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
6922
6923 // Grey object rescan during second checkpoint phase --
6924 // the parallel version.
6925 void ParPushAndMarkClosure::do_oop(oop obj) {
6926 // In the assert below, we ignore the mark word because
6927 // this oop may point to an already visited object that is
6928 // on the overflow stack (in which case the mark word has
6929 // been hijacked for chaining into the overflow stack --
6930 // if this is the last object in the overflow stack then
6931 // its mark word will be NULL). Because this object may
6932 // have been subsequently popped off the global overflow
6933 // stack, and the mark word possibly restored to the prototypical
6934 // value, by the time we get to examine this failing assert in
6935 // the debugger, is_oop_or_null(false) may subsequently start
6936 // to hold.
6937 assert(oopDesc::is_oop_or_null(obj, true),
6938 "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6939 HeapWord* addr = (HeapWord*)obj;
6940 // Check if oop points into the CMS generation
6941 // and is not marked
6942 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6943 // a white object ...
6944 // If we manage to "claim" the object, by being the
6945 // first thread to mark it, then we push it on our
6946 // marking stack
6947 if (_bit_map->par_mark(addr)) { // ... now grey
6948 // push on work queue (grey set)
6949 bool simulate_overflow = false;
6950 NOT_PRODUCT(
6951 if (CMSMarkStackOverflowALot &&
6952 _collector->par_simulate_overflow()) {
6953 // simulate a stack overflow
6954 simulate_overflow = true;
6955 }
6956 )
6957 if (simulate_overflow || !_work_queue->push(obj)) {
7308 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7309 do_post_free_or_garbage_chunk(fc, size);
7310
7311 assert(_limit >= addr + size,
7312 "A freshly garbage chunk can't possibly straddle over _limit");
7313 if (inFreeRange()) lookahead_and_flush(fc, size);
7314 return size;
7315 }
7316
7317 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7318 HeapWord* addr = (HeapWord*) fc;
7319 // The sweeper has just found a live object. Return any accumulated
7320 // left hand chunk to the free lists.
7321 if (inFreeRange()) {
7322 assert(freeFinger() < addr, "freeFinger points too high");
7323 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7324 }
7325
7326 // This object is live: we'd normally expect this to be
7327 // an oop, and like to assert the following:
7328 // assert(oopDesc::is_oop(oop(addr)), "live block should be an oop");
7329 // However, as we commented above, this may be an object whose
7330 // header hasn't yet been initialized.
7331 size_t size;
7332 assert(_bitMap->isMarked(addr), "Tautology for this control point");
7333 if (_bitMap->isMarked(addr + 1)) {
7334 // Determine the size from the bit map, rather than trying to
7335 // compute it from the object header.
7336 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7337 size = pointer_delta(nextOneAddr + 1, addr);
7338 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7339 "alignment problem");
7340
7341 #ifdef ASSERT
7342 if (oop(addr)->klass_or_null_acquire() != NULL) {
7343 // Ignore mark word because we are running concurrent with mutators
7344 assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop");
7345 assert(size ==
7346 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7347 "P-mark and computed size do not agree");
7348 }
7349 #endif
7350
7351 } else {
7352 // This should be an initialized object that's alive.
7353 assert(oop(addr)->klass_or_null_acquire() != NULL,
7354 "Should be an initialized object");
7355 // Ignore mark word because we are running concurrent with mutators
7356 assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop");
7357 // Verify that the bit map has no bits marked between
7358 // addr and purported end of this block.
7359 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7360 assert(size >= 3, "Necessary for Printezis marks to work");
7361 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7362 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7363 }
7364 return size;
7365 }
7366
7367 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7368 size_t chunkSize) {
7369 // do_post_free_or_garbage_chunk() should only be called in the case
7370 // of the adaptive free list allocator.
7371 const bool fcInFreeLists = fc->is_free();
7372 assert((HeapWord*)fc <= _limit, "sweep invariant");
7373 if (CMSTestInFreeList && fcInFreeLists) {
7374 assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7375 }
7376
7644 // In general, during recursive tracing, several threads
7645 // may be concurrently getting here; the first one to
7646 // "tag" it claims it.
7647 if (_bit_map->par_mark(addr)) {
7648 bool res = _work_queue->push(obj);
7649 assert(res, "Low water mark should be much less than capacity");
7650 // Do a recursive trim in the hope that this will keep
7651 // stack usage lower, but leave some oops for potential stealers
7652 trim_queue(_low_water_mark);
7653 } // Else, another thread got there first
7654 }
7655 }
7656
7657 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
7658 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
7659
7660 void CMSParKeepAliveClosure::trim_queue(uint max) {
7661 while (_work_queue->size() > max) {
7662 oop new_oop;
7663 if (_work_queue->pop_local(new_oop)) {
7664 assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
7665 assert(_bit_map->isMarked((HeapWord*)new_oop),
7666 "no white objects on this stack!");
7667 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7668 // iterate over the oops in this oop, marking and pushing
7669 // the ones in CMS heap (i.e. in _span).
7670 new_oop->oop_iterate(&_mark_and_push);
7671 }
7672 }
7673 }
7674
7675 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
7676 CMSCollector* collector,
7677 MemRegion span, CMSBitMap* bit_map,
7678 OopTaskQueue* work_queue):
7679 _collector(collector),
7680 _span(span),
7681 _bit_map(bit_map),
7682 _work_queue(work_queue) { }
7683
7684 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
7724 return "Par Spooling Space";
7725 case _adaptive_size_policy:
7726 return "Ergonomics";
7727 default:
7728 return "unknown";
7729 }
7730 }
7731
7732 void CMSDrainMarkingStackClosure::do_void() {
7733 // the max number to take from overflow list at a time
7734 const size_t num = _mark_stack->capacity()/4;
7735 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
7736 "Overflow list should be NULL during concurrent phases");
7737 while (!_mark_stack->isEmpty() ||
7738 // if stack is empty, check the overflow list
7739 _collector->take_from_overflow_list(num, _mark_stack)) {
7740 oop obj = _mark_stack->pop();
7741 HeapWord* addr = (HeapWord*)obj;
7742 assert(_span.contains(addr), "Should be within span");
7743 assert(_bit_map->isMarked(addr), "Should be marked");
7744 assert(oopDesc::is_oop(obj), "Should be an oop");
7745 obj->oop_iterate(_keep_alive);
7746 }
7747 }
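// [Editorial note] The refill in the loop above cannot violate
// take_from_overflow_list()'s preconditions: the overflow list is consulted
// only once _mark_stack has drained (so the stack-empty assertion holds),
// and num is capped at a quarter of the stack's capacity (so
// capacity() > num holds).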
7748
7749 void CMSParDrainMarkingStackClosure::do_void() {
7750 // drain queue
7751 trim_queue(0);
7752 }
7753
7754 // Trim our work_queue so its length is below max at return
7755 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
7756 while (_work_queue->size() > max) {
7757 oop new_oop;
7758 if (_work_queue->pop_local(new_oop)) {
7759 assert(oopDesc::is_oop(new_oop), "Expected an oop");
7760 assert(_bit_map->isMarked((HeapWord*)new_oop),
7761 "no white objects on this stack!");
7762 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7763 // iterate over the oops in this oop, marking and pushing
7764 // the ones in CMS heap (i.e. in _span).
7765 new_oop->oop_iterate(&_mark_and_push);
7766 }
7767 }
7768 }
7769
7770 ////////////////////////////////////////////////////////////////////
7771 // Support for Marking Stack Overflow list handling and related code
7772 ////////////////////////////////////////////////////////////////////
7773 // Much of the following code is similar in shape and spirit to the
7774 // code used in ParNewGC. We should try and share that code
7775 // as much as possible in the future.
7776
7777 #ifndef PRODUCT
7778 // Debugging support for CMSStackOverflowALot
7779
7790 return false;
7791 }
7792 }
7793
7794 bool CMSCollector::par_simulate_overflow() {
7795 return simulate_overflow();
7796 }
7797 #endif

// Single-threaded
bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
  assert(stack->isEmpty(), "Expected precondition");
  assert(stack->capacity() > num, "Shouldn't bite more than can chew");
  size_t i = num;
  oop cur = _overflow_list;
  const markOop proto = markOopDesc::prototype();
  NOT_PRODUCT(ssize_t n = 0;)
  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
    next = oop(cur->mark());
    cur->set_mark(proto);  // until proven otherwise
    assert(oopDesc::is_oop(cur), "Should be an oop");
    bool res = stack->push(cur);
    assert(res, "Bit off more than can chew?");
    NOT_PRODUCT(n++;)
  }
  _overflow_list = cur;
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  _num_par_pushes -= n;
#endif
  return !stack->isEmpty();
}
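// A typical caller is CMSDrainMarkingStackClosure::do_void() above,
// which refills its (empty) marking stack a quarter of the stack's
// capacity at a time.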

#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
// (MT-safe) Get a prefix of at most "num" from the list.
// The overflow list is chained through the mark word of
// each object in the list. We fetch the entire list,
// break off a prefix of the right size and return the
// remainder. If other threads try to take objects from
// the overflow list at that time, they will wait for
// some time to see if data becomes available. If (and
// ... (elided) ...
          suffix_tail->set_mark(markOop(cur_overflow_list));
        } else {  // cur_overflow_list == BUSY
          suffix_tail->set_mark(NULL);
        }
        // ... and try to place spliced list back on overflow_list ...
        observed_overflow_list =
          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
      // ... until we have succeeded in doing so.
    }
  }

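  // At this point the detached prefix is private to this thread and
  // can be pushed onto the work queue without further synchronization.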
  // Push the prefix elements on work_q
  assert(prefix != NULL, "control point invariant");
  const markOop proto = markOopDesc::prototype();
  oop next;
  NOT_PRODUCT(ssize_t n = 0;)
  for (cur = prefix; cur != NULL; cur = next) {
    next = oop(cur->mark());
    cur->set_mark(proto);  // until proven otherwise
    assert(oopDesc::is_oop(cur), "Should be an oop");
    bool res = work_q->push(cur);
    assert(res, "Bit off more than we can chew?");
    NOT_PRODUCT(n++;)
  }
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
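// Note on BUSY: publishing the BUSY sentinel while a thread holds the
// detached list keeps concurrent takers from walking a chain whose
// mark words are in flux; as the block comment above notes, they wait
// and retry instead.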

// Single-threaded
void CMSCollector::push_on_overflow_list(oop p) {
  NOT_PRODUCT(_num_par_pushes++;)
  assert(oopDesc::is_oop(p), "Not an oop");
  preserve_mark_if_necessary(p);
  p->set_mark((markOop)_overflow_list);
  _overflow_list = p;
}
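// preserve_mark_if_necessary() saves any non-trivial mark word (e.g.,
// a hashed or locked mark) before it is overwritten with the list
// link; restore_preserved_marks_if_any() below reinstates those marks
// once overflow processing is complete.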

// Multi-threaded; use CAS to prepend to overflow list
void CMSCollector::par_push_on_overflow_list(oop p) {
  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
  assert(oopDesc::is_oop(p), "Not an oop");
  par_preserve_mark_if_necessary(p);
  oop observed_overflow_list = _overflow_list;
  oop cur_overflow_list;
  do {
    cur_overflow_list = observed_overflow_list;
    if (cur_overflow_list != BUSY) {
      p->set_mark(markOop(cur_overflow_list));
    } else {
      p->set_mark(NULL);
    }
    observed_overflow_list =
      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
  } while (cur_overflow_list != observed_overflow_list);
}
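// The loop above is the standard lock-free prepend: link the new
// element to the observed head (or to NULL when the head is BUSY, so
// the chain never threads through the sentinel) and retry the CAS
// until it installs p as the new head. The same pattern on a plain
// pointer list would look roughly like this (hypothetical Node type,
// illustration only):
//
//   struct Node { Node* next; };
//   void prepend(Node* volatile* head, Node* p) {
//     Node* observed = *head;
//     Node* cur;
//     do {
//       cur = observed;
//       p->next = cur;
//       observed = (Node*) Atomic::cmpxchg_ptr(p, head, cur);
//     } while (cur != observed);
//   }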
#undef BUSY

// Single-threaded
// General Note on GrowableArray: pushes may silently fail
// because we are (temporarily) out of C-heap for expanding
// the stack. The problem is quite ubiquitous and affects
// ... (elided) ...
// until all work's been completed. Because we
// expect the preserved oop stack (set) to be small,
// it's probably fine to do this single-threaded.
// We can explore cleverer concurrent/overlapped/parallel
// processing of preserved marks if we feel the
// need for this in the future. Stack overflow should
// be so rare in practice and, when it happens, its
// effect on performance so great that this will
// likely just be in the noise anyway.
void CMSCollector::restore_preserved_marks_if_any() {
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  assert(Thread::current()->is_ConcurrentGC_thread() ||
         Thread::current()->is_VM_thread(),
         "should be single-threaded");
  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
         "bijection");

  while (!_preserved_oop_stack.is_empty()) {
    oop p = _preserved_oop_stack.pop();
    assert(oopDesc::is_oop(p), "Should be an oop");
    assert(_span.contains((HeapWord*)p), "oop should be in _span");
    assert(p->mark() == markOopDesc::prototype(),
           "Set when taken from overflow list");
    markOop m = _preserved_mark_stack.pop();
    p->set_mark(m);
  }
  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
         "stacks were cleared above");
}
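// Restoring marks completes the protocol begun by
// preserve_mark_if_necessary(): every object that left the overflow
// list carries the trivial prototype mark (asserted above), and here
// it receives its original mark word back before the world resumes.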

#ifndef PRODUCT
bool CMSCollector::no_preserved_marks() const {
  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
}
#endif

// Transfer some number of overflowed objects to the usual marking
// stack. Return true if some objects were transferred.
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
  size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,