src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 6626 : imported patch 8035400-move-g1parscanthreadstate-into-own-files
rev 6627 : imported patch 8035400-2-bengt-fixes


  27 #define ATTRIBUTE_PRINTF(x,y)
  28 #endif
  29 
  30 #include "precompiled.hpp"
  31 #include "classfile/stringTable.hpp"
  32 #include "code/codeCache.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  35 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  36 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  37 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  38 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  39 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  40 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  41 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  42 #include "gc_implementation/g1/g1EvacFailure.hpp"
  43 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  44 #include "gc_implementation/g1/g1Log.hpp"
  45 #include "gc_implementation/g1/g1MarkSweep.hpp"
  46 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  47 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  48 #include "gc_implementation/g1/g1StringDedup.hpp"
  49 #include "gc_implementation/g1/g1YCTypes.hpp"
  50 #include "gc_implementation/g1/heapRegion.inline.hpp"
  51 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  52 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  53 #include "gc_implementation/g1/vm_operations_g1.hpp"
  54 #include "gc_implementation/shared/gcHeapSummary.hpp"
  55 #include "gc_implementation/shared/gcTimer.hpp"
  56 #include "gc_implementation/shared/gcTrace.hpp"
  57 #include "gc_implementation/shared/gcTraceTime.hpp"
  58 #include "gc_implementation/shared/isGCActiveMark.hpp"
  59 #include "memory/gcLocker.inline.hpp"
  60 #include "memory/generationSpec.hpp"
  61 #include "memory/iterator.hpp"
  62 #include "memory/referenceProcessor.hpp"
  63 #include "oops/oop.inline.hpp"
  64 #include "oops/oop.pcgc.inline.hpp"
  65 #include "runtime/atomic.inline.hpp"
  66 #include "runtime/prefetch.inline.hpp"
  67 #include "runtime/orderAccess.inline.hpp"
  68 #include "runtime/vmThread.hpp"
  69 #include "utilities/globalDefinitions.hpp"
  70 #include "utilities/ticks.hpp"
  71 
  72 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  73 
  74 // turn it on so that the contents of the young list (scan-only /
  75 // to-be-collected) are printed at "strategic" points before / during
  76 // / after the collection --- this is useful for debugging
  77 #define YOUNG_LIST_VERBOSE 0
  78 // CURRENT STATUS
  79 // This file is under construction.  Search for "FIXME".
  80 
  81 // INVARIANTS/NOTES
  82 //
  83 // All allocation activity covered by the G1CollectedHeap interface is
  84 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  85 // and allocate_new_tlab, which are the "entry" points to the
  86 // allocation code from the rest of the JVM.  (Note that this does not
  87 // apply to TLAB allocation, which is not part of this interface: it
  88 // is done by clients of this interface.)
  89 
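
// A minimal sketch of the entry-point serialization described above, using
// HotSpot's Heap_lock/MutexLocker idiom; allocate_sketch() and
// slow_path_allocate_sketch() are placeholder names, not the actual entry
// points in this file.

static HeapWord* slow_path_allocate_sketch(size_t word_size);  // hypothetical helper

static HeapWord* allocate_sketch(size_t word_size) {
  MutexLocker ml(Heap_lock);  // every heap-interface allocation takes this lock
  return slow_path_allocate_sketch(word_size);  // runs with the lock held
}
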
  90 // Notes on implementation of parallelism in different tasks.


4542   } else {
4543     assert(purpose == GCAllocForTenured, "sanity");
4544     HeapWord* result = old_attempt_allocation(word_size);
4545     if (result != NULL) {
4546       return result;
4547     } else {
4548       // Let's try to allocate in the survivors in case we can fit the
4549       // object there.
4550       return survivor_attempt_allocation(word_size);
4551     }
4552   }
4553 
4554   ShouldNotReachHere();
4555   // Trying to keep some compilers happy.
4556   return NULL;
4557 }
4558 
4559 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4560   ParGCAllocBuffer(gclab_word_size), _retired(true) { }
4561 
4562 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
4563   : _g1h(g1h),
4564     _refs(g1h->task_queue(queue_num)),
4565     _dcq(&g1h->dirty_card_queue_set()),
4566     _ct_bs(g1h->g1_barrier_set()),
4567     _g1_rem(g1h->g1_rem_set()),
4568     _hash_seed(17), _queue_num(queue_num),
4569     _term_attempts(0),
4570     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4571     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4572     _age_table(false), _scanner(g1h, this, rp),
4573     _strong_roots_time(0), _term_time(0),
4574     _alloc_buffer_waste(0), _undo_waste(0) {
4575   // we allocate G1YoungSurvRateNumRegions plus one entries, since
4576   // we "sacrifice" entry 0 to keep track of surviving bytes for
4577   // non-young regions (where the age is -1)
4578   // We also add a few elements at the beginning and at the end in
4579   // an attempt to eliminate cache contention
4580   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
4581   uint array_length = PADDING_ELEM_NUM +
4582                       real_length +
4583                       PADDING_ELEM_NUM;
4584   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
4585   if (_surviving_young_words_base == NULL)
4586     vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
4587                           "Not enough space for young surv histo.");
4588   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
4589   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
4590 
4591   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
4592   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
4593 
4594   _start = os::elapsedTime();
4595 }
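
// A hypothetical helper (not part of the class, and assuming access to the
// accessors it uses) illustrating the indexing convention established above:
// slot 0 accumulates survivors from non-young regions
// (young_index_in_cset() == -1), slots 1..N the young CSet regions.

static size_t* surviving_words_slot_sketch(G1ParScanThreadState* pss,
                                           HeapRegion* from_region) {
  // The +1 maps the -1 index used for non-young regions onto slot 0.
  return pss->surviving_young_words() + (from_region->young_index_in_cset() + 1);
}
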
4596 
4597 void
4598 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
4599 {
4600   st->print_raw_cr("GC Termination Stats");
4601   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
4602                    " ------waste (KiB)------");
4603   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
4604                    "  total   alloc    undo");
4605   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
4606                    " ------- ------- -------");
4607 }
4608 
4609 void
4610 G1ParScanThreadState::print_termination_stats(int i,
4611                                               outputStream* const st) const
4612 {
4613   const double elapsed_ms = elapsed_time() * 1000.0;
4614   const double s_roots_ms = strong_roots_time() * 1000.0;
4615   const double term_ms    = term_time() * 1000.0;
4616   st->print_cr("%3d %9.2f %9.2f %6.2f "
4617                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4618                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4619                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
4620                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
4621                (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
4622                alloc_buffer_waste() * HeapWordSize / K,
4623                undo_waste() * HeapWordSize / K);
4624 }
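
// Example of the table these two methods produce together (row values
// invented for illustration; column widths follow the format strings above):
//
//   GC Termination Stats
//        elapsed  --strong roots-- -------termination------- ------waste (KiB)------
//   thr     ms        ms      %        ms      %    attempts  total   alloc    undo
//   --- --------- --------- ------ --------- ------ -------- ------- ------- -------
//     0     12.34      4.56  36.95      1.23   9.97       42      15      11       4
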
4625 
4626 #ifdef ASSERT
4627 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
4628   assert(ref != NULL, "invariant");
4629   assert(UseCompressedOops, "sanity");
4630   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
4631   oop p = oopDesc::load_decode_heap_oop(ref);
4632   assert(_g1h->is_in_g1_reserved(p),
4633          err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4634   return true;
4635 }
4636 
4637 bool G1ParScanThreadState::verify_ref(oop* ref) const {
4638   assert(ref != NULL, "invariant");
4639   if (has_partial_array_mask(ref)) {
4640     // Must be in the collection set--it's already been copied.
4641     oop p = clear_partial_array_mask(ref);
4642     assert(_g1h->obj_in_cs(p),
4643            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4644   } else {
4645     oop p = oopDesc::load_decode_heap_oop(ref);
4646     assert(_g1h->is_in_g1_reserved(p),
4647            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4648   }
4649   return true;
4650 }
4651 
4652 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4653   if (ref.is_narrow()) {
4654     return verify_ref((narrowOop*) ref);
4655   } else {
4656     return verify_ref((oop*) ref);
4657   }
4658 }
4659 #endif // ASSERT
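
// Sketch of the pointer-tagging scheme these checks rely on, assuming the
// low-order 0x2 tag bit used in this era of the code (see
// set_partial_array_mask() / clear_partial_array_mask() for the
// authoritative definitions):

static oop* set_partial_array_mask_sketch(oop obj) {
  return (oop*) ((intptr_t) obj | 0x2);  // tag: "partial objArray task"
}

static oop clear_partial_array_mask_sketch(oop* ref) {
  return oop((intptr_t) ref & ~0x2);     // strip the tag to recover the oop
}
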
4660 
4661 void G1ParScanThreadState::trim_queue() {
4662   assert(_evac_failure_cl != NULL, "not set");
4663 
4664   StarTask ref;
4665   do {
4666     // Drain the overflow stack first, so other threads can steal.
4667     while (refs()->pop_overflow(ref)) {
4668       deal_with_reference(ref);
4669     }
4670 
4671     while (refs()->pop_local(ref)) {
4672       deal_with_reference(ref);
4673     }
4674   } while (!refs()->is_empty());
4675 }
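
// Note: deal_with_reference() can push further entries (object fields,
// partial-array chunks) while draining, so the outer loop re-checks
// is_empty(); the overflow stack is drained first to keep the local queue
// available for stealing by other workers.
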
4676 
4677 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4678                                      G1ParScanThreadState* par_scan_state) :
4679   _g1(g1), _par_scan_state(par_scan_state),
4680   _worker_id(par_scan_state->queue_num()) { }
4681 
4682 void G1ParCopyHelper::mark_object(oop obj) {
4683   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
4684 
4685   // We know that the object is not moving so it's safe to read its size.
4686   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4687 }
4688 
4689 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
4690   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4691   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4692   assert(from_obj != to_obj, "should not be self-forwarded");
4693 
4694   assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
4695   assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
4696 
4697   // The object might be in the process of being copied by another
4698   // worker so we cannot trust that its to-space image is
4699   // well-formed. So we have to read its size from its from-space
4700   // image which we know should not be changing.
4701   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4702 }
4703 
4704 oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
4705   size_t word_sz = old->size();
4706   HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
4707   // +1 to make the -1 indexes valid...
4708   int       young_index = from_region->young_index_in_cset()+1;
4709   assert( (from_region->is_young() && young_index >  0) ||
4710          (!from_region->is_young() && young_index == 0), "invariant" );
4711   G1CollectorPolicy* g1p = _g1h->g1_policy();
4712   markOop m = old->mark();
4713   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4714                                            : m->age();
4715   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4716                                                              word_sz);
4717   HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
4718 #ifndef PRODUCT
4719   // Should this evacuation fail?
4720   if (_g1h->evacuation_should_fail()) {
4721     if (obj_ptr != NULL) {
4722       undo_allocation(alloc_purpose, obj_ptr, word_sz);
4723       obj_ptr = NULL;
4724     }
4725   }
4726 #endif // !PRODUCT
4727 
4728   if (obj_ptr == NULL) {
4729     // This will either forward-to-self, or detect that someone else has
4730     // installed a forwarding pointer.
4731     return _g1h->handle_evacuation_failure_par(this, old);
4732   }
4733 
4734   oop obj = oop(obj_ptr);
4735 
4736   // We're going to allocate linearly, so might as well prefetch ahead.
4737   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4738 
4739   oop forward_ptr = old->forward_to_atomic(obj);
4740   if (forward_ptr == NULL) {
4741     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4742 
4743     // alloc_purpose is just a hint to allocate() above, recheck the type of region
4744     // we actually allocated from and update alloc_purpose accordingly
4745     HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
4746     alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
4747 
4748     if (g1p->track_object_age(alloc_purpose)) {
4749       // We could simply do obj->incr_age(). However, this causes a
4750       // performance issue. obj->incr_age() will first check whether
4751       // the object has a displaced mark by checking its mark word;
4752       // getting the mark word from the new location of the object
4753       // stalls. So, given that we already have the mark word and we
4754       // are about to install it anyway, it's better to increase the
4755       // age on the mark word, when the object does not have a
4756       // displaced mark word. We're not expecting many objects to have
4757       // a displaced marked word, so that case is not optimized
4758       // further (it could be...) and we simply call obj->incr_age().
4759 
4760       if (m->has_displaced_mark_helper()) {
4761         // in this case, we have to install the mark word first,
4762         // otherwise obj looks to be forwarded (the old mark word,
4763         // which contains the forward pointer, was copied)
4764         obj->set_mark(m);
4765         obj->incr_age();
4766       } else {
4767         m = m->incr_age();
4768         obj->set_mark(m);
4769       }
4770       age_table()->add(obj, word_sz);
4771     } else {
4772       obj->set_mark(m);
4773     }
4774 
4775     if (G1StringDedup::is_enabled()) {
4776       G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
4777                                              to_region->is_young(),
4778                                              queue_num(),
4779                                              obj);
4780     }
4781 
4782     size_t* surv_young_words = surviving_young_words();
4783     surv_young_words[young_index] += word_sz;
4784 
4785     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4786       // We keep track of the next start index in the length field of
4787       // the to-space object. The actual length can be found in the
4788       // length field of the from-space object.
4789       arrayOop(obj)->set_length(0);
4790       oop* old_p = set_partial_array_mask(old);
4791       push_on_queue(old_p);
4792     } else {
4793       // No point in using the slower heap_region_containing() method,
4794       // given that we know obj is in the heap.
4795       _scanner.set_region(_g1h->heap_region_containing_raw(obj));
4796       obj->oop_iterate_backwards(&_scanner);
4797     }
4798   } else {
4799     undo_allocation(alloc_purpose, obj_ptr, word_sz);
4800     obj = forward_ptr;
4801   }
4802   return obj;
4803 }
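
// Consumer-side sketch of the chunking scheme set up above (illustrative
// only, not the actual partial-array handling): the to-space length field
// holds the next start index, while the from-space object still carries the
// real length.

static void process_partial_array_sketch(oop from_obj) {
  oop to_obj   = from_obj->forwardee();                  // already copied
  int start    = arrayOop(to_obj)->length();             // next chunk to scan
  int real_len = arrayOop(from_obj)->length();           // true array length
  int end      = MIN2(start + (int) ParGCArrayScanChunk, real_len);
  arrayOop(to_obj)->set_length(end);                     // publish progress
  // If chunks remain (end < real_len), re-push a tagged task so any worker
  // can continue; then scan elements [start, end) of to_obj.
}
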
4804 
4805 template <class T>
4806 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4807   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4808     _scanned_klass->record_modified_oops();
4809   }
4810 }
4811 
4812 template <G1Barrier barrier, bool do_mark_object>
4813 template <class T>
4814 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4815   T heap_oop = oopDesc::load_heap_oop(p);
4816 
4817   if (oopDesc::is_null(heap_oop)) {
4818     return;
4819   }
4820 
4821   oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4822 
4823   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4824 




  27 #define ATTRIBUTE_PRINTF(x,y)
  28 #endif
  29 
  30 #include "precompiled.hpp"
  31 #include "classfile/stringTable.hpp"
  32 #include "code/codeCache.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  35 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  36 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  37 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  38 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  39 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  40 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  41 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  42 #include "gc_implementation/g1/g1EvacFailure.hpp"
  43 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  44 #include "gc_implementation/g1/g1Log.hpp"
  45 #include "gc_implementation/g1/g1MarkSweep.hpp"
  46 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  47 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
  48 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  49 #include "gc_implementation/g1/g1StringDedup.hpp"
  50 #include "gc_implementation/g1/g1YCTypes.hpp"
  51 #include "gc_implementation/g1/heapRegion.inline.hpp"
  52 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  53 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  54 #include "gc_implementation/g1/vm_operations_g1.hpp"
  55 #include "gc_implementation/shared/gcHeapSummary.hpp"
  56 #include "gc_implementation/shared/gcTimer.hpp"
  57 #include "gc_implementation/shared/gcTrace.hpp"
  58 #include "gc_implementation/shared/gcTraceTime.hpp"
  59 #include "gc_implementation/shared/isGCActiveMark.hpp"
  60 #include "memory/gcLocker.inline.hpp"
  61 #include "memory/generationSpec.hpp"
  62 #include "memory/iterator.hpp"
  63 #include "memory/referenceProcessor.hpp"
  64 #include "oops/oop.inline.hpp"
  65 #include "oops/oop.pcgc.inline.hpp"
  66 #include "runtime/atomic.inline.hpp"
  67 #include "runtime/orderAccess.inline.hpp"
  68 #include "runtime/vmThread.hpp"
  69 #include "utilities/globalDefinitions.hpp"
  70 
  71 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  72 
  73 // turn it on so that the contents of the young list (scan-only /
  74 // to-be-collected) are printed at "strategic" points before / during
  75 // / after the collection --- this is useful for debugging
  76 #define YOUNG_LIST_VERBOSE 0
  77 // CURRENT STATUS
  78 // This file is under construction.  Search for "FIXME".
  79 
  80 // INVARIANTS/NOTES
  81 //
  82 // All allocation activity covered by the G1CollectedHeap interface is
  83 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  84 // and allocate_new_tlab, which are the "entry" points to the
  85 // allocation code from the rest of the JVM.  (Note that this does not
  86 // apply to TLAB allocation, which is not part of this interface: it
  87 // is done by clients of this interface.)
  88 
  89 // Notes on implementation of parallelism in different tasks.


4541   } else {
4542     assert(purpose == GCAllocForTenured, "sanity");
4543     HeapWord* result = old_attempt_allocation(word_size);
4544     if (result != NULL) {
4545       return result;
4546     } else {
4547       // Let's try to allocate in the survivors in case we can fit the
4548       // object there.
4549       return survivor_attempt_allocation(word_size);
4550     }
4551   }
4552 
4553   ShouldNotReachHere();
4554   // Trying to keep some compilers happy.
4555   return NULL;
4556 }
4557 
4558 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4559   ParGCAllocBuffer(gclab_word_size), _retired(true) { }
4560 
4561 void G1ParCopyHelper::mark_object(oop obj) {
4562   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
4563 
4564   // We know that the object is not moving so it's safe to read its size.
4565   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4566 }
4567 
4568 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
4569   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4570   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4571   assert(from_obj != to_obj, "should not be self-forwarded");
4572 
4573   assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
4574   assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
4575 
4576   // The object might be in the process of being copied by another
4577   // worker so we cannot trust that its to-space image is
4578   // well-formed. So we have to read its size from its from-space
4579   // image which we know should not be changing.
4580   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4581 }
4582 
4583 template <class T>
4584 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4585   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4586     _scanned_klass->record_modified_oops();
4587   }
4588 }
4589 
4590 template <G1Barrier barrier, bool do_mark_object>
4591 template <class T>
4592 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4593   T heap_oop = oopDesc::load_heap_oop(p);
4594 
4595   if (oopDesc::is_null(heap_oop)) {
4596     return;
4597   }
4598 
4599   oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4600 
4601   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4602