src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp

rev 6334 : 8035400: Move G1ParScanThreadState into its own files
Summary: Extract the G1ParScanThreadState class from G1CollectedHeap.?pp into its own files.
Reviewed-by: brutisso, mgerdin
rev 6335 : 8035401: Fix visibility of G1ParScanThreadState members
Summary: After JDK-8035400 there were several opportunities to fix the visibility of members of the G1ParScanThreadState class.
Reviewed-by: brutisso, mgerdin
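For context, the visibility cleanup in rev 6335 amounts to moving fields that only the class itself touches out of the public part of the declaration and leaving accessors for callers that still need the values. A minimal sketch of that pattern, using two of the waste counters seen below as stand-ins (illustrative only, not the actual G1ParScanThreadState declaration):

#include <cstddef>

// Before: data members exposed directly to every caller.
class ExampleStateBefore {
 public:
  size_t _alloc_buffer_waste;
  size_t _undo_waste;
};

// After: the fields and their mutators are private; callers that only read
// the totals go through const accessors.
class ExampleStateAfter {
 private:
  size_t _alloc_buffer_waste;
  size_t _undo_waste;

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

 public:
  ExampleStateAfter() : _alloc_buffer_waste(0), _undo_waste(0) {}
  size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
  size_t undo_waste() const         { return _undo_waste; }
};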


  52   // non-young regions (where the age is -1)
  53   // We also add a few elements at the beginning and at the end in
  54   // an attempt to eliminate cache contention
  55   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  56   uint array_length = PADDING_ELEM_NUM +
  57                       real_length +
  58                       PADDING_ELEM_NUM;
  59   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  60   if (_surviving_young_words_base == NULL)
  61     vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
  62                           "Not enough space for young surv histo.");
  63   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  64   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
  65 
  66   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  67   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
  68 
  69   _start = os::elapsedTime();
  70 }
  71 
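The PADDING_ELEM_NUM elements added on both sides of the surviving-young-words array keep the slots this worker updates off cache lines that neighbouring allocations, written by other threads, also touch; that is what the "cache contention" comment refers to. A standalone sketch of the same padding idea, using plain new[] and an assumed 64-byte line size instead of NEW_C_HEAP_ARRAY and PADDING_ELEM_NUM:

#include <cstddef>
#include <cstring>

// Illustrative only: per-thread counters padded at both ends so the counters
// never share a cache line with adjacent heap blocks written by other threads
// (false sharing).
static const size_t kCacheLineBytes = 64;                        // assumed line size
static const size_t kPaddingElems   = kCacheLineBytes / sizeof(size_t);

struct PaddedCounters {
  size_t* _base;      // includes leading and trailing padding
  size_t* _counters;  // the region this thread actually increments

  explicit PaddedCounters(size_t real_length) {
    _base     = new size_t[kPaddingElems + real_length + kPaddingElems];
    _counters = _base + kPaddingElems;
    std::memset(_counters, 0, real_length * sizeof(size_t));
  }
  ~PaddedCounters() { delete[] _base; }
};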
  72 void
  73 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
  74 {
  75   st->print_raw_cr("GC Termination Stats");
  76   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
  77                    " ------waste (KiB)------");
  78   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
  79                    "  total   alloc    undo");
  80   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
  81                    " ------- ------- -------");
  82 }
  83 
  84 void
  85 G1ParScanThreadState::print_termination_stats(int i,
  86                                               outputStream* const st) const
  87 {
  88   const double elapsed_ms = elapsed_time() * 1000.0;
  89   const double s_roots_ms = strong_roots_time() * 1000.0;
  90   const double term_ms    = term_time() * 1000.0;
  91   st->print_cr("%3d %9.2f %9.2f %6.2f "


 122            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
 123   }
 124   return true;
 125 }
 126 
 127 bool G1ParScanThreadState::verify_task(StarTask ref) const {
 128   if (ref.is_narrow()) {
 129     return verify_ref((narrowOop*) ref);
 130   } else {
 131     return verify_ref((oop*) ref);
 132   }
 133 }
 134 #endif // ASSERT
 135 
 136 void G1ParScanThreadState::trim_queue() {
 137   assert(_evac_failure_cl != NULL, "not set");
 138 
 139   StarTask ref;
 140   do {
 141     // Drain the overflow stack first, so other threads can steal.
 142     while (refs()->pop_overflow(ref)) {
 143       deal_with_reference(ref);
 144     }
 145 
 146     while (refs()->pop_local(ref)) {
 147       deal_with_reference(ref);
 148     }
 149   } while (!refs()->is_empty());
 150 }
 151 
 152 oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
 153   size_t word_sz = old->size();
 154   HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
 155   // +1 to make the -1 indexes valid...
 156   int       young_index = from_region->young_index_in_cset()+1;
 157   assert( (from_region->is_young() && young_index >  0) ||
 158          (!from_region->is_young() && young_index == 0), "invariant" );
 159   G1CollectorPolicy* g1p = _g1h->g1_policy();
 160   markOop m = old->mark();
 161   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
 162                                            : m->age();
 163   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
 164                                                              word_sz);
 165   HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
 166 #ifndef PRODUCT
 167   // Should this evacuation fail?
 168   if (_g1h->evacuation_should_fail()) {
 169     if (obj_ptr != NULL) {


 232 
 233     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
 234       // We keep track of the next start index in the length field of
 235       // the to-space object. The actual length can be found in the
 236       // length field of the from-space object.
 237       arrayOop(obj)->set_length(0);
 238       oop* old_p = set_partial_array_mask(old);
 239       push_on_queue(old_p);
 240     } else {
 241       // No point in using the slower heap_region_containing() method,
 242       // given that we know obj is in the heap.
 243       _scanner.set_region(_g1h->heap_region_containing_raw(obj));
 244       obj->oop_iterate_backwards(&_scanner);
 245     }
 246   } else {
 247     undo_allocation(alloc_purpose, obj_ptr, word_sz);
 248     obj = forward_ptr;
 249   }
 250   return obj;
 251 }

  52   // non-young regions (where the age is -1)
  53   // We also add a few elements at the beginning and at the end in
  54   // an attempt to eliminate cache contention
  55   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  56   uint array_length = PADDING_ELEM_NUM +
  57                       real_length +
  58                       PADDING_ELEM_NUM;
  59   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  60   if (_surviving_young_words_base == NULL)
  61     vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
  62                           "Not enough space for young surv histo.");
  63   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  64   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
  65 
  66   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  67   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
  68 
  69   _start = os::elapsedTime();
  70 }
  71 
  72 G1ParScanThreadState::~G1ParScanThreadState() {
  73   retire_alloc_buffers();
  74   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
  75 }
  76 
  77 void
  78 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
  79 {
  80   st->print_raw_cr("GC Termination Stats");
  81   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
  82                    " ------waste (KiB)------");
  83   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
  84                    "  total   alloc    undo");
  85   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
  86                    " ------- ------- -------");
  87 }
  88 
  89 void
  90 G1ParScanThreadState::print_termination_stats(int i,
  91                                               outputStream* const st) const
  92 {
  93   const double elapsed_ms = elapsed_time() * 1000.0;
  94   const double s_roots_ms = strong_roots_time() * 1000.0;
  95   const double term_ms    = term_time() * 1000.0;
  96   st->print_cr("%3d %9.2f %9.2f %6.2f "


 127            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
 128   }
 129   return true;
 130 }
 131 
 132 bool G1ParScanThreadState::verify_task(StarTask ref) const {
 133   if (ref.is_narrow()) {
 134     return verify_ref((narrowOop*) ref);
 135   } else {
 136     return verify_ref((oop*) ref);
 137   }
 138 }
 139 #endif // ASSERT
 140 
 141 void G1ParScanThreadState::trim_queue() {
 142   assert(_evac_failure_cl != NULL, "not set");
 143 
 144   StarTask ref;
 145   do {
 146     // Drain the overflow stack first, so other threads can steal.
 147     while (_refs->pop_overflow(ref)) {
 148       dispatch_reference(ref);
 149     }
 150 
 151     while (_refs->pop_local(ref)) {
 152       dispatch_reference(ref);
 153     }
 154   } while (!_refs->is_empty());
 155 }
 156 
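The drain order in trim_queue is deliberate: entries that spilled onto the overflow stack are private to this worker, while entries still in the bounded local queue are what idle workers steal from. Working through the private backlog first therefore keeps stealable work visible for longer. A simplified single-worker model of that shape, using standard containers rather than HotSpot's taskqueue types:

#include <deque>
#include <vector>

struct Task { int id; };

// Simplified model (not the HotSpot API): 'local' is the bounded, stealable
// queue; 'overflow' is private spill space only this worker ever sees.
struct WorkerModel {
  std::deque<Task>  local;
  std::vector<Task> overflow;

  void process(const Task&) { /* evacuate the referenced object, etc. */ }

  void trim() {
    do {
      // Private overflow first: nobody else can help with these entries.
      while (!overflow.empty()) {
        Task t = overflow.back();
        overflow.pop_back();
        process(t);
      }
      // Then the local queue, which other workers may have been stealing from.
      while (!local.empty()) {
        Task t = local.back();
        local.pop_back();
        process(t);
      }
    } while (!overflow.empty() || !local.empty());
  }
};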
 157 oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
 158   size_t word_sz = old->size();
 159   HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
 160   // +1 to make the -1 indexes valid...
 161   int       young_index = from_region->young_index_in_cset()+1;
 162   assert( (from_region->is_young() && young_index >  0) ||
 163          (!from_region->is_young() && young_index == 0), "invariant" );
 164   G1CollectorPolicy* g1p = _g1h->g1_policy();
 165   markOop m = old->mark();
 166   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
 167                                            : m->age();
 168   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
 169                                                              word_sz);
 170   HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
 171 #ifndef PRODUCT
 172   // Should this evacuation fail?
 173   if (_g1h->evacuation_should_fail()) {
 174     if (obj_ptr != NULL) {


 237 
 238     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
 239       // We keep track of the next start index in the length field of
 240       // the to-space object. The actual length can be found in the
 241       // length field of the from-space object.
 242       arrayOop(obj)->set_length(0);
 243       oop* old_p = set_partial_array_mask(old);
 244       push_on_queue(old_p);
 245     } else {
 246       // No point in using the slower heap_region_containing() method,
 247       // given that we know obj is in the heap.
 248       _scanner.set_region(_g1h->heap_region_containing_raw(obj));
 249       obj->oop_iterate_backwards(&_scanner);
 250     }
 251   } else {
 252     undo_allocation(alloc_purpose, obj_ptr, word_sz);
 253     obj = forward_ptr;
 254   }
 255   return obj;
 256 }
 257 
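The objArray branch above avoids scanning a huge array in a single queue pop: the to-space copy's length field is reused as a cursor for the next chunk to scan (the true length stays readable in the from-space copy), and a pointer tagged with the partial-array mask is pushed so the remaining chunks can be claimed later, possibly by other workers. A hedged sketch of the chunking idea with an explicit cursor instead of the tagged pointer and reused length field (kScanChunk stands in for ParGCArrayScanChunk):

#include <algorithm>
#include <cstddef>

static const size_t kScanChunk = 50;  // stand-in for ParGCArrayScanChunk

// One unit of deferred array-scanning work; 'cursor' plays the role that the
// to-space object's length field plays in the real code.
struct PartialArrayTask {
  const int* elems;   // from-space contents
  size_t     length;  // real length of the array
  size_t     cursor;  // next index to scan
};

template <typename PushFn, typename ScanFn>
void scan_one_chunk(PartialArrayTask task, PushFn push, ScanFn scan) {
  size_t end = std::min(task.cursor + kScanChunk, task.length);
  if (end < task.length) {
    // Re-enqueue the remainder before scanning, so another worker can claim
    // the next chunk while this one is being processed.
    PartialArrayTask rest = task;
    rest.cursor = end;
    push(rest);
  }
  for (size_t i = task.cursor; i < end; i++) {
    scan(task.elems[i]);
  }
}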
 258 HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
 259   HeapWord* obj = NULL;
 260   size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
 261   if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
 262     G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
 263     add_to_alloc_buffer_waste(alloc_buf->words_remaining());
 264     alloc_buf->retire(false /* end_of_gc */, false /* retain */);
 265 
 266     HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
 267     if (buf == NULL) {
 268       return NULL; // Let caller handle allocation failure.
 269     }
 270     // Otherwise.
 271     alloc_buf->set_word_size(gclab_word_size);
 272     alloc_buf->set_buf(buf);
 273 
 274     obj = alloc_buf->allocate(word_sz);
 275     assert(obj != NULL, "buffer was definitely big enough...");
 276   } else {
 277     obj = _g1h->par_allocate_during_gc(purpose, word_sz);
 278   }
 279   return obj;
 280 }
 281 
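The test word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct decides whether it is worth retiring the current PLAB: if the object is smaller than the allowed waste fraction of a full buffer, then the space left in the buffer (which was already too small for this object) is below that threshold too, so throwing it away and refilling is cheap; larger objects are allocated directly so a mostly-full buffer is not discarded for them. Restated as a small standalone helper, assuming the default of 10 for the ParallelGCBufferWastePct flag:

#include <cstddef>

static const size_t kBufferWastePct = 10;  // assumed default of ParallelGCBufferWastePct

// True when the object is small relative to a full PLAB. Because the in-buffer
// allocation already failed, the buffer's remaining space must be smaller still,
// so retiring (and wasting) it to start a fresh PLAB is acceptable.
inline bool should_refill_plab(size_t word_sz, size_t plab_word_sz) {
  return word_sz * 100 < plab_word_sz * kBufferWastePct;
}

// Example: with a 4096-word PLAB, objects of up to 409 words refill the PLAB;
// a 500-word object would be allocated directly from the region instead.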
 282 void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
 283   if (alloc_buffer(purpose)->contains(obj)) {
 284     assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
 285            "should contain whole object");
 286     alloc_buffer(purpose)->undo_allocation(obj, word_sz);
 287   } else {
 288     CollectedHeap::fill_with_object(obj, word_sz);
 289     add_to_undo_waste(word_sz);
 290   }
 291 }
 292 
 293 HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
 294   HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
 295   if (obj != NULL) {
 296     return obj;
 297   }
 298   return allocate_slow(purpose, word_sz);
 299 }
 300 
 301 void G1ParScanThreadState::retire_alloc_buffers() {
 302   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
 303     size_t waste = _alloc_buffers[ap]->words_remaining();
 304     add_to_alloc_buffer_waste(waste);
 305     _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
 306                                                true /* end_of_gc */,
 307                                                false /* retain */);
 308   }
 309 }