
src/hotspot/share/gc/g1/g1ParScanThreadState.cpp

rev 52675 : 8213890: Implementation of JEP 344: Abortable Mixed Collections for G1
Reviewed-by:
Contributed-by: erik.helin@oracle.com, stefan.johansson@oracle.com
rev 52676 : imported patch AMGC-impl
rev 52679 : imported patch AMGC-tsch-rev1-log
rev 52681 : [mq]: AMGC-kbar-rev1
rev 52682 : [mq]: AMGC-kbar-rev1b


  66   size_t array_length = PADDING_ELEM_NUM +
  67                         real_length +
  68                         PADDING_ELEM_NUM;
  69   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  70   if (_surviving_young_words_base == NULL)
  71     vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
  72                           "Not enough space for young surv histo.");
  73   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  74   memset(_surviving_young_words, 0, real_length * sizeof(size_t));
  75 
  76   _plab_allocator = new G1PLABAllocator(_g1h->allocator());
  77 
  78   _dest[InCSetState::NotInCSet]    = InCSetState::NotInCSet;
  79   // The dest for Young is used when the objects are aged enough to
  80   // need to be moved to the next space.
  81   _dest[InCSetState::Young]        = InCSetState::Old;
  82   _dest[InCSetState::Old]          = InCSetState::Old;
  83 
  84   _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
  85 
  86   _oops_into_optional_regions = NEW_C_HEAP_ARRAY(G1OopStarChunkedList, _num_optional_regions, mtGC);
  87   for (size_t i = 0; i < _num_optional_regions; i++) {
  88     ::new (_oops_into_optional_regions + i) G1OopStarChunkedList();
  89   }
  90 }
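Reviewer note: the PADDING_ELEM_NUM elements on either side of _surviving_young_words keep each worker's hot counters away from neighboring C-heap allocations, presumably to avoid false sharing between GC workers. A minimal standalone sketch of the pattern, assuming a 64-byte cache line (the constant and function names below are illustrative, not the HotSpot definitions):

    #include <cstddef>
    #include <cstring>

    const size_t CACHE_LINE_BYTES = 64;
    const size_t PADDING_ELEMS    = CACHE_LINE_BYTES / sizeof(size_t);

    // Allocate a counter array with one cache line of padding on each
    // side so per-thread writes cannot false-share with neighbors.
    // The caller keeps *base for freeing and writes through counters.
    size_t* alloc_padded_counters(size_t real_length, size_t** base) {
      *base = new size_t[PADDING_ELEMS + real_length + PADDING_ELEMS];
      size_t* counters = *base + PADDING_ELEMS;
      memset(counters, 0, real_length * sizeof(size_t));
      return counters;
    }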
  91 
  92 // Pass locally gathered statistics to global state.
  93 void G1ParScanThreadState::flush(size_t* surviving_young_words) {
  94   _dcq.flush();
  95   // Update allocation statistics.
  96   _plab_allocator->flush_and_retire_stats();
  97   _g1h->g1_policy()->record_age_table(&_age_table);
  98 
  99   uint length = _g1h->collection_set()->young_region_length();
 100   for (uint region_index = 0; region_index < length; region_index++) {
 101     surviving_young_words[region_index] += _surviving_young_words[region_index];
 102   }
 103 }
 104 
 105 G1ParScanThreadState::~G1ParScanThreadState() {
 106   delete _plab_allocator;
 107   delete _closures;
 108   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
 109   size_t used_by_optional = 0;
 110   for (size_t i = 0; i < _num_optional_regions; i++) {
 111     used_by_optional += _oops_into_optional_regions[i].free_chunk_lists();
 112   }
 113   _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::OptScanRS, _worker_id, used_by_optional, G1GCPhaseTimes::OptCSetUsedMemory);
 114   FREE_C_HEAP_ARRAY(G1OopStarChunkedList, _oops_into_optional_regions);
 115 }
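Reviewer note: here the destructor both frees each region's chunk list and records the retained memory as an OptCSetUsedMemory work item under OptScanRS. In the patched version below the two concerns are split: the array is released with delete[], and the accounting moves to G1ParScanThreadStateSet::record_unused_optional_region, so it is reported per unused optional region rather than at thread-state teardown.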
 116 
 117 void G1ParScanThreadState::waste(size_t& wasted, size_t& undo_wasted) {
 118   _plab_allocator->waste(wasted, undo_wasted);
 119 }
 120 
 121 #ifdef ASSERT
 122 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
 123   assert(ref != NULL, "invariant");
 124   assert(UseCompressedOops, "sanity");
 125   assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
 126   oop p = RawAccess<>::oop_load(ref);
 127   assert(_g1h->is_in_g1_reserved(p),
 128          "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
 129   return true;
 130 }
 131 
 132 bool G1ParScanThreadState::verify_ref(oop* ref) const {
 133   assert(ref != NULL, "invariant");
 134   if (has_partial_array_mask(ref)) {


 348 const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
 349   assert(_flushed, "thread local state from the per thread states should have been flushed");
 350   return _surviving_young_words_total;
 351 }
 352 
 353 void G1ParScanThreadStateSet::flush() {
 354   assert(!_flushed, "thread local state from the per thread states should be flushed once");
 355 
 356   for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
 357     G1ParScanThreadState* pss = _states[worker_index];
 358 
 359     if (pss == NULL) {
 360       continue;
 361     }
 362 
 363     pss->flush(_surviving_young_words_total);
 364     delete pss;
 365     _states[worker_index] = NULL;
 366   }
 367   _flushed = true;
 368 }
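Reviewer note: the set flushes its workers from a plain serial loop, so each pss->flush() can add into the shared _surviving_young_words_total without synchronization; deleting and NULL-ing each state together with the _flushed flag makes the flush strictly one-shot, which surviving_young_words() asserts above.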
 369 
 370 oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
 371   assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
 372 
 373   oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
 374   if (forward_ptr == NULL) {
 375     // Forward-to-self succeeded. We are the "owner" of the object.
 376     HeapRegion* r = _g1h->heap_region_containing(old);
 377 
 378     if (!r->evacuation_failed()) {
 379       r->set_evacuation_failed(true);
 380      _g1h->hr_printer()->evac_failure(r);
 381     }
 382 
 383     _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
 384 
 385     G1ScanInYoungSetter x(&_scanner, r->is_young());
 386     old->oop_iterate_backwards(&_scanner);
 387 


src/hotspot/share/gc/g1/g1ParScanThreadState.cpp (patched version)

  66   size_t array_length = PADDING_ELEM_NUM +
  67                         real_length +
  68                         PADDING_ELEM_NUM;
  69   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  70   if (_surviving_young_words_base == NULL)
  71     vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
  72                           "Not enough space for young surv histo.");
  73   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  74   memset(_surviving_young_words, 0, real_length * sizeof(size_t));
  75 
  76   _plab_allocator = new G1PLABAllocator(_g1h->allocator());
  77 
  78   _dest[InCSetState::NotInCSet]    = InCSetState::NotInCSet;
  79   // The dest for Young is used when the objects are aged enough to
  80   // need to be moved to the next space.
  81   _dest[InCSetState::Young]        = InCSetState::Old;
  82   _dest[InCSetState::Old]          = InCSetState::Old;
  83 
  84   _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
  85 
  86   _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];
  87 }
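Reviewer note: the NEW_C_HEAP_ARRAY plus placement-new loop from the old version collapses into a single array-new expression, which default-constructs every G1OopStarChunkedList; the matching delete[] in the destructor then also runs the element destructors. A minimal sketch of the before/after shape (type and allocator details simplified; HotSpot's operator new[] for this type may differ):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Elem { Elem() : head(nullptr) {} void* head; };

    // Old shape: raw C-heap bytes, then construct each element in
    // place; teardown frees the block without running destructors.
    Elem* alloc_old_shape(size_t n) {
      Elem* a = static_cast<Elem*>(malloc(n * sizeof(Elem)));
      for (size_t i = 0; i < n; i++) {
        ::new (a + i) Elem();
      }
      return a;  // released later with free(a)
    }

    // New shape: construction and destruction are paired by the
    // language; one expression replaces the loop.
    Elem* alloc_new_shape(size_t n) {
      return new Elem[n];  // released later with delete[]
    }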
  88 
  89 // Pass locally gathered statistics to global state.
  90 void G1ParScanThreadState::flush(size_t* surviving_young_words) {
  91   _dcq.flush();
  92   // Update allocation statistics.
  93   _plab_allocator->flush_and_retire_stats();
  94   _g1h->g1_policy()->record_age_table(&_age_table);
  95 
  96   uint length = _g1h->collection_set()->young_region_length();
  97   for (uint region_index = 0; region_index < length; region_index++) {
  98     surviving_young_words[region_index] += _surviving_young_words[region_index];
  99   }
 100 }
 101 
 102 G1ParScanThreadState::~G1ParScanThreadState() {
 103   delete _plab_allocator;
 104   delete _closures;
 105   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
 106   delete[] _oops_into_optional_regions;
 107 }
 108 
 109 void G1ParScanThreadState::waste(size_t& wasted, size_t& undo_wasted) {
 110   _plab_allocator->waste(wasted, undo_wasted);
 111 }
 112 
 113 #ifdef ASSERT
 114 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
 115   assert(ref != NULL, "invariant");
 116   assert(UseCompressedOops, "sanity");
 117   assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
 118   oop p = RawAccess<>::oop_load(ref);
 119   assert(_g1h->is_in_g1_reserved(p),
 120          "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
 121   return true;
 122 }
 123 
 124 bool G1ParScanThreadState::verify_ref(oop* ref) const {
 125   assert(ref != NULL, "invariant");
 126   if (has_partial_array_mask(ref)) {


 340 const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
 341   assert(_flushed, "thread local state from the per thread states should have been flushed");
 342   return _surviving_young_words_total;
 343 }
 344 
 345 void G1ParScanThreadStateSet::flush() {
 346   assert(!_flushed, "thread local state from the per thread states should be flushed once");
 347 
 348   for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
 349     G1ParScanThreadState* pss = _states[worker_index];
 350 
 351     if (pss == NULL) {
 352       continue;
 353     }
 354 
 355     pss->flush(_surviving_young_words_total);
 356     delete pss;
 357     _states[worker_index] = NULL;
 358   }
 359   _flushed = true;
 360 }
 361 
 362 void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
 363   for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
 364     G1ParScanThreadState* pss = _states[worker_index];
 365 
 366     if (pss == NULL) {
 367       continue;
 368     }
 369 
 370     size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
 371     _g1h->g1_policy()->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_index, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
 372   }
 373 }
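Reviewer note: this function is new with abortable mixed collections. When a pause is cut short, optional collection set regions may go unscanned; this walk charges the memory still held in each worker's chunk list for such a region to that worker's OptCSetUsedMemory counter, and, per its name, record_or_add_thread_work_item adds to any value already recorded for that worker across regions.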
 374 
 375 oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
 376   assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
 377 
 378   oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
 379   if (forward_ptr == NULL) {
 380     // Forward-to-self succeeded. We are the "owner" of the object.
 381     HeapRegion* r = _g1h->heap_region_containing(old);
 382 
 383     if (!r->evacuation_failed()) {
 384       r->set_evacuation_failed(true);
  385       _g1h->hr_printer()->evac_failure(r);
 386     }
 387 
 388     _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
 389 
 390     G1ScanInYoungSetter x(&_scanner, r->is_young());
 391     old->oop_iterate_backwards(&_scanner);
 392 
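Reviewer note: a NULL return from forward_to_atomic means this thread's CAS installed the self-forwarding pointer first, making it the owner of failure handling (flagging the region, preserving the mark word, re-scanning the object in place). A standalone sketch of that claim pattern with std::atomic (names assumed; HotSpot folds the forwardee into the mark word rather than a separate field):

    #include <atomic>

    struct ObjHeader {
      std::atomic<void*> forwardee{nullptr};
    };

    // Returns true if the calling thread won ownership of obj. Several
    // GC workers may race here; exactly one CAS succeeds.
    bool claim_self_forward(ObjHeader* obj) {
      void* expected = nullptr;
      // Relaxed suffices for exclusivity, mirroring the
      // memory_order_relaxed argument passed above.
      return obj->forwardee.compare_exchange_strong(
          expected, obj, std::memory_order_relaxed);
    }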