< prev index next >

src/share/vm/gc/g1/g1ParScanThreadState.cpp

Print this page




 199 
// Report a JFR promotion event for object 'old' (word_sz words, tenuring
// age 'age') that was just evacuated to obj_ptr. If obj_ptr lies inside the
// current PLAB for dest_state/context, report an in-PLAB promotion including
// the PLAB's size; otherwise report an outside-PLAB (direct) allocation.
// The 'tenured' flag passed to the tracer is true iff the destination is
// the old generation (dest_state.value() == InCSetState::Old).
 200 void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
 201                                                   oop const old, size_t word_sz, uint age,
 202                                                   HeapWord * const obj_ptr,
 203                                                   const AllocationContext_t context) const {
 204   G1PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
 205   if (alloc_buf->contains(obj_ptr)) {
 206     _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
 207                                                              dest_state.value() == InCSetState::Old,
 208                                                              alloc_buf->word_sz());
 209   } else {
 210     _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age,
 211                                                               dest_state.value() == InCSetState::Old);
 212   }
 213 }
 214 
// Evacuate 'old' out of the collection set during the STW pause: choose a
// destination state (survivor vs. old) from the age encoded in old_mark,
// allocate space there (PLAB fast path first), copy the object, and return
// its new location. On allocation failure with a full old gen, falls back
// to handle_evacuation_failure_par.
// NOTE(review): this is a webrev page — source lines 240..276 of the method
// (the copy itself and, presumably, the CAS that installs the forwarding
// pointer and defines 'obj' / 'forward_ptr') are elided below; comments on
// the later half are hedged accordingly.
 215 oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
 216                                                  oop const old,
 217                                                  markOop const old_mark) {
 218   const size_t word_sz = old->size();
 219   HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
 220   // +1 to make the -1 indexes valid...
 221   const int young_index = from_region->young_index_in_cset()+1;
 222   assert( (from_region->is_young() && young_index >  0) ||
 223          (!from_region->is_young() && young_index == 0), "invariant" );
 224   const AllocationContext_t context = from_region->allocation_context();
 225 
 226   uint age = 0;
 227   InCSetState dest_state = next_state(state, old_mark, age);
 228   // The second clause is to prevent premature evacuation failure in case there
 229   // is still space in survivor, but old gen is full.
 230   if (_old_gen_is_full && dest_state.is_old()) {
 231     return handle_evacuation_failure_par(old, old_mark);
 232   }
 233   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);
 234 
 235   // PLAB allocations should succeed most of the time, so we'll
 236   // normally check against NULL once and that's it.
 237   if (obj_ptr == NULL) {
 238     bool plab_refill_failed = false;
 239     obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context, &plab_refill_failed);

// [webrev elision: source lines 240-276 are not shown on this page]

 277         age++;
 278       }
 279       if (old_mark->has_displaced_mark_helper()) {
 280         // In this case, we have to install the mark word first,
 281         // otherwise obj looks to be forwarded (the old mark word,
 282         // which contains the forward pointer, was copied)
 283         obj->set_mark(old_mark);
 284         markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
 285         old_mark->set_displaced_mark_helper(new_mark);
 286       } else {
 287         obj->set_mark(old_mark->set_age(age));
 288       }
       // Record the survivor's age so tenuring-threshold accounting sees it.
 289       _age_table.add(age, word_sz);
 290     } else {
       // Not aging this copy: keep the original mark word unchanged.
 291       obj->set_mark(old_mark);
 292     }
 293 
 294     if (G1StringDedup::is_enabled()) {
 295       const bool is_from_young = state.is_young();
 296       const bool is_to_young = dest_state.is_young();
 297       assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
 298              "sanity");
 299       assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
 300              "sanity");
       // Hand the copied object to string deduplication for this worker.
 301       G1StringDedup::enqueue_from_evacuation(is_from_young,
 302                                              is_to_young,
 303                                              _worker_id,
 304                                              obj);
 305     }
 306 
    // young_index is 0 for non-young source regions (the +1 above maps -1 to 0).
 307     _surviving_young_words[young_index] += word_sz;
 308 
    // Large object arrays are scanned in chunks rather than all at once.
 309     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
 310       // We keep track of the next start index in the length field of
 311       // the to-space object. The actual length can be found in the
 312       // length field of the from-space object.
 313       arrayOop(obj)->set_length(0);
 314       oop* old_p = set_partial_array_mask(old);
 315       push_on_queue(old_p);
 316     } else {
       // Scan the copy's references immediately, against its new region.
 317       HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
 318       _scanner.set_region(to_region);
 319       obj->oop_iterate_backwards(&_scanner);
 320     }
 321     return obj;
 322   } else {
    // NOTE(review): 'forward_ptr' comes from the elided lines; presumably
    // another worker forwarded 'old' first, so give back our allocation and
    // return the winning copy.
 323     _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
 324     return forward_ptr;
 325   }
 326 }
 327 
// Return the per-worker scan state for 'worker_id'.
// worker_id must be < _n_workers (asserted).
 328 G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
 329   assert(worker_id < _n_workers, "out of bounds access");
 330   return _states[worker_id];
 331 }
 332 
// Accumulate 'cards_scanned' into the per-worker scanned-cards counter.
// worker_id must be < _n_workers (asserted).
 333 void G1ParScanThreadStateSet::add_cards_scanned(uint worker_id, size_t cards_scanned) {
 334   assert(worker_id < _n_workers, "out of bounds access");
 335   _cards_scanned[worker_id] += cards_scanned;
 336 }
 337 




 199 
// Emit the JFR event describing the promotion of 'old' to obj_ptr.
// Two event shapes exist: if obj_ptr is contained in the PLAB selected by
// dest_state/context, an in-new-PLAB event is reported (carrying the PLAB
// size in words); otherwise an outside-PLAB event is reported. Both pass
// the object's klass, size, age, and whether the destination is old gen.
 200 void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
 201                                                   oop const old, size_t word_sz, uint age,
 202                                                   HeapWord * const obj_ptr,
 203                                                   const AllocationContext_t context) const {
 204   G1PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
 205   if (alloc_buf->contains(obj_ptr)) {
 206     _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
 207                                                              dest_state.value() == InCSetState::Old,
 208                                                              alloc_buf->word_sz());
 209   } else {
 210     _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age,
 211                                                               dest_state.value() == InCSetState::Old);
 212   }
 213 }
 214 
// Copy 'old' out of the collection set to survivor or old space and return
// the new oop. The destination is picked by next_state() from the age held
// in old_mark; allocation tries the per-destination PLAB first and then a
// direct/new-PLAB path. If old gen is full and the object would be tenured,
// evacuation failure handling takes over immediately.
// NOTE(review): webrev print view — source lines 240..276 (the actual copy
// and, presumably, the forwarding-pointer CAS defining 'obj'/'forward_ptr')
// are elided; comments past the gap are hedged where they depend on it.
 215 oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
 216                                                  oop const old,
 217                                                  markOop const old_mark) {
 218   const size_t word_sz = old->size();
 219   HeapRegion* const from_region = _g1h->heap_region_containing(old);
 220   // +1 to make the -1 indexes valid...
 221   const int young_index = from_region->young_index_in_cset()+1;
 222   assert( (from_region->is_young() && young_index >  0) ||
 223          (!from_region->is_young() && young_index == 0), "invariant" );
 224   const AllocationContext_t context = from_region->allocation_context();
 225 
 226   uint age = 0;
 227   InCSetState dest_state = next_state(state, old_mark, age);
 228   // The second clause is to prevent premature evacuation failure in case there
 229   // is still space in survivor, but old gen is full.
 230   if (_old_gen_is_full && dest_state.is_old()) {
 231     return handle_evacuation_failure_par(old, old_mark);
 232   }
 233   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);
 234 
 235   // PLAB allocations should succeed most of the time, so we'll
 236   // normally check against NULL once and that's it.
 237   if (obj_ptr == NULL) {
 238     bool plab_refill_failed = false;
 239     obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context, &plab_refill_failed);

// [webrev elision: source lines 240-276 are not shown on this page]

 277         age++;
 278       }
 279       if (old_mark->has_displaced_mark_helper()) {
 280         // In this case, we have to install the mark word first,
 281         // otherwise obj looks to be forwarded (the old mark word,
 282         // which contains the forward pointer, was copied)
 283         obj->set_mark(old_mark);
 284         markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
 285         old_mark->set_displaced_mark_helper(new_mark);
 286       } else {
 287         obj->set_mark(old_mark->set_age(age));
 288       }
       // Feed the age table so the tenuring threshold can be recomputed.
 289       _age_table.add(age, word_sz);
 290     } else {
       // No aging on this path: the copy keeps the original mark word.
 291       obj->set_mark(old_mark);
 292     }
 293 
 294     if (G1StringDedup::is_enabled()) {
 295       const bool is_from_young = state.is_young();
 296       const bool is_to_young = dest_state.is_young();
 297       assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
 298              "sanity");
 299       assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
 300              "sanity");
       // Queue the evacuated object for string deduplication on this worker.
 301       G1StringDedup::enqueue_from_evacuation(is_from_young,
 302                                              is_to_young,
 303                                              _worker_id,
 304                                              obj);
 305     }
 306 
    // Index 0 collects words from non-young regions (see the +1 above).
 307     _surviving_young_words[young_index] += word_sz;
 308 
    // Big object arrays are processed in chunks via the task queue instead
    // of being scanned in one go.
 309     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
 310       // We keep track of the next start index in the length field of
 311       // the to-space object. The actual length can be found in the
 312       // length field of the from-space object.
 313       arrayOop(obj)->set_length(0);
 314       oop* old_p = set_partial_array_mask(old);
 315       push_on_queue(old_p);
 316     } else {
       // Scan the new copy's references right away in its destination region.
 317       HeapRegion* const to_region = _g1h->heap_region_containing(obj_ptr);
 318       _scanner.set_region(to_region);
 319       obj->oop_iterate_backwards(&_scanner);
 320     }
 321     return obj;
 322   } else {
    // NOTE(review): 'forward_ptr' is established in the elided lines;
    // presumably a racing worker already forwarded 'old', so retract our
    // allocation and return that copy instead.
 323     _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
 324     return forward_ptr;
 325   }
 326 }
 327 
// Look up the G1ParScanThreadState belonging to 'worker_id'.
// Out-of-range ids are caught by the assert in debug builds.
 328 G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
 329   assert(worker_id < _n_workers, "out of bounds access");
 330   return _states[worker_id];
 331 }
 332 
// Add 'cards_scanned' to worker 'worker_id''s running total of scanned
// cards; the id is range-checked by the assert in debug builds.
 333 void G1ParScanThreadStateSet::add_cards_scanned(uint worker_id, size_t cards_scanned) {
 334   assert(worker_id < _n_workers, "out of bounds access");
 335   _cards_scanned[worker_id] += cards_scanned;
 336 }
 337 


< prev index next >