src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp

rev 7970 : imported patch inc1

Old version:

  _bm.set_bit(heapWordToOffset(addr));
}

inline void CMBitMap::clear(HeapWord* addr) {
  check_mark(addr);
  _bm.clear_bit(heapWordToOffset(addr));
}

inline bool CMBitMap::parMark(HeapWord* addr) {
  check_mark(addr);
  return _bm.par_set_bit(heapWordToOffset(addr));
}

inline bool CMBitMap::parClear(HeapWord* addr) {
  check_mark(addr);
  return _bm.par_clear_bit(heapWordToOffset(addr));
}

#undef check_mark

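For reference, heapWordToOffset is the address-to-bit mapping all of these
helpers share: each mark bit covers 2^_shifter HeapWords starting at
_bmStartWord. A minimal sketch of the idea (names follow CMBitMapRO; this is
an illustration, not the exact implementation):

  // Map a heap address to its bit index in the mark bitmap.
  // One bit represents 2^shifter consecutive HeapWords.
  size_t heap_word_to_offset_sketch(const HeapWord* addr,
                                    const HeapWord* bm_start_word,
                                    const int shifter) {
    return pointer_delta(addr, bm_start_word) >> shifter;
  }
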
inline bool CMTask::is_stale_humongous_queue_entry(oop obj) const {
  // When a humongous object is eagerly reclaimed, we don't remove
  // entries for it from queues.  Instead, we filter out such entries
  // on the processing side.
  //
  // Recently allocated objects are filtered out when queuing, to
  // minimize queue size and processing time.  Therefore, if we find
  // what appears to be a recently allocated object in the queue, it
  // must be for a reclaimed humongous object.
  HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
  bool result = hr->obj_allocated_since_next_marking(obj);
#ifdef ASSERT
  if (result) {
    HeapWord* hp = (HeapWord*)obj;
    assert(hp == hr->bottom(), "stale humongous should be at region bottom");
    assert(!_nextMarkBitMap->isMarked(hp), "stale humongous should not be marked");
  }
#endif
  return result;
}
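
The "recently allocated" test above reduces to a comparison against the
region's next-top-at-mark-start (NTAMS) pointer. A sketch of the predicate
the filter relies on (hedged; the real HeapRegion accessor may differ in
detail):

  // An object counts as allocated since the start of the next marking
  // iff its address is at or above the region's NTAMS. Eagerly
  // reclaiming a humongous region resets that region's marking state,
  // so a stale queue entry, which points at the region bottom where
  // the old object started, now looks "recently allocated" and is
  // filtered out.
  bool allocated_since_next_marking_sketch(const HeapWord* obj_addr,
                                           const HeapWord* ntams) {
    return obj_addr >= ntams;
  }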

inline void CMTask::push(oop obj) {
  HeapWord* objAddr = (HeapWord*) obj;
  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
  assert(!_g1h->is_on_master_free_list(
              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
  assert(!_g1h->is_obj_ill(obj), "invariant");
  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");

  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%u] pushing " PTR_FORMAT, _worker_id, p2i((void*) obj));
  }

  if (!_task_queue->push(obj)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.

    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%u] task queue overflow, "



New version:

  _bm.set_bit(heapWordToOffset(addr));
}

inline void CMBitMap::clear(HeapWord* addr) {
  check_mark(addr);
  _bm.clear_bit(heapWordToOffset(addr));
}

inline bool CMBitMap::parMark(HeapWord* addr) {
  check_mark(addr);
  return _bm.par_set_bit(heapWordToOffset(addr));
}

inline bool CMBitMap::parClear(HeapWord* addr) {
  check_mark(addr);
  return _bm.par_clear_bit(heapWordToOffset(addr));
}

#undef check_mark

inline bool ConcurrentMark::is_stale_humongous_marked_entry(oop entry) const {
  // When a humongous object is eagerly reclaimed, we don't remove
  // entries for it from mark stacks (either local or global).
  // Instead, we filter out such entries on the processing side.
  //
  // Recently allocated objects are filtered out when queuing, to
  // minimize queue size and processing time.  Therefore, if we find
  // what appears to be a recently allocated object in the queue, it
  // must be for a reclaimed humongous object.
  HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
  bool result = hr->obj_allocated_since_next_marking(entry);
#ifdef ASSERT
  if (result) {
    // If entry appears to be a stale humongous object, perform some
    // additional tests to increase our confidence in that belief.
    // We're limited in what tests we can perform, because a stale
    // entry is for an object that has already been reclaimed.
    HeapWord* hp = (HeapWord*)entry;
    assert(hp == hr->bottom(),
           err_msg("Stale humongous object should be at region bottom: object = "
                   PTR_FORMAT ", region id %u, bottom = " PTR_FORMAT,
                   p2i(hp), hr->hrm_index(), p2i(hr->bottom())));
    assert(!_nextMarkBitMap->isMarked(hp),
           err_msg("Stale humongous object should not be marked: object = "
                   PTR_FORMAT ", region id %u, bottom = " PTR_FORMAT,
                   p2i(hp), hr->hrm_index(), p2i(hr->bottom())));
  }
#endif
  return result;
}
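
The region-bottom assertion rests on the fact that a humongous object always
starts at the bottom of its first region. In isolation, the extra confidence
check amounts to the following (illustrative only; the asserts above are the
real checks):

  // A stale entry necessarily points at what used to be the start of a
  // humongous object, and that start coincides with the region's bottom.
  bool is_at_region_bottom_sketch(const HeapRegion* hr, const oop obj) {
    return (HeapWord*) obj == hr->bottom();
  }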

inline void CMTask::process_queue_entry(oop entry) {
  assert(_g1h->is_in_g1_reserved((HeapWord*)entry), "invariant");

  if (_cm->is_stale_humongous_marked_entry(entry)) {
    statsOnly( ++stale_humongous_queue_entries );
  } else {
    scan_object(entry);
  }
}
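
The natural consumer of process_queue_entry is the task's queue-draining
loop. A simplified sketch of that shape (hypothetical; the real
CMTask::drain_local_queue also applies partial-draining thresholds and
termination checks omitted here):

  // Pop entries off the worker-local task queue and process each one.
  // Stale entries for eagerly reclaimed humongous objects are filtered
  // inside process_queue_entry instead of being removed from the queue
  // at reclaim time.
  void drain_local_queue_sketch(CMTask* task, CMTaskQueue* queue) {
    oop entry;
    while (queue->pop_local(entry)) {
      task->process_queue_entry(entry);
    }
  }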

inline void CMTask::push(oop obj) {
  HeapWord* objAddr = (HeapWord*) obj;
  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
  assert(!_g1h->is_on_master_free_list(
              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
  assert(!_g1h->is_obj_ill(obj), "invariant");
  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");

  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%u] pushing " PTR_FORMAT, _worker_id, p2i((void*) obj));
  }

  if (!_task_queue->push(obj)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.

    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%u] task queue overflow, "
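
The excerpt ends mid-statement in both versions. For context, the overflow
path follows a spill-and-retry pattern; a sketch of that pattern, assuming
CMTask::move_entries_to_global_stack behaves as its name suggests (not the
verbatim continuation of the file):

  // When the bounded local queue rejects an entry, spill a batch of
  // local entries to the shared global mark stack, then retry; the
  // retry must succeed because the spill freed local space.
  void push_with_overflow_sketch(CMTask* task, CMTaskQueue* queue, oop obj) {
    if (!queue->push(obj)) {
      task->move_entries_to_global_stack();
      bool success = queue->push(obj);
      assert(success, "push should succeed after spilling to the global stack");
    }
  }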

