< prev index next >

src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp

Print this page
rev 7903 : imported patch skip_stale

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -220,10 +220,31 @@
   return _bm.par_clear_bit(heapWordToOffset(addr));
 }
 
 #undef check_mark
 
+// Returns true iff the queue entry 'obj' refers to a humongous object
+// that has already been eagerly reclaimed, and therefore must be
+// skipped rather than processed.
+inline bool CMTask::is_stale_humongous_queue_entry(oop obj) const {
+  // When a humongous object is eagerly reclaimed, we don't remove
+  // entries for it from queues.  Instead, we filter out such entries
+  // on the processing side.
+  //
+  // Recently allocated objects are filtered out when queuing, to
+  // minimize queue size and processing time.  Therefore, if we find
+  // what appears to be a recently allocated object in the queue, it
+  // must be for a reclaimed humongous object.
+  HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
+  bool result = hr->obj_allocated_since_next_marking(obj);
+#ifdef ASSERT
+  if (result) {
+    // Sanity-check the stale-entry conclusion: the asserts below require
+    // that such an object sits at its region's bottom and is not marked
+    // on the next marking bitmap.
+    HeapWord* hp = (HeapWord*)obj;
+    assert(hp == hr->bottom(), "stale humongous should be at region bottom");
+    assert(!_nextMarkBitMap->isMarked(hp), "stale humongous should not be marked");
+  }
+#endif
+  return result;
+}
+
 inline void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
   assert(!_g1h->is_on_master_free_list(
               _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
< prev index next >