
src/share/vm/gc/shared/collectedHeap.cpp

rev 12906 : [mq]: gc_interface

@@ -23,11 +23,10 @@
  */
 
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/allocTracer.hpp"
-#include "gc/shared/barrierSet.inline.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"

@@ -134,10 +133,13 @@
   _barrier_set->print_on(st);
 }
 
 void CollectedHeap::register_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
+  if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
+    CodeCache::add_scavenge_root_nmethod(nm);
+  }
 }
 
 void CollectedHeap::unregister_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
 }
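
With the scavenge-root bookkeeping now handled in the shared
CollectedHeap::register_nmethod, a collector that tracks code roots
differently can override the hook instead. A minimal hypothetical override
(MyCollectedHeap and MyRegisterNMethodOopClosure are illustrative names,
not part of this patch):

    // Hypothetical override: rather than using the shared scavenge-root
    // list, record the oops embedded in the nmethod into the collector's
    // own structure via nmethod::oops_do().
    void MyCollectedHeap::register_nmethod(nmethod* nm) {
      assert_locked_or_safepoint(CodeCache_lock);
      MyRegisterNMethodOopClosure cl(this, nm);
      nm->oops_do(&cl);  // visit every oop compiled into this nmethod
    }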

@@ -181,12 +183,11 @@
   _barrier_set(NULL),
   _is_gc_active(false),
   _total_collections(0),
   _total_full_collections(0),
   _gc_cause(GCCause::_no_gc),
-  _gc_lastcause(GCCause::_no_gc),
-  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
+  _gc_lastcause(GCCause::_no_gc)
 {
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);
   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                              max_len / elements_per_word);

@@ -244,18 +245,10 @@
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);
 }
 
 void CollectedHeap::pre_initialize() {
-  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
-  // otherwise remains unused.
-#if defined(COMPILER2) || INCLUDE_JVMCI
-  _defer_initial_card_mark = is_server_compilation_mode_vm() &&  ReduceInitialCardMarks && can_elide_tlab_store_barriers()
-                             && (DeferInitialCardMark || card_mark_must_follow_store());
-#else
-  assert(_defer_initial_card_mark == false, "Who would set it?");
-#endif
 }
 
 #ifndef PRODUCT
 void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
   if (CheckMemoryInitialization && ZapUnusedHeapArea) {

@@ -337,33 +330,10 @@
   }
   thread->tlab().fill(obj, obj + size, new_tlab_size);
   return obj;
 }
 
-void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
-  MemRegion deferred = thread->deferred_card_mark();
-  if (!deferred.is_empty()) {
-    assert(_defer_initial_card_mark, "Otherwise should be empty");
-    {
-      // Verify that the storage points to a parsable object in heap
-      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
-      assert(is_in(old_obj), "Not in allocated heap");
-      assert(!can_elide_initializing_store_barrier(old_obj),
-             "Else should have been filtered in new_store_pre_barrier()");
-      assert(old_obj->is_oop(true), "Not an oop");
-      assert(deferred.word_size() == (size_t)(old_obj->size()),
-             "Mismatch: multiple objects?");
-    }
-    BarrierSet* bs = barrier_set();
-    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
-    bs->write_region(deferred);
-    // "Clear" the deferred_card_mark field
-    thread->set_deferred_card_mark(MemRegion());
-  }
-  assert(thread->deferred_card_mark().is_empty(), "invariant");
-}
-
 size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
   // This restriction could be removed by enabling filling with multiple arrays.
   // If we compute that the reasonable way as
   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
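
The removed flush_deferred_store_barrier completed a card mark that compiled
code had elided for the initializing stores of a newly allocated object.
Condensed to its effect (every call below appears in the removed body; the
debug-only assertions are dropped):

    // Condensed sketch of the removed flush: if this thread has a pending
    // region, dirty its cards now and clear the pending record.
    void flush_deferred_store_barrier_sketch(JavaThread* thread, BarrierSet* bs) {
      MemRegion deferred = thread->deferred_card_mark();
      if (!deferred.is_empty()) {
        bs->write_region(deferred);                   // card-mark the whole object
        thread->set_deferred_card_mark(MemRegion());  // nothing pending anymore
      }
    }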

@@ -375,77 +345,10 @@
               sizeof(jint) *
               ((juint) max_jint / (size_t) HeapWordSize);
   return align_size_down(max_int_size, MinObjAlignment);
 }
 
-// Helper for ReduceInitialCardMarks. For performance,
-// compiled code may elide card-marks for initializing stores
-// to a newly allocated object along the fast-path. We
-// compensate for such elided card-marks as follows:
-// (a) Generational, non-concurrent collectors, such as
-//     GenCollectedHeap(ParNew,DefNew,Tenured) and
-//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
-//     need the card-mark if and only if the region is
-//     in the old gen, and do not care if the card-mark
-//     succeeds or precedes the initializing stores themselves,
-//     so long as the card-mark is completed before the next
-//     scavenge. For all these cases, we can do a card mark
-//     at the point at which we do a slow path allocation
-//     in the old gen, i.e. in this call.
-// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
-//     in addition that the card-mark for an old gen allocated
-//     object strictly follow any associated initializing stores.
-//     In these cases, the memRegion remembered below is
-//     used to card-mark the entire region either just before the next
-//     slow-path allocation by this thread or just before the next scavenge or
-//     CMS-associated safepoint, whichever of these events happens first.
-//     (The implicit assumption is that the object has been fully
-//     initialized by this point, a fact that we assert when doing the
-//     card-mark.)
-// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
-//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
-//     is used to remember the pre-value of any store. Initializing
-//     stores will not need this barrier, so we need not worry about
-//     compensating for the missing pre-barrier here. Turning now
-//     to the post-barrier, we note that G1 needs a RS update barrier
-//     which simply enqueues a (sequence of) dirty cards which may
-//     optionally be refined by the concurrent update threads. Note
-//     that this barrier need only be applied to a non-young write,
-//     but, like in CMS, because of the presence of concurrent refinement
-//     (much like CMS' precleaning), must strictly follow the oop-store.
-//     Thus, using the same protocol for maintaining the intended
-//     invariants turns out, serendipitously, to be the same for both
-//     G1 and CMS.
-//
-// For any future collector, this code should be reexamined with
-// that specific collector in mind, and the documentation above suitably
-// extended and updated.
-oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
-  // If a previous card-mark was deferred, flush it now.
-  flush_deferred_store_barrier(thread);
-  if (can_elide_initializing_store_barrier(new_obj) ||
-      new_obj->is_typeArray()) {
-    // Arrays of non-references don't need a pre-barrier.
-    // The deferred_card_mark region should be empty
-    // following the flush above.
-    assert(thread->deferred_card_mark().is_empty(), "Error");
-  } else {
-    MemRegion mr((HeapWord*)new_obj, new_obj->size());
-    assert(!mr.is_empty(), "Error");
-    if (_defer_initial_card_mark) {
-      // Defer the card mark
-      thread->set_deferred_card_mark(mr);
-    } else {
-      // Do the card mark
-      BarrierSet* bs = barrier_set();
-      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
-      bs->write_region(mr);
-    }
-  }
-  return new_obj;
-}
-
 size_t CollectedHeap::filler_array_hdr_size() {
   return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
 }
 
 size_t CollectedHeap::filler_array_min_size() {
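
The comment block above documents when an elided initializing-store card
mark must be compensated, and why CMS and G1 additionally require the mark
to strictly follow the stores. The decision logic of the removed
new_store_pre_barrier, condensed (the defer parameter stands in for the
removed _defer_initial_card_mark field; each call appears in the removed
body):

    // Condensed sketch of the removed slow-path compensation. The real
    // code first flushed any card mark the thread had already deferred.
    oop new_store_pre_barrier_sketch(CollectedHeap* heap, JavaThread* thread,
                                     oop new_obj, bool defer) {
      if (!heap->can_elide_initializing_store_barrier(new_obj) &&
          !new_obj->is_typeArray()) {              // typeArrays hold no oops
        MemRegion mr((HeapWord*)new_obj, new_obj->size());
        if (defer) {
          thread->set_deferred_card_mark(mr);      // mark after the stores
        } else {
          heap->barrier_set()->write_region(mr);   // mark immediately
        }
      }
      return new_obj;
    }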

@@ -544,27 +447,19 @@
          !is_init_completed(),
          "Should only be called at a safepoint or at start-up"
          " otherwise concurrent mutator activity may make heap "
          " unparsable again");
   const bool use_tlab = UseTLAB;
-  const bool deferred = _defer_initial_card_mark;
   // The main thread starts allocating via a TLAB even before it
   // has added itself to the threads list at vm boot-up.
   assert(!use_tlab || Threads::first() != NULL,
          "Attempt to fill tlabs before main thread has been added"
          " to threads list is doomed to failure!");
+  BarrierSet* bs = barrier_set();
   for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
      if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
-#if defined(COMPILER2) || INCLUDE_JVMCI
-     // The deferred store barriers must all have been flushed to the
-     // card-table (or other remembered set structure) before GC starts
-     // processing the card-table (or other remembered set).
-     if (deferred) flush_deferred_store_barrier(thread);
-#else
-     assert(!deferred, "Should be false");
-     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
-#endif
+     bs->make_parsable(thread);
   }
 }
 
 void CollectedHeap::accumulate_statistics_all_tlabs() {
   if (UseTLAB) {
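
The per-thread flush is now behind the BarrierSet interface: the loop calls
make_parsable(thread) and lets the barrier set decide what, if anything,
needs flushing. One plausible shape for a card-table barrier set, assuming
the deferred-card-mark state remains on the thread (MyCardTableBS is an
illustrative name, not this patch's implementation):

    // Sketch: flush the thread's deferred card mark so the card table is
    // complete before GC scans it. A barrier set with no per-thread state
    // would leave make_parsable a no-op.
    void MyCardTableBS::make_parsable(JavaThread* thread) {
      MemRegion deferred = thread->deferred_card_mark();
      if (!deferred.is_empty()) {
        write_region(deferred);
        thread->set_deferred_card_mark(MemRegion());
      }
    }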

@@ -615,5 +510,9 @@
   // temporarily think something is in the heap.  (Seen this happen in asserts.)
   _reserved.set_word_size(0);
   _reserved.set_start(start);
   _reserved.set_end(end);
 }
+
+void CollectedHeap::verify_nmethod_roots(nmethod* nm) {
+  nm->verify_scavenge_root_oops();
+}
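
The new verify_nmethod_roots hook gives collectors a shared entry point for
checking an nmethod's scavenge-root invariants. An illustrative caller (the
loop below is an assumption about how a verify pass might drive the hook,
not a call site added by this patch):

    // Sketch: funnel every nmethod in the code cache through the hook.
    void verify_all_code_roots(CollectedHeap* heap) {
      assert_locked_or_safepoint(CodeCache_lock);
      NMethodIterator iter;  // assumed code-cache iterator
      while (iter.next()) {
        heap->verify_nmethod_roots(iter.method());
      }
    }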