src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp

rev 5920 : 8035406: Improve data structure for Code Cache remembered sets
Summary: Change the code cache remembered sets data structure from a GrowableArray to a chunked list of nmethods. This makes the data structure more amenable to parallelization and reduces the time needed to free it.
Reviewed-by:
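
For background on the structure named in the summary: the sketch below shows, in simplified form, what a chunked list of nmethod pointers can look like. It is illustrative only; the class names (NMethodChunk, ChunkedNMethodList) and the chunk capacity are invented for this example and do not correspond to the actual G1CodeRootSet classes added by this change. The real implementation also recycles chunks through a free list (see the G1CodeRootSet::fl_mem_size() reference further down in the diff), which is what makes freeing a region's whole set cheap.

#include <cstddef>

class nmethod;  // opaque here; defined elsewhere in the code cache

// One fixed-size chunk of nmethod pointers.  Chunks are linked together,
// so growing the set never reallocates or copies existing entries (unlike
// a GrowableArray).
class NMethodChunk {
  static const int ChunkCapacity = 32;     // illustrative size
  nmethod*      _data[ChunkCapacity];
  int           _top;                      // next free slot in this chunk
  NMethodChunk* _next;                     // next chunk in the list
 public:
  NMethodChunk(NMethodChunk* next) : _top(0), _next(next) { }
  bool is_full() const        { return _top == ChunkCapacity; }
  void add(nmethod* nm)       { _data[_top++] = nm; }
  int  length() const         { return _top; }
  nmethod* at(int i) const    { return _data[i]; }
  NMethodChunk* next() const  { return _next; }
};

// A set of nmethods stored as a singly-linked list of chunks.
class ChunkedNMethodList {
  NMethodChunk* _head;
  size_t        _length;
 public:
  ChunkedNMethodList() : _head(NULL), _length(0) { }

  void add(nmethod* nm) {
    if (_head == NULL || _head->is_full()) {
      // A real implementation could take chunks from a shared free list
      // and return them there on clear(), making freeing cheap.
      _head = new NMethodChunk(_head);
    }
    _head->add(nm);
    _length++;
  }

  size_t length() const { return _length; }

  bool contains(nmethod* nm) const {
    for (NMethodChunk* c = _head; c != NULL; c = c->next()) {
      for (int i = 0; i < c->length(); i++) {
        if (c->at(i) == nm) return true;
      }
    }
    return false;
  }
};

Because adding never moves existing entries, per-region sets built this way can grow independently of one another, which is presumably part of what makes the structure more amenable to parallel processing, per the summary.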

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -23,10 +23,11 @@
  */
 
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
 
+#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
 #include "gc_implementation/g1/sparsePRT.hpp"
 
 // Remembered set for a heap region.  Represent a set of "cards" that
 // contain pointers into the owner heap region.  Cards are defined somewhat
 // abstractly, in terms of what the "BlockOffsetTable" in use can parse.

@@ -70,11 +71,11 @@
 
 class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
   friend class HeapRegionRemSetIterator;
 
   G1CollectedHeap* _g1h;
-  Mutex            _m;
+  Mutex*           _m;
   HeapRegion*      _hr;
 
   // These are protected by "_m".
   BitMap      _coarse_map;
   size_t      _n_coarse_entries;

@@ -127,11 +128,11 @@
   void link_to_all(PerRegionTable * prt);
  // unlink/remove the given fine grain remembered set from the "all" list
   void unlink_from_all(PerRegionTable * prt);
 
 public:
-  OtherRegionsTable(HeapRegion* hr);
+  OtherRegionsTable(HeapRegion* hr, Mutex* m);
 
   HeapRegion* hr() const { return _hr; }
 
   // For now.  Could "expand" some tables in the future, so that this made
   // sense.

@@ -139,11 +140,10 @@
 
   // Removes any entries shown by the given bitmaps to contain only dead
   // objects.
   void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
 
-  // Not const because it takes a lock.
   size_t occupied() const;
   size_t occ_fine() const;
   size_t occ_coarse() const;
   size_t occ_sparse() const;
 

@@ -190,13 +190,15 @@
 
 private:
   G1BlockOffsetSharedArray* _bosa;
   G1BlockOffsetSharedArray* bosa() const { return _bosa; }
 
-  // A list of code blobs (nmethods) whose code contains pointers into
+  // A set of code blobs (nmethods) whose code contains pointers into
   // the region that owns this RSet.
-  GrowableArray<nmethod*>* _strong_code_roots_list;
+  G1CodeRootSet _code_roots;
+
+  Mutex _m;
 
   OtherRegionsTable _other_regions;
 
   enum ParIterState { Unclaimed, Claimed, Complete };
   volatile ParIterState _iter_state;

@@ -216,21 +218,24 @@
   static int          _n_recorded_events;
 
   static void print_event(outputStream* str, Event evnt);
 
 public:
-  HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
-                   HeapRegion* hr);
+  HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegion* hr);
 
   static int num_par_rem_sets();
   static void setup_remset_size();
 
   HeapRegion* hr() const {
     return _other_regions.hr();
   }
 
-  size_t occupied() const {
+  size_t occupied() {
+    MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
+    return occupied_locked();
+  }
+  size_t occupied_locked() {
     return _other_regions.occupied();
   }
   size_t occ_fine() const {
     return _other_regions.occ_fine();
   }
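
One note on the occupied() / occupied_locked() pair added in this hunk (and the clear() / clear_locked() pair below): this follows the usual HotSpot pattern where the plain method acquires the lock and the *_locked variant assumes the caller already holds it. A minimal sketch of that pattern, with invented names (ExampleSet, _count) and assuming HotSpot's internal Mutex/MutexLockerEx from runtime/mutexLocker.hpp; it is not code from this change.

#include "runtime/mutexLocker.hpp"

// Pattern sketch only; names are made up for illustration.
class ExampleSet {
  Mutex  _m;
  size_t _count;
 public:
  ExampleSet() : _m(Mutex::leaf, "ExampleSet lock", true), _count(0) { }

  // Public entry point: takes the lock, then delegates.
  size_t count() {
    MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
    return count_locked();
  }

  // Assumes _m is already held, so callers that hold the lock across a
  // larger operation can query without re-acquiring it (HotSpot mutexes
  // are not recursive, so re-acquiring would assert).
  size_t count_locked() {
    return _count;
  }
};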

@@ -258,10 +263,11 @@
   void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
 
   // The region is being reclaimed; clear its remset, and any mention of
   // entries for this region in other remsets.
   void clear();
+  void clear_locked();
 
   // Attempt to claim the region.  Returns true iff this call caused an
   // atomic transition from Unclaimed to Claimed.
   bool claim_iter();
   // Sets the iteration state to "complete".

@@ -287,27 +293,28 @@
   }
 
   // The actual # of bytes this hr_remset takes up.
   // Note also includes the strong code root set.
   size_t mem_size() {
+    MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
     return _other_regions.mem_size()
       // This correction is necessary because the above includes the second
       // part.
       + (sizeof(this) - sizeof(OtherRegionsTable))
       + strong_code_roots_mem_size();
   }
 
   // Returns the memory occupancy of all static data structures associated
   // with remembered sets.
   static size_t static_mem_size() {
-    return OtherRegionsTable::static_mem_size();
+    return OtherRegionsTable::static_mem_size() + G1CodeRootSet::static_mem_size();
   }
 
   // Returns the memory occupancy of all free_list data structures associated
   // with remembered sets.
   static size_t fl_mem_size() {
-    return OtherRegionsTable::fl_mem_size();
+    return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::fl_mem_size();
   }
 
   bool contains_reference(OopOrNarrowOopStar from) const {
     return _other_regions.contains_reference(from);
   }

@@ -326,32 +333,33 @@
   // Applies blk->do_code_blob() to each of the entries in
   // the strong code roots list
   void strong_code_roots_do(CodeBlobClosure* blk) const;
 
   // Returns the number of elements in the strong code roots list
-  int strong_code_roots_list_length() {
-    return _strong_code_roots_list->length();
+  size_t strong_code_roots_list_length() {
+    return _code_roots.length();
   }
 
   // Returns true if the strong code roots contains the given
   // nmethod.
   bool strong_code_roots_list_contains(nmethod* nm) {
-    return _strong_code_roots_list->contains(nm);
+    return _code_roots.contains(nm);
   }
 
   // Returns the amount of memory, in bytes, currently
   // consumed by the strong code roots.
   size_t strong_code_roots_mem_size();
 
-  void print() const;
+  void print() PRODUCT_RETURN;
 
   // Called during a stop-world phase to perform any deferred cleanups.
   static void cleanup();
 
   // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
   // (Uses it to initialize from_card_cache).
   static void init_heap(uint max_regions) {
+    G1CodeRootSet::initialize();
     OtherRegionsTable::init_from_card_cache((size_t) max_regions);
   }
 
   // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
   static void shrink_heap(uint new_n_regs) {

@@ -382,11 +390,11 @@
 };
 
 class HeapRegionRemSetIterator : public StackObj {
 
   // The region RSet over which we're iterating.
-  const HeapRegionRemSet* _hrrs;
+  HeapRegionRemSet* _hrrs;
 
   // Local caching of HRRS fields.
   const BitMap*             _coarse_map;
   PerRegionTable**          _fine_grain_regions;
 

@@ -439,11 +447,11 @@
   bool fine_has_next(size_t& card_index);
 
 public:
   // We require an iterator to be initialized before use, so the
   // constructor does little.
-  HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs);
+  HeapRegionRemSetIterator(HeapRegionRemSet* hrrs);
 
   // If there remains one or more cards to be yielded, returns true and
   // sets "card_index" to one of those cards (which is then considered
   // yielded.)   Otherwise, returns false (and leaves "card_index"
   // undefined.)