/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_COMPACTINGPERMGENGEN_HPP
#define SHARE_VM_MEMORY_COMPACTINGPERMGENGEN_HPP

#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/space.hpp"

// All heaps contain a "permanent generation," holding permanent
// (reflective) objects.  This is like a regular generation in some ways,
// but unlike one in others, and so is split apart.

class PermanentGenerationSpec;

// This is the "generation" view of a CompactingPermGen.
// NOTE: the shared spaces used for CDS are here handled in
// a somewhat awkward and potentially buggy fashion, see CR 6801625.
// This infelicity should be fixed, see CR 6897789.
class CompactingPermGenGen: public OneContigSpaceCardGeneration {
  friend class VMStructs;
  // Abstractly, this is a subtype that gets access to protected fields.
  friend class CompactingPermGen;

private:
  // Shared spaces
  PermanentGenerationSpec* _spec;
  size_t _shared_space_size;
  VirtualSpace _ro_vs;
  VirtualSpace _rw_vs;
  VirtualSpace _md_vs;
  VirtualSpace _mc_vs;
  BlockOffsetSharedArray* _ro_bts;
  BlockOffsetSharedArray* _rw_bts;
  OffsetTableContigSpace* _ro_space;
  OffsetTableContigSpace* _rw_space;

  // With shared spaces there is a dichotomy in the use of the generation's
  // _virtual_space: one portion is used for the unshared part of the
  // permanent generation, and another portion is reserved for the shared
  // part.  The _reserved field of the generation covers both the unshared
  // and the shared parts; it is initialized for only the unshared part and
  // is extended during initialization to include the shared part if shared
  // spaces are in use.
  // The reserved size of the _virtual_space for CompactingPermGenGen is the
  // size of the whole permanent generation, including the shared spaces, as
  // can be seen from the use of MaxPermSize in the allocation of the
  // PermanentGenerationSpec.  The space for the shared spaces is committed
  // separately.
  // In general, at initialization only part of the space for the unshared
  // part of the permanent generation is committed, and more is committed as
  // the permanent generation grows.  Growing the permanent generation uses
  // the generation's capacity() and max_capacity().  For the permanent
  // generation (implemented with a CompactingPermGenGen), capacity() is
  // taken from the capacity of the space (_the_space, which holds the
  // unshared part of the generation), while max_capacity() is based on the
  // size of _reserved (which includes the shared spaces) minus the size of
  // the shared spaces.
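  //
  // Illustrative sketch only (not code from this file): under the layout
  // described above, the capacity accounting works out roughly as
  //
  //   capacity()     == _the_space->capacity();                // committed, unshared
  //   max_capacity() == _reserved.byte_size() - _shared_space_size;
  //
  // i.e. max_capacity() deliberately excludes the shared spaces (see the
  // comment on max_capacity() below); the exact expressions live in the
  // corresponding .cpp file and may differ in detail.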

  // These values are redundant, but are called out separately to avoid
  // going through heap/space/gen pointers for performance.
  static HeapWord* unshared_bottom;
  static HeapWord* unshared_end;
  static HeapWord* shared_bottom;
  static HeapWord* readonly_bottom;
  static HeapWord* readonly_end;
  static HeapWord* readwrite_bottom;
  static HeapWord* readwrite_end;
  static HeapWord* miscdata_bottom;
  static HeapWord* miscdata_end;
  static HeapWord* misccode_bottom;
  static HeapWord* misccode_end;
  static HeapWord* shared_end;

  // List of klassOops whose vtbl entries are used to patch others.
  static void**        _vtbl_list;

  // Performance Counters
  GenerationCounters*  _gen_counters;
  CSpaceCounters*      _space_counters;

  void initialize_performance_counters();

public:

  enum {
    vtbl_list_size = 16, // number of entries in the shared space vtable list.
    num_virtuals = 200   // an upper bound on the number of virtual methods
                         // in Klass (or subclass) objects.
  };
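
  // Illustrative note (an assumption, not stated in this header): the
  // generated vtable-patching data must accommodate on the order of
  // vtbl_list_size * num_virtuals entries, which bounds the space needed in
  // the md and mc regions by generate_vtable_methods() below.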

  enum {
    ro = 0,  // read-only shared space in the heap
    rw = 1,  // read-write shared space in the heap
    md = 2,  // miscellaneous data for initializing tables, etc.
    mc = 3,  // miscellaneous code - vtable replacement.
    n_regions = 4
  };

  CompactingPermGenGen(ReservedSpace rs, ReservedSpace shared_rs,
                       size_t initial_byte_size, int level, GenRemSet* remset,
                       ContiguousSpace* space,
                       PermanentGenerationSpec* perm_spec);

  const char* name() const {
    return "compacting perm gen";
  }

  const char* short_name() const {
    return "Perm";
  }

  // Return the maximum capacity for the object space.  This
  // explicitly does not include the shared spaces.
  size_t max_capacity() const;

  void update_counters();

  void compute_new_size() {
    assert(false, "Should not call this -- handled at PermGen level.");
  }

  bool must_be_youngest() const { return false; }
  bool must_be_oldest() const { return false; }

  OffsetTableContigSpace* ro_space() const { return _ro_space; }
  OffsetTableContigSpace* rw_space() const { return _rw_space; }
  VirtualSpace*           md_space()       { return &_md_vs; }
  VirtualSpace*           mc_space()       { return &_mc_vs; }
  ContiguousSpace* unshared_space() const { return _the_space; }

  static inline bool is_shared(const oopDesc* p) {
    return (HeapWord*)p >= shared_bottom && (HeapWord*)p < shared_end;
  }
  // RedefineClasses note: this tester is used to check residence of
  // the specified oop in the shared readonly space and not whether
  // the oop is readonly.
  static inline bool is_shared_readonly(const oopDesc* p) {
    return (HeapWord*)p >= readonly_bottom && (HeapWord*)p < readonly_end;
  }
  // RedefineClasses note: this tester is used to check residence of
  // the specified oop in the shared readwrite space and not whether
  // the oop is readwrite.
  static inline bool is_shared_readwrite(const oopDesc* p) {
    return (HeapWord*)p >= readwrite_bottom && (HeapWord*)p < readwrite_end;
  }
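
  // Usage sketch (illustrative only, not code from this file): a caller that
  // may need to modify an oop residing in the shared read-only region first
  // checks its residence and remaps the region, e.g.
  //
  //   if (CompactingPermGenGen::is_shared_readonly(obj)) {
  //     CompactingPermGenGen::remap_shared_readonly_as_readwrite();
  //   }
  //
  // See remap_shared_readonly_as_readwrite() below for the RedefineClasses
  // use case.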

  bool is_in_unshared(const void* p) const {
    return OneContigSpaceCardGeneration::is_in(p);
  }

  bool is_in_shared(const void* p) const {
    return p >= shared_bottom && p < shared_end;
  }

  inline bool is_in(const void* p) const {
    return is_in_unshared(p) || is_in_shared(p);
  }

  inline PermanentGenerationSpec* spec() const { return _spec; }
  inline void set_spec(PermanentGenerationSpec* spec) { _spec = spec; }

  void pre_adjust_pointers();
  void adjust_pointers();
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  void print_on(outputStream* st) const;
  void younger_refs_iterate(OopsInGenClosure* blk);
  void compact();
  void post_compact();
  size_t contiguous_available() const;

  void clear_remembered_set();
  void invalidate_remembered_set();

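  // Block queries: within each space, a block that starts below top() is a
  // live object, while [top(), end()) forms a single unallocated block.
  // block_is_obj() and block_size() apply this convention to the unshared
  // space and then to the shared ro and rw spaces.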
  inline bool block_is_obj(const HeapWord* addr) const {
    if      (addr < the_space()->top()) return true;
    else if (addr < the_space()->end()) return false;
    else if (addr < ro_space()->top())  return true;
    else if (addr < ro_space()->end())  return false;
    else if (addr < rw_space()->top())  return true;
    else                                return false;
  }

  inline size_t block_size(const HeapWord* addr) const {
    if (addr < the_space()->top()) {
      return oop(addr)->size();
    }
    else if (addr < the_space()->end()) {
      assert(addr == the_space()->top(), "non-block head arg to block_size");
      return the_space()->end() - the_space()->top();
    }
    else if (addr < ro_space()->top()) {
      return oop(addr)->size();
    }
    else if (addr < ro_space()->end()) {
      assert(addr == ro_space()->top(), "non-block head arg to block_size");
      return ro_space()->end() - ro_space()->top();
    }
    else if (addr < rw_space()->top()) {
      return oop(addr)->size();
    }
    else {
      assert(addr == rw_space()->top(), "non-block head arg to block_size");
      return rw_space()->end() - rw_space()->top();
    }
  }

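  // Generate the vtable-replacement machinery for the shared spaces: the
  // replacement method stubs are emitted into the mc region ("miscellaneous
  // code - vtable replacement" above) and the tables referencing them into
  // the md region, advancing *mc_top and *md_top; vtbl_list identifies the
  // vtables to be patched when the shared archive is used.  (Hedged summary;
  // the dump-time code is authoritative.)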
  static void generate_vtable_methods(void** vtbl_list,
                                      void** vtable,
                                      char** md_top, char* md_end,
                                      char** mc_top, char* mc_end);

  void verify(bool allow_dirty);

  // Serialization
  static void initialize_oops() KERNEL_RETURN;
  static void serialize_oops(SerializeOopClosure* soc);
  void serialize_bts(SerializeOopClosure* soc);

  // Initiate dumping of shared file.
  static jint dump_shared(GrowableArray<oop>* class_promote_order, TRAPS);

  // JVM/TI RedefineClasses() support:
  // Remap the shared read-only space as read-write (and private) if sharing
  // is enabled.  Simply returns true if sharing is not enabled or if the
  // remapping has already been done by a prior call.
  static bool remap_shared_readonly_as_readwrite();
};

#endif // SHARE_VM_MEMORY_COMPACTINGPERMGENGEN_HPP