/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/compactingPermGenGen.hpp"
#include "memory/filemap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#endif


// An ObjectClosure helper: Adjust all pointers in an object and clear its
// mark so the object is not visited twice. This helper is used when the
// RedefineClasses() API has been called.

class AdjustSharedObjectClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->adjust_pointers();   // Adjust this object's references.
      }
    }
  }
};
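
// AdjustSharedObjectClosure is driven from pre_adjust_pointers() below,
// which applies it to every object in the shared read-write space via
// rw_space()->object_iterate() when a class has been redefined.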


// An OopClosure helper: Recursively adjust all pointers in an object
// and all objects referenced by it. Clear marks on objects in order
// to prevent visiting any object twice.

class RecursiveAdjustSharedObjectClosure : public OopClosure {
 protected:
  template <class T> inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->oop_iterate(this);   // Recurse - adjust objects referenced.
        obj->adjust_pointers();   // Adjust this object's references.

        // Special case: if a class has a read-only constant pool,
        // then the read-write objects referenced by the pool must
        // have their marks reset.

        if (obj->klass() == Universe::instanceKlassKlassObj()) {
          instanceKlass* ik = instanceKlass::cast((klassOop)obj);
          constantPoolOop cp = ik->constants();
          if (cp->is_shared_readonly()) {
            cp->oop_iterate(this);
          }
        }
      }
    }
  }
 public:
  virtual void do_oop(oop* p)       { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
};
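
// RecursiveAdjustSharedObjectClosure is driven from the strong roots
// (Universe, StringTable and the SystemDictionary) in pre_adjust_pointers()
// below; recursion stops at objects that are not shared read-write or whose
// marks have already been cleared.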


// We need to go through all placeholders in the system dictionary and
// try to resolve them into shared classes. Other threads might be in
// the process of loading a shared class and have strong roots on
// their stack to the class without having added the class to the
// dictionary yet. This means the class will be marked during phase 1
// but will not be unmarked during the application of the
// RecursiveAdjustSharedObjectClosure to the SystemDictionary. Note
// that we must not call find_shared_class with non-read-only symbols
// as doing so can cause hash codes to be computed, destroying
// forwarding pointers.
class TraversePlaceholdersClosure : public OopClosure {
 protected:
  template <class T> inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (obj->klass() == Universe::symbolKlassObj() &&
        obj->is_shared_readonly()) {
      symbolHandle sym((symbolOop) obj);
      oop k = SystemDictionary::find_shared_class(sym);
      if (k != NULL) {
        RecursiveAdjustSharedObjectClosure clo;
        clo.do_oop(&k);
      }
    }
  }
 public:
  virtual void do_oop(oop* p)       { TraversePlaceholdersClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { TraversePlaceholdersClosure::do_oop_work(p); }

};


void CompactingPermGenGen::initialize_performance_counters() {

  const char* gen_name = "perm";

  // Generation Counters - generation 2, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 2, 1, &_virtual_space);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

void CompactingPermGenGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}


CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
                                           ReservedSpace shared_rs,
                                           size_t initial_byte_size,
                                           int level, GenRemSet* remset,
                                           ContiguousSpace* space,
                                           PermanentGenerationSpec* spec_) :
  OneContigSpaceCardGeneration(rs, initial_byte_size, MinPermHeapExpansion,
                               level, remset, space) {

  set_spec(spec_);
  if (!UseSharedSpaces && !DumpSharedSpaces) {
    spec()->disable_sharing();
  }

  // Break virtual space into address ranges for all spaces.

  if (spec()->enable_shared_spaces()) {
    shared_end = (HeapWord*)(shared_rs.base() + shared_rs.size());
      misccode_end = shared_end;
      misccode_bottom = misccode_end - heap_word_size(spec()->misc_code_size());
      miscdata_end = misccode_bottom;
      miscdata_bottom = miscdata_end - heap_word_size(spec()->misc_data_size());
      readwrite_end = miscdata_bottom;
      readwrite_bottom =
        readwrite_end - heap_word_size(spec()->read_write_size());
      readonly_end = readwrite_bottom;
      readonly_bottom =
        readonly_end - heap_word_size(spec()->read_only_size());
    shared_bottom = readonly_bottom;
    unshared_end = shared_bottom;
    assert((char*)shared_bottom == shared_rs.base(), "shared space mismatch");
  } else {
    shared_end = (HeapWord*)(rs.base() + rs.size());
      misccode_end = shared_end;
      misccode_bottom = shared_end;
      miscdata_end = shared_end;
      miscdata_bottom = shared_end;
      readwrite_end = shared_end;
      readwrite_bottom = shared_end;
      readonly_end = shared_end;
      readonly_bottom = shared_end;
    shared_bottom = shared_end;
    unshared_end = shared_bottom;
  }
  unshared_bottom = (HeapWord*) rs.base();
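
  // A sketch of the resulting address layout, low to high, as computed
  // above (the unshared range comes from rs, the shared ranges from
  // shared_rs when sharing is enabled; with sharing disabled all shared
  // bounds collapse to shared_end):
  //
  //   unshared_bottom                                                     shared_end
  //   |-- unshared --|-- read-only --|-- read-write --|-- misc data --|-- misc code --|
  //                  ^
  //                  unshared_end == shared_bottom == readonly_bottom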

  // Verify shared and unshared spaces adjacent.
  assert((char*)shared_bottom == rs.base()+rs.size(), "shared space mismatch");
  assert(unshared_end > unshared_bottom, "shared space mismatch");

  // Split reserved memory into pieces.

  ReservedSpace ro_rs   = shared_rs.first_part(spec()->read_only_size(),
                                               UseSharedSpaces);
  ReservedSpace tmp_rs1 = shared_rs.last_part(spec()->read_only_size());
  ReservedSpace rw_rs   = tmp_rs1.first_part(spec()->read_write_size(),
                                             UseSharedSpaces);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(spec()->read_write_size());
  ReservedSpace md_rs   = tmp_rs2.first_part(spec()->misc_data_size(),
                                             UseSharedSpaces);
  ReservedSpace mc_rs   = tmp_rs2.last_part(spec()->misc_data_size());
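
  // The nested first_part()/last_part() calls above carve shared_rs into
  // four consecutive pieces in address order -- ro_rs, rw_rs, md_rs, mc_rs --
  // matching the read-only, read-write, misc data and misc code ranges.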

  _shared_space_size = spec()->read_only_size()
                     + spec()->read_write_size()
                     + spec()->misc_data_size()
                     + spec()->misc_code_size();

  // Allocate the unshared (default) space.
  _the_space = new ContigPermSpace(_bts,
               MemRegion(unshared_bottom, heap_word_size(initial_byte_size)));
  if (_the_space == NULL)
    vm_exit_during_initialization("Could not allocate an unshared"
                                  " CompactingPermGen Space");

  // Allocate shared spaces.
  if (spec()->enable_shared_spaces()) {

    // If mapping a shared file, the space is not committed, so don't
    // mangle.
    NOT_PRODUCT(bool old_ZapUnusedHeapArea = ZapUnusedHeapArea;)
    NOT_PRODUCT(if (UseSharedSpaces) ZapUnusedHeapArea = false;)

    // Commit the memory behind the shared spaces if dumping (not
    // mapping).
    if (DumpSharedSpaces) {
      _ro_vs.initialize(ro_rs, spec()->read_only_size());
      _rw_vs.initialize(rw_rs, spec()->read_write_size());
      _md_vs.initialize(md_rs, spec()->misc_data_size());
      _mc_vs.initialize(mc_rs, spec()->misc_code_size());
    }

    // Allocate the shared spaces.
    _ro_bts = new BlockOffsetSharedArray(
                  MemRegion(readonly_bottom,
                            heap_word_size(spec()->read_only_size())),
                  heap_word_size(spec()->read_only_size()));
    _ro_space = new OffsetTableContigSpace(_ro_bts,
                  MemRegion(readonly_bottom, readonly_end));
    _rw_bts = new BlockOffsetSharedArray(
                  MemRegion(readwrite_bottom,
                            heap_word_size(spec()->read_write_size())),
                  heap_word_size(spec()->read_write_size()));
    _rw_space = new OffsetTableContigSpace(_rw_bts,
                  MemRegion(readwrite_bottom, readwrite_end));

    // Restore mangling flag.
    NOT_PRODUCT(ZapUnusedHeapArea = old_ZapUnusedHeapArea;)

    if (_ro_space == NULL || _rw_space == NULL)
      vm_exit_during_initialization("Could not allocate a shared space");

    // Cover both shared spaces entirely with cards.
    _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));

    if (UseSharedSpaces) {

      // Map in the regions in the shared file.
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      size_t image_alignment = mapinfo->alignment();
      CollectedHeap* ch = Universe::heap();
      if ((!mapinfo->map_space(ro, ro_rs, _ro_space)) ||
          (!mapinfo->map_space(rw, rw_rs, _rw_space)) ||
          (!mapinfo->map_space(md, md_rs, NULL))      ||
          (!mapinfo->map_space(mc, mc_rs, NULL))      ||
          // check the alignment constraints
          (ch == NULL || ch->kind() != CollectedHeap::GenCollectedHeap ||
           image_alignment !=
           ((GenCollectedHeap*)ch)->gen_policy()->max_alignment())) {
        // Base addresses didn't match; skip sharing, but continue
        shared_rs.release();
        spec()->disable_sharing();
        // If -Xshare:on is specified, print the error message and exit the
        // VM; otherwise, set UseSharedSpaces to false and continue.
        if (RequireSharedSpaces) {
          vm_exit_during_initialization("Unable to use shared archive.", NULL);
        } else {
          FLAG_SET_DEFAULT(UseSharedSpaces, false);
        }

        // Note: freeing the block offset array objects does not
        // currently free up the underlying storage.
        delete _ro_bts;
        _ro_bts = NULL;
        delete _ro_space;
        _ro_space = NULL;
        delete _rw_bts;
        _rw_bts = NULL;
        delete _rw_space;
        _rw_space = NULL;
        shared_end = (HeapWord*)(rs.base() + rs.size());
        _rs->resize_covered_region(MemRegion(shared_bottom, shared_bottom));
      }
    }

    // Reserved region includes shared spaces for oop.is_in_reserved().
    _reserved.set_end(shared_end);

  } else {
    _ro_space = NULL;
    _rw_space = NULL;
  }
}


// Do a complete scan of the shared read-write space to catch all
// objects which contain references to any younger generation.  Forward
// the pointers.  Avoid space_iterate, as actually visiting all the
// objects in the space will page in more objects than we need.
// Instead, use the system dictionary as strong roots into the read-write
// space.
//
// If a RedefineClasses() call has been made, then we have to iterate
// over the entire shared read-write space in order to find all the
// objects that need to be forwarded. For example, it is possible for
// an nmethod to be found and marked in GC phase-1 only for the nmethod
// to be freed by the time we reach GC phase-3. The underlying method
// is still marked, but we can't (easily) find it in GC phase-3 so we
// blow up in GC phase-4. With RedefineClasses() we want replaced code
// (EMCP or obsolete) to go away (i.e., be collectible) once it is no
// longer being executed by any thread, so we keep minimal attachments
// to the replaced code. However, we can't guarantee when those EMCP
// or obsolete methods will be collected, so they may still be out there
// even after we've severed our minimal attachments.

void CompactingPermGenGen::pre_adjust_pointers() {
  if (spec()->enable_shared_spaces()) {
    if (JvmtiExport::has_redefined_a_class()) {
      // RedefineClasses() requires a brute force approach
      AdjustSharedObjectClosure blk;
      rw_space()->object_iterate(&blk);
    } else {
      RecursiveAdjustSharedObjectClosure blk;
      Universe::oops_do(&blk);
      StringTable::oops_do(&blk);
      SystemDictionary::always_strong_classes_do(&blk);
      TraversePlaceholdersClosure tpc;
      SystemDictionary::placeholders_do(&tpc);
    }
  }
}


#ifdef ASSERT
class VerifyMarksClearedClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    assert(SharedSkipVerify || !obj->mark()->is_marked(),
           "Shared oop still marked?");
  }
};
#endif


void CompactingPermGenGen::post_compact() {
#ifdef ASSERT
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    VerifyMarksClearedClosure blk;
    rw_space()->object_iterate(&blk);
  }
#endif
}


// Do not use in time-critical operations due to the possibility of paging
// in otherwise untouched or previously unread portions of the perm gen,
// for instance, the shared spaces. NOTE: Because CompactingPermGenGen
// derives from OneContigSpaceCardGeneration which is supposed to have a
// single space, and does not override its object_iterate() method,
// object iteration via that interface does not look at the objects in
// the shared spaces when using CDS. This should be fixed; see CR 6897798.
void CompactingPermGenGen::space_iterate(SpaceClosure* blk, bool usedOnly) {
  OneContigSpaceCardGeneration::space_iterate(blk, usedOnly);
  if (spec()->enable_shared_spaces()) {
    // Making the rw_space walkable will page in the entire space, and
    // is to be avoided in the case of time-critical operations.
    // However, this is required for Verify and heap dump operations.
    blk->do_space(ro_space());
    blk->do_space(rw_space());
  }
}


void CompactingPermGenGen::print_on(outputStream* st) const {
  OneContigSpaceCardGeneration::print_on(st);
  if (spec()->enable_shared_spaces()) {
    st->print("    ro");
    ro_space()->print_on(st);
    st->print("    rw");
    rw_space()->print_on(st);
  } else {
    st->print_cr("No shared spaces configured.");
  }
}


// References from the perm gen to the younger generation objects may
// occur in static fields in Java classes or in constant pool references
// to String objects.

void CompactingPermGenGen::younger_refs_iterate(OopsInGenClosure* blk) {
  OneContigSpaceCardGeneration::younger_refs_iterate(blk);
  if (spec()->enable_shared_spaces()) {
    blk->set_generation(this);
    // ro_space has no younger gen refs.
    _rs->younger_refs_in_space_iterate(rw_space(), blk);
    blk->reset_generation();
  }
}


// Shared spaces are addressed in pre_adjust_pointers.
void CompactingPermGenGen::adjust_pointers() {
  the_space()->adjust_pointers();
}


void CompactingPermGenGen::compact() {
  the_space()->compact();
}


size_t CompactingPermGenGen::contiguous_available() const {
  // Don't include shared spaces.
  return OneContigSpaceCardGeneration::contiguous_available()
         - _shared_space_size;
}

size_t CompactingPermGenGen::max_capacity() const {
  // Don't include shared spaces.
  assert(UseSharedSpaces || (_shared_space_size == 0),
    "If not used, the size of shared spaces should be 0");
  return OneContigSpaceCardGeneration::max_capacity()
         - _shared_space_size;
}


// There are no young generation references left, so clear this generation's
// main space's card table entries.  Do NOT clear the card table entries for
// the read-only space (its cards are always clear) or the read-write space
// (its cards carry valuable information).

void CompactingPermGenGen::clear_remembered_set() {
  _rs->clear(MemRegion(the_space()->bottom(), the_space()->end()));
}


// Objects in this generation's main space may have moved; invalidate
// that space's cards.  Do NOT invalidate the card table entries for the
// read-only or read-write spaces, as those objects never move.

void CompactingPermGenGen::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}


void CompactingPermGenGen::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    ro_space()->verify(allow_dirty);
    rw_space()->verify(allow_dirty);
  }
}


HeapWord* CompactingPermGenGen::unshared_bottom;
HeapWord* CompactingPermGenGen::unshared_end;
HeapWord* CompactingPermGenGen::shared_bottom;
HeapWord* CompactingPermGenGen::shared_end;
HeapWord* CompactingPermGenGen::readonly_bottom;
HeapWord* CompactingPermGenGen::readonly_end;
HeapWord* CompactingPermGenGen::readwrite_bottom;
HeapWord* CompactingPermGenGen::readwrite_end;
HeapWord* CompactingPermGenGen::miscdata_bottom;
HeapWord* CompactingPermGenGen::miscdata_end;
HeapWord* CompactingPermGenGen::misccode_bottom;
HeapWord* CompactingPermGenGen::misccode_end;

// JVM/TI RedefineClasses() support:
bool CompactingPermGenGen::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
  }
  return true;
}

void** CompactingPermGenGen::_vtbl_list;