#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)compactingPermGenGen.cpp     1.22 08/11/24 12:22:45 JVM"
#endif
/*
 * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_compactingPermGenGen.cpp.incl"


// An ObjectClosure helper: Adjust all pointers in a shared read-write
// object and clear its mark so that no object is visited twice. This
// helper is applied to every object in the read-write space (via
// object_iterate) when the RedefineClasses() API has been called.

class AdjustSharedObjectClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->adjust_pointers();   // Adjust this object's references.
      }
    }
  }
};


// An OopClosure helper: Recursively adjust all pointers in an object
// and in all objects referenced by it. Clear marks on objects in order
// to prevent visiting any object twice.

class RecursiveAdjustSharedObjectClosure : public OopClosure {
public:
  void do_oop(oop* o) {
    oop obj = *o;
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->oop_iterate(this);   // Recurse - adjust objects referenced.
        obj->adjust_pointers();   // Adjust this object's references.

        // Special case: if a class has a read-only constant pool,
        // then the read-write objects referenced by the pool must
        // have their marks reset.

        if (obj->klass() == Universe::instanceKlassKlassObj()) {
          instanceKlass* ik = instanceKlass::cast((klassOop)obj);
          constantPoolOop cp = ik->constants();
          if (cp->is_shared_readonly()) {
            cp->oop_iterate(this);
          }
        }
      }
    }
  }
};



// We need to go through all placeholders in the system dictionary and
// try to resolve them into shared classes. Other threads might be in
// the process of loading a shared class and have strong roots on
// their stack to the class without having added the class to the
// dictionary yet. This means the class will be marked during phase 1
// but will not be unmarked during the application of the
// RecursiveAdjustSharedObjectClosure to the SystemDictionary. Note
// that we must not call find_shared_class with non-read-only symbols
// as doing so can cause hash codes to be computed, destroying
// forwarding pointers.
class TraversePlaceholdersClosure : public OopClosure {
 public:
  void do_oop(oop* o) {
    oop obj = *o;
    if (obj->klass() == Universe::symbolKlassObj() &&
        obj->is_shared_readonly()) {
      symbolHandle sym((symbolOop) obj);
      oop k = SystemDictionary::find_shared_class(sym);
      if (k != NULL) {
        RecursiveAdjustSharedObjectClosure clo;
        clo.do_oop(&k);
      }
    }
  }
};


void CompactingPermGenGen::initialize_performance_counters() {

  const char* gen_name = "perm";

  // Generation Counters - generation 2, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 2, 1, &_virtual_space);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

void CompactingPermGenGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}


CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
                                           ReservedSpace shared_rs,
                                           size_t initial_byte_size,
                                           int level, GenRemSet* remset,
                                           ContiguousSpace* space,
                                           PermanentGenerationSpec* spec_) :
  OneContigSpaceCardGeneration(rs, initial_byte_size, MinPermHeapExpansion,
                               level, remset, space) {

  set_spec(spec_);
  if (!UseSharedSpaces && !DumpSharedSpaces) {
    spec()->disable_sharing();
  }

  // Break virtual space into address ranges for all spaces.

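  // Sketch of the resulting address layout (derived from the assignments
  // below), from low to high addresses, when sharing is enabled:
  //
  //   [unshared_bottom .. unshared_end)   unshared perm gen space
  //   [readonly_bottom  .. readonly_end)  shared read-only space
  //   [readwrite_bottom .. readwrite_end) shared read-write space
  //   [miscdata_bottom  .. miscdata_end)  shared miscellaneous data
  //   [misccode_bottom  .. misccode_end)  shared miscellaneous code
  //
  // unshared_end == shared_bottom == readonly_bottom, and
  // misccode_end == shared_end.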
  if (spec()->enable_shared_spaces()) {
    shared_end = (HeapWord*)(shared_rs.base() + shared_rs.size());
      misccode_end = shared_end;
      misccode_bottom = misccode_end - heap_word_size(spec()->misc_code_size());
      miscdata_end = misccode_bottom;
      miscdata_bottom = miscdata_end - heap_word_size(spec()->misc_data_size());
      readwrite_end = miscdata_bottom;
      readwrite_bottom =
        readwrite_end - heap_word_size(spec()->read_write_size());
      readonly_end = readwrite_bottom;
      readonly_bottom =
        readonly_end - heap_word_size(spec()->read_only_size());
    shared_bottom = readonly_bottom;
    unshared_end = shared_bottom;
    assert((char*)shared_bottom == shared_rs.base(), "shared space mismatch");
  } else {
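    // Sharing is disabled: collapse every shared-region boundary to the
    // end of the reserved space so that all shared regions are empty.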
    shared_end = (HeapWord*)(rs.base() + rs.size());
      misccode_end = shared_end;
      misccode_bottom = shared_end;
      miscdata_end = shared_end;
      miscdata_bottom = shared_end;
      readwrite_end = shared_end;
      readwrite_bottom = shared_end;
      readonly_end = shared_end;
      readonly_bottom = shared_end;
    shared_bottom = shared_end;
    unshared_end = shared_bottom;
  }
  unshared_bottom = (HeapWord*) rs.base();

  // Verify shared and unshared spaces adjacent.
  assert((char*)shared_bottom == rs.base()+rs.size(), "shared space mismatch");
  assert(unshared_end > unshared_bottom, "shared space mismatch");

  // Split reserved memory into pieces.

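  // shared_rs is carved, from low to high addresses, into read-only,
  // read-write, misc data, and misc code pieces, matching the boundaries
  // computed above.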
  ReservedSpace ro_rs   = shared_rs.first_part(spec()->read_only_size(),
                                               UseSharedSpaces);
  ReservedSpace tmp_rs1 = shared_rs.last_part(spec()->read_only_size());
  ReservedSpace rw_rs   = tmp_rs1.first_part(spec()->read_write_size(),
                                             UseSharedSpaces);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(spec()->read_write_size());
  ReservedSpace md_rs   = tmp_rs2.first_part(spec()->misc_data_size(),
                                             UseSharedSpaces);
  ReservedSpace mc_rs   = tmp_rs2.last_part(spec()->misc_data_size());

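  // Total size of the shared regions; used by the capacity and growth
  // queries below to keep the shared spaces out of this generation's
  // accounting.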
  _shared_space_size = spec()->read_only_size()
                     + spec()->read_write_size()
                     + spec()->misc_data_size()
                     + spec()->misc_code_size();

  // Allocate the unshared (default) space.
  _the_space = new ContigPermSpace(_bts,
               MemRegion(unshared_bottom, heap_word_size(initial_byte_size)));
  if (_the_space == NULL)
    vm_exit_during_initialization("Could not allocate an unshared"
                                  " CompactingPermGen Space");

  // Allocate shared spaces
  if (spec()->enable_shared_spaces()) {

    // If mapping a shared file, the space is not committed, so don't
    // mangle it.
    NOT_PRODUCT(bool old_ZapUnusedHeapArea = ZapUnusedHeapArea;)
    NOT_PRODUCT(if (UseSharedSpaces) ZapUnusedHeapArea = false;)

    // Commit the memory behind the shared spaces if dumping (not
    // mapping).
    if (DumpSharedSpaces) {
      _ro_vs.initialize(ro_rs, spec()->read_only_size());
      _rw_vs.initialize(rw_rs, spec()->read_write_size());
      _md_vs.initialize(md_rs, spec()->misc_data_size());
      _mc_vs.initialize(mc_rs, spec()->misc_code_size());
    }

    // Allocate the shared spaces.
    _ro_bts = new BlockOffsetSharedArray(
                  MemRegion(readonly_bottom,
                            heap_word_size(spec()->read_only_size())),
                  heap_word_size(spec()->read_only_size()));
    _ro_space = new OffsetTableContigSpace(_ro_bts,
                  MemRegion(readonly_bottom, readonly_end));
    _rw_bts = new BlockOffsetSharedArray(
                  MemRegion(readwrite_bottom,
                            heap_word_size(spec()->read_write_size())),
                  heap_word_size(spec()->read_write_size()));
    _rw_space = new OffsetTableContigSpace(_rw_bts,
                  MemRegion(readwrite_bottom, readwrite_end));

    // Restore mangling flag.
    NOT_PRODUCT(ZapUnusedHeapArea = old_ZapUnusedHeapArea;)

    if (_ro_space == NULL || _rw_space == NULL)
      vm_exit_during_initialization("Could not allocate a shared space");

    // Cover both shared spaces entirely with cards.
    _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));

    if (UseSharedSpaces) {

      // Map in the regions in the shared file.
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      size_t image_alignment = mapinfo->alignment();
      CollectedHeap* ch = Universe::heap();
      if ((!mapinfo->map_space(ro, ro_rs, _ro_space)) ||
          (!mapinfo->map_space(rw, rw_rs, _rw_space)) ||
          (!mapinfo->map_space(md, md_rs, NULL))      ||
          (!mapinfo->map_space(mc, mc_rs, NULL))      ||
          // check the alignment constraints
          (ch == NULL || ch->kind() != CollectedHeap::GenCollectedHeap ||
           image_alignment !=
           ((GenCollectedHeap*)ch)->gen_policy()->max_alignment())) {
        // The mapping or alignment check failed; skip sharing, but
        // continue.
        shared_rs.release();
        spec()->disable_sharing();
        // If -Xshare:on was specified, print an error message and exit the
        // VM; otherwise, set UseSharedSpaces to false and continue.
        if (RequireSharedSpaces) {
          vm_exit_during_initialization("Unable to use shared archive.", NULL);
        } else {
          FLAG_SET_DEFAULT(UseSharedSpaces, false);
        }

        // Note: freeing the block offset array objects does not
        // currently free up the underlying storage.
        delete _ro_bts;
        _ro_bts = NULL;
        delete _ro_space;
        _ro_space = NULL;
        delete _rw_bts;
        _rw_bts = NULL;
        delete _rw_space;
        _rw_space = NULL;
        shared_end = (HeapWord*)(rs.base() + rs.size());
        _rs->resize_covered_region(MemRegion(shared_bottom, shared_bottom));
      }
    }

    // Reserved region includes shared spaces for oop.is_in_reserved().
    _reserved.set_end(shared_end);

  } else {
    _ro_space = NULL;
    _rw_space = NULL;
  }
}


// Do a complete scan of the shared read-write space to catch all
// objects which contain references to any younger generation.  Forward
// the pointers.  Avoid space_iterate, as actually visiting all the
// objects in the space will page in more objects than we need.
// Instead, use the system dictionary as strong roots into the read-write
// space.
//
// If a RedefineClasses() call has been made, then we have to iterate
// over the entire shared read-write space in order to find all the
// objects that need to be forwarded. For example, it is possible for
// an nmethod to be found and marked in GC phase-1 only for the nmethod
// to be freed by the time we reach GC phase-3. The underlying method
// is still marked, but we can't (easily) find it in GC phase-3 so we
// blow up in GC phase-4. With RedefineClasses() we want replaced code
// (EMCP or obsolete) to go away (i.e., be collectible) once it is no
// longer being executed by any thread, so we keep minimal attachments
// to the replaced code. However, we can't guarantee when those EMCP
// or obsolete methods will be collected, so they may still be out there
// even after we've severed our minimal attachments.

void CompactingPermGenGen::pre_adjust_pointers() {
  if (spec()->enable_shared_spaces()) {
    if (JvmtiExport::has_redefined_a_class()) {
      // RedefineClasses() requires a brute force approach
      AdjustSharedObjectClosure blk;
      rw_space()->object_iterate(&blk);
    } else {
      RecursiveAdjustSharedObjectClosure blk;
      Universe::oops_do(&blk);
      StringTable::oops_do(&blk);
      SystemDictionary::always_strong_classes_do(&blk);
      TraversePlaceholdersClosure tpc;
      SystemDictionary::placeholders_do(&tpc);
    }
  }
}


#ifdef ASSERT
class VerifyMarksClearedClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    assert(SharedSkipVerify || !obj->mark()->is_marked(),
           "Shared oop still marked?");
  }
};
#endif


void CompactingPermGenGen::post_compact() {
#ifdef ASSERT
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    VerifyMarksClearedClosure blk;
    rw_space()->object_iterate(&blk);
  }
#endif
}


void CompactingPermGenGen::space_iterate(SpaceClosure* blk, bool usedOnly) {
  OneContigSpaceCardGeneration::space_iterate(blk, usedOnly);
  if (spec()->enable_shared_spaces()) {
#ifdef PRODUCT
    // Making the rw_space walkable will page in the entire space, and
    // is to be avoided. However, this is required for Verify options.
    ShouldNotReachHere();
#endif

    blk->do_space(ro_space());
    blk->do_space(rw_space());
  }
}


void CompactingPermGenGen::print_on(outputStream* st) const {
  OneContigSpaceCardGeneration::print_on(st);
  if (spec()->enable_shared_spaces()) {
    st->print("    ro");
    ro_space()->print_on(st);
    st->print("    rw");
    rw_space()->print_on(st);
  } else {
    st->print_cr("No shared spaces configured.");
  }
}



// References from the perm gen to younger generation objects may
// occur in static fields in Java classes or in constant pool
// references to String objects.

void CompactingPermGenGen::younger_refs_iterate(OopsInGenClosure* blk) {
  OneContigSpaceCardGeneration::younger_refs_iterate(blk);
  if (spec()->enable_shared_spaces()) {
    blk->set_generation(this);
    // ro_space has no younger gen refs.
    _rs->younger_refs_in_space_iterate(rw_space(), blk);
    blk->reset_generation();
  }
}


// Shared spaces are addressed in pre_adjust_pointers.
void CompactingPermGenGen::adjust_pointers() {
  the_space()->adjust_pointers();
}


void CompactingPermGenGen::compact() {
  the_space()->compact();
}


size_t CompactingPermGenGen::contiguous_available() const {
  // Don't include shared spaces.
  return OneContigSpaceCardGeneration::contiguous_available()
         - _shared_space_size;
}

size_t CompactingPermGenGen::max_capacity() const {
  // Don't include shared spaces.
  assert(UseSharedSpaces || (_shared_space_size == 0),
    "If not used, the size of shared spaces should be 0");
  return OneContigSpaceCardGeneration::max_capacity()
          - _shared_space_size;
}



bool CompactingPermGenGen::grow_by(size_t bytes) {
  // Don't allow _virtual_size to expand into shared spaces.
  size_t max_bytes = _virtual_space.uncommitted_size() - _shared_space_size;
  if (bytes > max_bytes) {
    bytes = max_bytes;
  }
  return OneContigSpaceCardGeneration::grow_by(bytes);
}


void CompactingPermGenGen::grow_to_reserved() {
  // Don't allow _virtual_size to expand into shared spaces.
  if (_virtual_space.uncommitted_size() > _shared_space_size) {
    size_t remaining_bytes =
      _virtual_space.uncommitted_size() - _shared_space_size;
    bool success = OneContigSpaceCardGeneration::grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
}


// There are no young generation references, so clear this generation's
// main space's card table entries.  Do NOT clear the card table entries
// for the read-only space (always clear) or the read-write space
// (valuable information).

void CompactingPermGenGen::clear_remembered_set() {
  _rs->clear(MemRegion(the_space()->bottom(), the_space()->end()));
}


// Objects in this generation's main space may have moved, so invalidate
// that space's cards.  Do NOT invalidate the card table entries for the
// read-only or read-write spaces, as those objects never move.

void CompactingPermGenGen::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}


void CompactingPermGenGen::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    ro_space()->verify(allow_dirty);
    rw_space()->verify(allow_dirty);
  }
}


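// Definitions of the static shared-region boundary fields; they are
// assigned by the constructor above.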
HeapWord* CompactingPermGenGen::unshared_bottom;
HeapWord* CompactingPermGenGen::unshared_end;
HeapWord* CompactingPermGenGen::shared_bottom;
HeapWord* CompactingPermGenGen::shared_end;
HeapWord* CompactingPermGenGen::readonly_bottom;
HeapWord* CompactingPermGenGen::readonly_end;
HeapWord* CompactingPermGenGen::readwrite_bottom;
HeapWord* CompactingPermGenGen::readwrite_end;
HeapWord* CompactingPermGenGen::miscdata_bottom;
HeapWord* CompactingPermGenGen::miscdata_end;
HeapWord* CompactingPermGenGen::misccode_bottom;
HeapWord* CompactingPermGenGen::misccode_end;

// JVM/TI RedefineClasses() support:
bool CompactingPermGenGen::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
  }
  return true;
}

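// List of C++ vtable pointers used by the shared-spaces support to patch
// the vtables of objects mapped in from the shared archive (see the class
// declaration in the corresponding header).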
void** CompactingPermGenGen::_vtbl_list;