1 /*
   2  * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/symbolTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
  30 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
  31 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
  32 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
  33 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
  34 #include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
  35 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
  36 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
  37 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
  38 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
  39 #include "gc_implementation/parallelScavenge/psPermGen.hpp"
  40 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
  41 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
  42 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
  43 #include "gc_implementation/shared/isGCActiveMark.hpp"
  44 #include "gc_interface/gcCause.hpp"
  45 #include "memory/gcLocker.inline.hpp"
  46 #include "memory/referencePolicy.hpp"
  47 #include "memory/referenceProcessor.hpp"
  48 #include "oops/methodDataOop.hpp"
  49 #include "oops/oop.inline.hpp"
  50 #include "oops/oop.pcgc.inline.hpp"
  51 #include "runtime/fprofiler.hpp"
  52 #include "runtime/safepoint.hpp"
  53 #include "runtime/vmThread.hpp"
  54 #include "services/management.hpp"
  55 #include "services/memoryService.hpp"
  56 #include "utilities/events.hpp"
  57 #include "utilities/stack.inline.hpp"
  58 
  59 #include <math.h>
  60 
  61 // All sizes are in HeapWords.
  62 const size_t ParallelCompactData::Log2RegionSize  = 9; // 512 words
  63 const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
  64 const size_t ParallelCompactData::RegionSizeBytes =
  65   RegionSize << LogHeapWordSize;
  66 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
  67 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
  68 const size_t ParallelCompactData::RegionAddrMask  = ~RegionAddrOffsetMask;
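     // Illustrative example (assuming a 64-bit VM, where LogHeapWordSize == 3):
     // RegionSize is 512 words == 4096 bytes, RegionSizeOffsetMask is 0x1ff (the
     // word offset within a region), RegionAddrOffsetMask is 0xfff (the byte
     // offset within a region) and RegionAddrMask clears those low 12 bits to
     // give the region-aligned address.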
  69 
  70 const ParallelCompactData::RegionData::region_sz_t
  71 ParallelCompactData::RegionData::dc_shift = 27;
  72 
  73 const ParallelCompactData::RegionData::region_sz_t
  74 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
  75 
  76 const ParallelCompactData::RegionData::region_sz_t
  77 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
  78 
  79 const ParallelCompactData::RegionData::region_sz_t
  80 ParallelCompactData::RegionData::los_mask = ~dc_mask;
  81 
  82 const ParallelCompactData::RegionData::region_sz_t
  83 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
  84 
  85 const ParallelCompactData::RegionData::region_sz_t
  86 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
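     // Taken together:  the low 27 bits (los_mask) of the packed region_sz_t
     // value hold the live-object size, while the bits at and above dc_shift hold
     // the destination count, with dc_claimed and dc_completed marking a region
     // that has been claimed for filling and one that has been filled.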
  87 
  88 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
  89 bool      PSParallelCompact::_print_phases = false;
  90 
  91 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
  92 klassOop            PSParallelCompact::_updated_int_array_klass_obj = NULL;
  93 
  94 double PSParallelCompact::_dwl_mean;
  95 double PSParallelCompact::_dwl_std_dev;
  96 double PSParallelCompact::_dwl_first_term;
  97 double PSParallelCompact::_dwl_adjustment;
  98 #ifdef  ASSERT
  99 bool   PSParallelCompact::_dwl_initialized = false;
 100 #endif  // #ifdef ASSERT
 101 
 102 #ifdef VALIDATE_MARK_SWEEP
 103 GrowableArray<void*>*   PSParallelCompact::_root_refs_stack = NULL;
 104 GrowableArray<oop> *    PSParallelCompact::_live_oops = NULL;
 105 GrowableArray<oop> *    PSParallelCompact::_live_oops_moved_to = NULL;
 106 GrowableArray<size_t>*  PSParallelCompact::_live_oops_size = NULL;
 107 size_t                  PSParallelCompact::_live_oops_index = 0;
 108 size_t                  PSParallelCompact::_live_oops_index_at_perm = 0;
 109 GrowableArray<void*>*   PSParallelCompact::_other_refs_stack = NULL;
 110 GrowableArray<void*>*   PSParallelCompact::_adjusted_pointers = NULL;
 111 bool                    PSParallelCompact::_pointer_tracking = false;
 112 bool                    PSParallelCompact::_root_tracking = true;
 113 
 114 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL;
 115 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL;
 116 GrowableArray<size_t>   * PSParallelCompact::_cur_gc_live_oops_size = NULL;
 117 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
 118 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
 119 GrowableArray<size_t>   * PSParallelCompact::_last_gc_live_oops_size = NULL;
 120 #endif
 121 
 122 void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
 123                        HeapWord* destination)
 124 {
 125   assert(src_region_idx != 0, "invalid src_region_idx");
 126   assert(partial_obj_size != 0, "invalid partial_obj_size argument");
 127   assert(destination != NULL, "invalid destination argument");
 128 
 129   _src_region_idx = src_region_idx;
 130   _partial_obj_size = partial_obj_size;
 131   _destination = destination;
 132 
 133   // These fields may not be updated below, so make sure they're clear.
 134   assert(_dest_region_addr == NULL, "should have been cleared");
 135   assert(_first_src_addr == NULL, "should have been cleared");
 136 
 137   // Determine the number of destination regions for the partial object.
 138   HeapWord* const last_word = destination + partial_obj_size - 1;
 139   const ParallelCompactData& sd = PSParallelCompact::summary_data();
 140   HeapWord* const beg_region_addr = sd.region_align_down(destination);
 141   HeapWord* const end_region_addr = sd.region_align_down(last_word);
 142 
 143   if (beg_region_addr == end_region_addr) {
 144     // One destination region.
 145     _destination_count = 1;
 146     if (end_region_addr == destination) {
 147       // The destination falls on a region boundary, thus the first word of the
 148       // partial object will be the first word copied to the destination region.
 149       _dest_region_addr = end_region_addr;
 150       _first_src_addr = sd.region_to_addr(src_region_idx);
 151     }
 152   } else {
 153     // Two destination regions.  When copied, the partial object will cross a
 154     // destination region boundary, so a word somewhere within the partial
 155     // object will be the first word copied to the second destination region.
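         // For example, with 512-word regions, a 400-word partial object copied to
         // a destination 300 words past a region boundary ends in the next region;
         // ofs is then 512 - 300 == 212, so the word 212 words into the source
         // region becomes the first word copied to the second destination region.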
 156     _destination_count = 2;
 157     _dest_region_addr = end_region_addr;
 158     const size_t ofs = pointer_delta(end_region_addr, destination);
 159     assert(ofs < _partial_obj_size, "sanity");
 160     _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
 161   }
 162 }
 163 
 164 void SplitInfo::clear()
 165 {
 166   _src_region_idx = 0;
 167   _partial_obj_size = 0;
 168   _destination = NULL;
 169   _destination_count = 0;
 170   _dest_region_addr = NULL;
 171   _first_src_addr = NULL;
 172   assert(!is_valid(), "sanity");
 173 }
 174 
 175 #ifdef  ASSERT
 176 void SplitInfo::verify_clear()
 177 {
 178   assert(_src_region_idx == 0, "not clear");
 179   assert(_partial_obj_size == 0, "not clear");
 180   assert(_destination == NULL, "not clear");
 181   assert(_destination_count == 0, "not clear");
 182   assert(_dest_region_addr == NULL, "not clear");
 183   assert(_first_src_addr == NULL, "not clear");
 184 }
 185 #endif  // #ifdef ASSERT
 186 
 187 
 188 #ifndef PRODUCT
 189 const char* PSParallelCompact::space_names[] = {
 190   "perm", "old ", "eden", "from", "to  "
 191 };
 192 
 193 void PSParallelCompact::print_region_ranges()
 194 {
 195   tty->print_cr("space  bottom     top        end        new_top");
 196   tty->print_cr("------ ---------- ---------- ---------- ----------");
 197 
 198   for (unsigned int id = 0; id < last_space_id; ++id) {
 199     const MutableSpace* space = _space_info[id].space();
 200     tty->print_cr("%u %s "
 201                   SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
 202                   SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
 203                   id, space_names[id],
 204                   summary_data().addr_to_region_idx(space->bottom()),
 205                   summary_data().addr_to_region_idx(space->top()),
 206                   summary_data().addr_to_region_idx(space->end()),
 207                   summary_data().addr_to_region_idx(_space_info[id].new_top()));
 208   }
 209 }
 210 
 211 void
 212 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
 213 {
 214 #define REGION_IDX_FORMAT        SIZE_FORMAT_W(7)
 215 #define REGION_DATA_FORMAT       SIZE_FORMAT_W(5)
 216 
 217   ParallelCompactData& sd = PSParallelCompact::summary_data();
 218   size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
 219   tty->print_cr(REGION_IDX_FORMAT " " PTR_FORMAT " "
 220                 REGION_IDX_FORMAT " " PTR_FORMAT " "
 221                 REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
 222                 REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
 223                 i, c->data_location(), dci, c->destination(),
 224                 c->partial_obj_size(), c->live_obj_size(),
 225                 c->data_size(), c->source_region(), c->destination_count());
 226 
 227 #undef  REGION_IDX_FORMAT
 228 #undef  REGION_DATA_FORMAT
 229 }
 230 
 231 void
 232 print_generic_summary_data(ParallelCompactData& summary_data,
 233                            HeapWord* const beg_addr,
 234                            HeapWord* const end_addr)
 235 {
 236   size_t total_words = 0;
 237   size_t i = summary_data.addr_to_region_idx(beg_addr);
 238   const size_t last = summary_data.addr_to_region_idx(end_addr);
 239   HeapWord* pdest = 0;
 240 
 241   while (i <= last) {
 242     ParallelCompactData::RegionData* c = summary_data.region(i);
 243     if (c->data_size() != 0 || c->destination() != pdest) {
 244       print_generic_summary_region(i, c);
 245       total_words += c->data_size();
 246       pdest = c->destination();
 247     }
 248     ++i;
 249   }
 250 
 251   tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
 252 }
 253 
 254 void
 255 print_generic_summary_data(ParallelCompactData& summary_data,
 256                            SpaceInfo* space_info)
 257 {
 258   for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
 259     const MutableSpace* space = space_info[id].space();
 260     print_generic_summary_data(summary_data, space->bottom(),
 261                                MAX2(space->top(), space_info[id].new_top()));
 262   }
 263 }
 264 
 265 void
 266 print_initial_summary_region(size_t i,
 267                              const ParallelCompactData::RegionData* c,
 268                              bool newline = true)
 269 {
 270   tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
 271              SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
 272              SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
 273              i, c->destination(),
 274              c->partial_obj_size(), c->live_obj_size(),
 275              c->data_size(), c->source_region(), c->destination_count());
 276   if (newline) tty->cr();
 277 }
 278 
 279 void
 280 print_initial_summary_data(ParallelCompactData& summary_data,
 281                            const MutableSpace* space) {
 282   if (space->top() == space->bottom()) {
 283     return;
 284   }
 285 
 286   const size_t region_size = ParallelCompactData::RegionSize;
 287   typedef ParallelCompactData::RegionData RegionData;
 288   HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
 289   const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
 290   const RegionData* c = summary_data.region(end_region - 1);
 291   HeapWord* end_addr = c->destination() + c->data_size();
 292   const size_t live_in_space = pointer_delta(end_addr, space->bottom());
 293 
 294   // Print (and count) the full regions at the beginning of the space.
 295   size_t full_region_count = 0;
 296   size_t i = summary_data.addr_to_region_idx(space->bottom());
 297   while (i < end_region && summary_data.region(i)->data_size() == region_size) {
 298     print_initial_summary_region(i, summary_data.region(i));
 299     ++full_region_count;
 300     ++i;
 301   }
 302 
 303   size_t live_to_right = live_in_space - full_region_count * region_size;
 304 
 305   double max_reclaimed_ratio = 0.0;
 306   size_t max_reclaimed_ratio_region = 0;
 307   size_t max_dead_to_right = 0;
 308   size_t max_live_to_right = 0;
 309 
 310   // Print the 'reclaimed ratio' for regions while there is something live in
 311   // the region or to the right of it.  The remaining regions are empty (and
 312   // uninteresting), and computing the ratio will result in division by 0.
 313   while (i < end_region && live_to_right > 0) {
 314     c = summary_data.region(i);
 315     HeapWord* const region_addr = summary_data.region_to_addr(i);
 316     const size_t used_to_right = pointer_delta(space->top(), region_addr);
 317     const size_t dead_to_right = used_to_right - live_to_right;
 318     const double reclaimed_ratio = double(dead_to_right) / live_to_right;
 319 
 320     if (reclaimed_ratio > max_reclaimed_ratio) {
 321       max_reclaimed_ratio = reclaimed_ratio;
 322       max_reclaimed_ratio_region = i;
 323       max_dead_to_right = dead_to_right;
 324       max_live_to_right = live_to_right;
 325     }
 326 
 327     print_initial_summary_region(i, c, false);
 328     tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
 329                   reclaimed_ratio, dead_to_right, live_to_right);
 330 
 331     live_to_right -= c->data_size();
 332     ++i;
 333   }
 334 
 335   // Any remaining regions are empty.  Print one more if there is one.
 336   if (i < end_region) {
 337     print_initial_summary_region(i, summary_data.region(i));
 338   }
 339 
 340   tty->print_cr("max:  " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
 341                 "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
 342                 max_reclaimed_ratio_region, max_dead_to_right,
 343                 max_live_to_right, max_reclaimed_ratio);
 344 }
 345 
 346 void
 347 print_initial_summary_data(ParallelCompactData& summary_data,
 348                            SpaceInfo* space_info) {
 349   unsigned int id = PSParallelCompact::perm_space_id;
 350   const MutableSpace* space;
 351   do {
 352     space = space_info[id].space();
 353     print_initial_summary_data(summary_data, space);
 354   } while (++id < PSParallelCompact::eden_space_id);
 355 
 356   do {
 357     space = space_info[id].space();
 358     print_generic_summary_data(summary_data, space->bottom(), space->top());
 359   } while (++id < PSParallelCompact::last_space_id);
 360 }
 361 #endif  // #ifndef PRODUCT
 362 
 363 #ifdef  ASSERT
 364 size_t add_obj_count;
 365 size_t add_obj_size;
 366 size_t mark_bitmap_count;
 367 size_t mark_bitmap_size;
 368 #endif  // #ifdef ASSERT
 369 
 370 ParallelCompactData::ParallelCompactData()
 371 {
 372   _region_start = 0;
 373 
 374   _region_vspace = 0;
 375   _region_data = 0;
 376   _region_count = 0;
 377 }
 378 
 379 bool ParallelCompactData::initialize(MemRegion covered_region)
 380 {
 381   _region_start = covered_region.start();
 382   const size_t region_size = covered_region.word_size();
 383   DEBUG_ONLY(_region_end = _region_start + region_size;)
 384 
 385   assert(region_align_down(_region_start) == _region_start,
 386          "region start not aligned");
 387   assert((region_size & RegionSizeOffsetMask) == 0,
 388          "region size not a multiple of RegionSize");
 389 
 390   bool result = initialize_region_data(region_size);
 391 
 392   return result;
 393 }
 394 
 395 PSVirtualSpace*
 396 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 397 {
 398   const size_t raw_bytes = count * element_size;
 399   const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
 400   const size_t granularity = os::vm_allocation_granularity();
 401   const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 402 
 403   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
 404     MAX2(page_sz, granularity);
 405   ReservedSpace rs(bytes, rs_align, rs_align > 0);
 406   os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
 407                        rs.size());
 408   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
 409   if (vspace != 0) {
 410     if (vspace->expand_by(bytes)) {
 411       return vspace;
 412     }
 413     delete vspace;
 414     // Release memory reserved in the space.
 415     rs.release();
 416   }
 417 
 418   return 0;
 419 }
 420 
 421 bool ParallelCompactData::initialize_region_data(size_t region_size)
 422 {
 423   const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
 424   _region_vspace = create_vspace(count, sizeof(RegionData));
 425   if (_region_vspace != 0) {
 426     _region_data = (RegionData*)_region_vspace->reserved_low_addr();
 427     _region_count = count;
 428     return true;
 429   }
 430   return false;
 431 }
 432 
 433 void ParallelCompactData::clear()
 434 {
 435   memset(_region_data, 0, _region_vspace->committed_size());
 436 }
 437 
 438 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
 439   assert(beg_region <= _region_count, "beg_region out of range");
 440   assert(end_region <= _region_count, "end_region out of range");
 441 
 442   const size_t region_cnt = end_region - beg_region;
 443   memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
 444 }
 445 
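     // Return the address one past the end of the partial object (if any) that
     // extends onto the region with the given index.  The object may completely
     // cover one or more following regions (each reporting a partial_obj_size()
     // of RegionSize), in which case the walk continues until the region holding
     // its last word.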
 446 HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
 447 {
 448   const RegionData* cur_cp = region(region_idx);
 449   const RegionData* const end_cp = region(region_count() - 1);
 450 
 451   HeapWord* result = region_to_addr(region_idx);
 452   if (cur_cp < end_cp) {
 453     do {
 454       result += cur_cp->partial_obj_size();
 455     } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
 456   }
 457   return result;
 458 }
 459 
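     // Record a live object of len words beginning at addr.  The words up to the
     // first region boundary are added to the first region's live-object size; any
     // regions completely covered record a partial_obj_size of RegionSize; the last
     // region records the number of words that extend onto it.  Every spanned
     // region after the first also records addr as its partial_obj_addr.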
 460 void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
 461 {
 462   const size_t obj_ofs = pointer_delta(addr, _region_start);
 463   const size_t beg_region = obj_ofs >> Log2RegionSize;
 464   const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
 465 
 466   DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
 467   DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
 468 
 469   if (beg_region == end_region) {
 470     // All in one region.
 471     _region_data[beg_region].add_live_obj(len);
 472     return;
 473   }
 474 
 475   // First region.
 476   const size_t beg_ofs = region_offset(addr);
 477   _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);
 478 
 479   klassOop klass = ((oop)addr)->klass();
 480   // Middle regions--completely spanned by this object.
 481   for (size_t region = beg_region + 1; region < end_region; ++region) {
 482     _region_data[region].set_partial_obj_size(RegionSize);
 483     _region_data[region].set_partial_obj_addr(addr);
 484   }
 485 
 486   // Last region.
 487   const size_t end_ofs = region_offset(addr + len - 1);
 488   _region_data[end_region].set_partial_obj_size(end_ofs + 1);
 489   _region_data[end_region].set_partial_obj_addr(addr);
 490 }
 491 
 492 void
 493 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
 494 {
 495   assert(region_offset(beg) == 0, "not RegionSize aligned");
 496   assert(region_offset(end) == 0, "not RegionSize aligned");
 497 
 498   size_t cur_region = addr_to_region_idx(beg);
 499   const size_t end_region = addr_to_region_idx(end);
 500   HeapWord* addr = beg;
 501   while (cur_region < end_region) {
 502     _region_data[cur_region].set_destination(addr);
 503     _region_data[cur_region].set_destination_count(0);
 504     _region_data[cur_region].set_source_region(cur_region);
 505     _region_data[cur_region].set_data_location(addr);
 506 
 507     // Update live_obj_size so the region appears completely full.
 508     size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
 509     _region_data[cur_region].set_live_obj_size(live_size);
 510 
 511     ++cur_region;
 512     addr += RegionSize;
 513   }
 514 }
 515 
 516 // Find the point at which a space can be split and, if necessary, record the
 517 // split point.
 518 //
 519 // If the current src region (which overflowed the destination space) doesn't
 520 // have a partial object, the split point is at the beginning of the current src
 521 // region (an "easy" split, no extra bookkeeping required).
 522 //
 523 // If the current src region has a partial object, the split point is in the
 524 // region where that partial object starts (call it the split_region).  If
 525 // split_region has a partial object, then the split point is just after that
 526 // partial object (a "hard" split where we have to record the split data and
 527 // zero the partial_obj_size field).  With a "hard" split, we know that the
 528 // partial_obj ends within split_region because the partial object that caused
 529 // the overflow starts in split_region.  If split_region doesn't have a partial
 530 // obj, then the split is at the beginning of split_region (another "easy"
 531 // split).
 532 HeapWord*
 533 ParallelCompactData::summarize_split_space(size_t src_region,
 534                                            SplitInfo& split_info,
 535                                            HeapWord* destination,
 536                                            HeapWord* target_end,
 537                                            HeapWord** target_next)
 538 {
 539   assert(destination <= target_end, "sanity");
 540   assert(destination + _region_data[src_region].data_size() > target_end,
 541     "region should not fit into target space");
 542   assert(is_region_aligned(target_end), "sanity");
 543 
 544   size_t split_region = src_region;
 545   HeapWord* split_destination = destination;
 546   size_t partial_obj_size = _region_data[src_region].partial_obj_size();
 547 
 548   if (destination + partial_obj_size > target_end) {
 549     // The split point is just after the partial object (if any) in the
 550     // src_region that contains the start of the object that overflowed the
 551     // destination space.
 552     //
 553     // Find the start of the "overflow" object and set split_region to the
 554     // region containing it.
 555     HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
 556     split_region = addr_to_region_idx(overflow_obj);
 557 
 558     // Clear the source_region field of all destination regions whose first word
 559     // came from data after the split point (a non-null source_region field
 560     // implies a region must be filled).
 561     //
 562     // An alternative to the simple loop below:  clear during post_compact(),
 563     // which uses memcpy instead of individual stores, and is easy to
 564     // parallelize.  (The downside is that it clears the entire RegionData
 565     // object as opposed to just one field.)
 566     //
 567     // post_compact() would have to clear the summary data up to the highest
 568     // address that was written during the summary phase, which would be
 569     //
 570     //         max(top, max(new_top, clear_top))
 571     //
 572     // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
 573     // to target_end.
 574     const RegionData* const sr = region(split_region);
 575     const size_t beg_idx =
 576       addr_to_region_idx(region_align_up(sr->destination() +
 577                                          sr->partial_obj_size()));
 578     const size_t end_idx = addr_to_region_idx(target_end);
 579 
 580     if (TraceParallelOldGCSummaryPhase) {
 581         gclog_or_tty->print_cr("split:  clearing source_region field in ["
 582                                SIZE_FORMAT ", " SIZE_FORMAT ")",
 583                                beg_idx, end_idx);
 584     }
 585     for (size_t idx = beg_idx; idx < end_idx; ++idx) {
 586       _region_data[idx].set_source_region(0);
 587     }
 588 
 589     // Set split_destination and partial_obj_size to reflect the split region.
 590     split_destination = sr->destination();
 591     partial_obj_size = sr->partial_obj_size();
 592   }
 593 
 594   // The split is recorded only if a partial object extends onto the region.
 595   if (partial_obj_size != 0) {
 596     _region_data[split_region].set_partial_obj_size(0);
 597     split_info.record(split_region, partial_obj_size, split_destination);
 598   }
 599 
 600   // Setup the continuation addresses.
 601   *target_next = split_destination + partial_obj_size;
 602   HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
 603 
 604   if (TraceParallelOldGCSummaryPhase) {
 605     const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
 606     gclog_or_tty->print_cr("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT
 607                            " pos=" SIZE_FORMAT,
 608                            split_type, source_next, split_region,
 609                            partial_obj_size);
 610     gclog_or_tty->print_cr("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT
 611                            " tn=" PTR_FORMAT,
 612                            split_type, split_destination,
 613                            addr_to_region_idx(split_destination),
 614                            *target_next);
 615 
 616     if (partial_obj_size != 0) {
 617       HeapWord* const po_beg = split_info.destination();
 618       HeapWord* const po_end = po_beg + split_info.partial_obj_size();
 619       gclog_or_tty->print_cr("%s split:  "
 620                              "po_beg=" PTR_FORMAT " " SIZE_FORMAT " "
 621                              "po_end=" PTR_FORMAT " " SIZE_FORMAT,
 622                              split_type,
 623                              po_beg, addr_to_region_idx(po_beg),
 624                              po_end, addr_to_region_idx(po_end));
 625     }
 626   }
 627 
 628   return source_next;
 629 }
 630 
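     // Fill in summary data for the source regions covering [source_beg, source_end):
     // set each region's destination and destination_count, and set the source_region
     // of any destination region whose first word will come from that source region.
     // Returns true if all of the live data fits into [target_beg, target_end),
     // leaving *target_next one past the last word written; returns false on
     // overflow, in which case a split is recorded and *source_next is set to the
     // first source word that must be summarized into another target space.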
 631 bool ParallelCompactData::summarize(SplitInfo& split_info,
 632                                     HeapWord* source_beg, HeapWord* source_end,
 633                                     HeapWord** source_next,
 634                                     HeapWord* target_beg, HeapWord* target_end,
 635                                     HeapWord** target_next)
 636 {
 637   if (TraceParallelOldGCSummaryPhase) {
 638     HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
 639     tty->print_cr("sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
 640                   " tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
 641                   source_beg, source_end, source_next_val,
 642                   target_beg, target_end, *target_next);
 643   }
 644 
 645   size_t cur_region = addr_to_region_idx(source_beg);
 646   const size_t end_region = addr_to_region_idx(region_align_up(source_end));
 647 
 648   HeapWord *dest_addr = target_beg;
 649   while (cur_region < end_region) {
 650     // The destination must be set even if the region has no data.
 651     _region_data[cur_region].set_destination(dest_addr);
 652 
 653     size_t words = _region_data[cur_region].data_size();
 654     if (words > 0) {
 655       // If cur_region does not fit entirely into the target space, find a point
 656       // at which the source space can be 'split' so that part is copied to the
 657       // target space and the rest is copied elsewhere.
 658       if (dest_addr + words > target_end) {
 659         assert(source_next != NULL, "source_next is NULL when splitting");
 660         *source_next = summarize_split_space(cur_region, split_info, dest_addr,
 661                                              target_end, target_next);
 662         return false;
 663       }
 664 
 665       // Compute the destination_count for cur_region, and if necessary, update
 666       // source_region for a destination region.  The source_region field is
 667       // updated if cur_region is the first (left-most) region to be copied to a
 668       // destination region.
 669       //
 670       // The destination_count calculation is a bit subtle.  A region that has
 671       // data that compacts into itself does not count itself as a destination.
 672       // This maintains the invariant that a zero count means the region is
 673       // available and can be claimed and then filled.
 674       uint destination_count = 0;
 675       if (split_info.is_split(cur_region)) {
 676         // The current region has been split:  the partial object will be copied
 677         // to one destination space and the remaining data will be copied to
 678         // another destination space.  Adjust the initial destination_count and,
 679         // if necessary, set the source_region field if the partial object will
 680         // cross a destination region boundary.
 681         destination_count = split_info.destination_count();
 682         if (destination_count == 2) {
 683           size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
 684           _region_data[dest_idx].set_source_region(cur_region);
 685         }
 686       }
 687 
 688       HeapWord* const last_addr = dest_addr + words - 1;
 689       const size_t dest_region_1 = addr_to_region_idx(dest_addr);
 690       const size_t dest_region_2 = addr_to_region_idx(last_addr);
 691 
 692       // Initially assume that the destination regions will be the same and
 693       // adjust the value below if necessary.  Under this assumption, if
 694       // cur_region == dest_region_2, then cur_region will be compacted
 695       // completely into itself.
 696       destination_count += cur_region == dest_region_2 ? 0 : 1;
 697       if (dest_region_1 != dest_region_2) {
 698         // Destination regions differ; adjust destination_count.
 699         destination_count += 1;
 700         // Data from cur_region will be copied to the start of dest_region_2.
 701         _region_data[dest_region_2].set_source_region(cur_region);
 702       } else if (region_offset(dest_addr) == 0) {
 703         // Data from cur_region will be copied to the start of the destination
 704         // region.
 705         _region_data[dest_region_1].set_source_region(cur_region);
 706       }
 707 
 708       _region_data[cur_region].set_destination_count(destination_count);
 709       _region_data[cur_region].set_data_location(region_to_addr(cur_region));
 710       dest_addr += words;
 711     }
 712 
 713     ++cur_region;
 714   }
 715 
 716   *target_next = dest_addr;
 717   return true;
 718 }
 719 
 720 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
 721   assert(addr != NULL, "Should detect NULL oop earlier");
 722   assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
 723 #ifdef ASSERT
 724   if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
 725     gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
 726   }
 727 #endif
 728   assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
 729 
 730   // Region covering the object.
 731   size_t region_index = addr_to_region_idx(addr);
 732   const RegionData* const region_ptr = region(region_index);
 733   HeapWord* const region_addr = region_align_down(addr);
 734 
 735   assert(addr < region_addr + RegionSize, "Region does not cover object");
 736   assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");
 737 
 738   HeapWord* result = region_ptr->destination();
 739 
 740   // If all the data in the region is live, then the new location of the object
 741   // can be calculated from the destination of the region plus the offset of the
 742   // object in the region.
 743   if (region_ptr->data_size() == RegionSize) {
 744     result += pointer_delta(addr, region_addr);
 745     DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
 746     return result;
 747   }
 748 
 749   // The new location of the object is
 750   //    region destination +
 751   //    size of the partial object extending onto the region +
 752   //    sizes of the live objects in the Region that are to the left of addr
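       // (e.g., if a 100-word partial object extends onto the region and 60 live
       // words precede addr within the region, the result is destination() + 160).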
 753   const size_t partial_obj_size = region_ptr->partial_obj_size();
 754   HeapWord* const search_start = region_addr + partial_obj_size;
 755 
 756   const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
 757   size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
 758 
 759   result += partial_obj_size + live_to_left;
 760   DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
 761   return result;
 762 }
 763 
 764 klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
 765   klassOop updated_klass;
 766   if (PSParallelCompact::should_update_klass(old_klass)) {
 767     updated_klass = (klassOop) calc_new_pointer(old_klass);
 768   } else {
 769     updated_klass = old_klass;
 770   }
 771 
 772   return updated_klass;
 773 }
 774 
 775 #ifdef  ASSERT
 776 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
 777 {
 778   const size_t* const beg = (const size_t*)vspace->committed_low_addr();
 779   const size_t* const end = (const size_t*)vspace->committed_high_addr();
 780   for (const size_t* p = beg; p < end; ++p) {
 781     assert(*p == 0, "not zero");
 782   }
 783 }
 784 
 785 void ParallelCompactData::verify_clear()
 786 {
 787   verify_clear(_region_vspace);
 788 }
 789 #endif  // #ifdef ASSERT
 790 
 791 #ifndef PRODUCT
 792 ParallelCompactData::RegionData* debug_region(size_t region_index) {
 793   ParallelCompactData& sd = PSParallelCompact::summary_data();
 794   return sd.region(region_index);
 795 }
 796 #endif
 797 
 798 elapsedTimer        PSParallelCompact::_accumulated_time;
 799 unsigned int        PSParallelCompact::_total_invocations = 0;
 800 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
 801 jlong               PSParallelCompact::_time_of_last_gc = 0;
 802 CollectorCounters*  PSParallelCompact::_counters = NULL;
 803 ParMarkBitMap       PSParallelCompact::_mark_bitmap;
 804 ParallelCompactData PSParallelCompact::_summary_data;
 805 
 806 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
 807 
 808 void PSParallelCompact::IsAliveClosure::do_object(oop p)   { ShouldNotReachHere(); }
 809 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 810 
 811 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
 812 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
 813 
 814 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
 815 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
 816 
 817 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
 818 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
 819 
 820 void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
 821 
 822 void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(_compaction_manager, p); }
 823 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
 824 
 825 void PSParallelCompact::post_initialize() {
 826   ParallelScavengeHeap* heap = gc_heap();
 827   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 828 
 829   MemRegion mr = heap->reserved_region();
 830   _ref_processor =
 831     new ReferenceProcessor(mr,            // span
 832                            ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
 833                            (int) ParallelGCThreads, // mt processing degree
 834                            true,          // mt discovery
 835                            (int) ParallelGCThreads, // mt discovery degree
 836                            true,          // atomic_discovery
 837                            &_is_alive_closure, // non-header is alive closure
 838                            false);        // write barrier for next field updates
 839   _counters = new CollectorCounters("PSParallelCompact", 1);
 840 
 841   // Initialize static fields in ParCompactionManager.
 842   ParCompactionManager::initialize(mark_bitmap());
 843 }
 844 
 845 bool PSParallelCompact::initialize() {
 846   ParallelScavengeHeap* heap = gc_heap();
 847   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 848   MemRegion mr = heap->reserved_region();
 849 
 850   // Was the old gen allocated successfully?
 851   if (!heap->old_gen()->is_allocated()) {
 852     return false;
 853   }
 854 
 855   initialize_space_info();
 856   initialize_dead_wood_limiter();
 857 
 858   if (!_mark_bitmap.initialize(mr)) {
 859     vm_shutdown_during_initialization("Unable to allocate bit map for "
 860       "parallel garbage collection for the requested heap size.");
 861     return false;
 862   }
 863 
 864   if (!_summary_data.initialize(mr)) {
 865     vm_shutdown_during_initialization("Unable to allocate tables for "
 866       "parallel garbage collection for the requested heap size.");
 867     return false;
 868   }
 869 
 870   return true;
 871 }
 872 
 873 void PSParallelCompact::initialize_space_info()
 874 {
 875   memset(&_space_info, 0, sizeof(_space_info));
 876 
 877   ParallelScavengeHeap* heap = gc_heap();
 878   PSYoungGen* young_gen = heap->young_gen();
 879   MutableSpace* perm_space = heap->perm_gen()->object_space();
 880 
 881   _space_info[perm_space_id].set_space(perm_space);
 882   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
 883   _space_info[eden_space_id].set_space(young_gen->eden_space());
 884   _space_info[from_space_id].set_space(young_gen->from_space());
 885   _space_info[to_space_id].set_space(young_gen->to_space());
 886 
 887   _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
 888   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
 889 
 890   _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
 891   if (TraceParallelOldGCDensePrefix) {
 892     tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
 893                   _space_info[perm_space_id].min_dense_prefix());
 894   }
 895 }
 896 
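     // Precompute the terms of the normal (Gaussian) density used by
     // dead_wood_limiter():  _dwl_first_term is the 1/(sqrt(2*pi)*sigma)
     // coefficient and _dwl_adjustment is the density's value at 1.0, which
     // dead_wood_limiter() subtracts so that a completely full space is limited
     // to min_percent.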
 897 void PSParallelCompact::initialize_dead_wood_limiter()
 898 {
 899   const size_t max = 100;
 900   _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
 901   _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
 902   _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
 903   DEBUG_ONLY(_dwl_initialized = true;)
 904   _dwl_adjustment = normal_distribution(1.0);
 905 }
 906 
 907 // Simple class for storing info about the heap at the start of GC, to be used
 908 // after GC for comparison/printing.
 909 class PreGCValues {
 910 public:
 911   PreGCValues() { }
 912   PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }
 913 
 914   void fill(ParallelScavengeHeap* heap) {
 915     _heap_used      = heap->used();
 916     _young_gen_used = heap->young_gen()->used_in_bytes();
 917     _old_gen_used   = heap->old_gen()->used_in_bytes();
 918     _perm_gen_used  = heap->perm_gen()->used_in_bytes();
 919   }
 920 
 921   size_t heap_used() const      { return _heap_used; }
 922   size_t young_gen_used() const { return _young_gen_used; }
 923   size_t old_gen_used() const   { return _old_gen_used; }
 924   size_t perm_gen_used() const  { return _perm_gen_used; }
 925 
 926 private:
 927   size_t _heap_used;
 928   size_t _young_gen_used;
 929   size_t _old_gen_used;
 930   size_t _perm_gen_used;
 931 };
 932 
 933 void
 934 PSParallelCompact::clear_data_covering_space(SpaceId id)
 935 {
 936   // At this point, top is the value before GC, new_top() is the value that will
 937   // be set at the end of GC.  The marking bitmap is cleared to top; nothing
 938   // should be marked above top.  The summary data is cleared to the larger of
 939   // top & new_top.
 940   MutableSpace* const space = _space_info[id].space();
 941   HeapWord* const bot = space->bottom();
 942   HeapWord* const top = space->top();
 943   HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
 944 
 945   const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
 946   const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
 947   _mark_bitmap.clear_range(beg_bit, end_bit);
 948 
 949   const size_t beg_region = _summary_data.addr_to_region_idx(bot);
 950   const size_t end_region =
 951     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
 952   _summary_data.clear_range(beg_region, end_region);
 953 
 954   // Clear the data used to 'split' regions.
 955   SplitInfo& split_info = _space_info[id].split_info();
 956   if (split_info.is_valid()) {
 957     split_info.clear();
 958   }
 959   DEBUG_ONLY(split_info.verify_clear();)
 960 }
 961 
 962 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
 963 {
 964   // Update the from & to space pointers in space_info, since they are swapped
 965   // at each young gen gc.  Do the update unconditionally (even though a
 966   // promotion failure does not swap spaces) because an unknown number of minor
 967   // collections may have swapped the spaces since they were last recorded.
 968   TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
 969   ParallelScavengeHeap* heap = gc_heap();
 970   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
 971   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 972 
 973   pre_gc_values->fill(heap);
 974 
 975   ParCompactionManager::reset();
 976   NOT_PRODUCT(_mark_bitmap.reset_counters());
 977   DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
 978   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
 979 
 980   // Increment the invocation count
 981   heap->increment_total_collections(true);
 982 
 983   // We need to track unique mark sweep invocations as well.
 984   _total_invocations++;
 985 
 986   if (PrintHeapAtGC) {
 987     Universe::print_heap_before_gc();
 988   }
 989 
 990   // Fill in TLABs
 991   heap->accumulate_statistics_all_tlabs();
 992   heap->ensure_parsability(true);  // retire TLABs
 993 
 994   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
 995     HandleMark hm;  // Discard invalid handles created during verification
 996     gclog_or_tty->print(" VerifyBeforeGC:");
 997     Universe::verify(true);
 998   }
 999 
1000   // Verify object start arrays
1001   if (VerifyObjectStartArray &&
1002       VerifyBeforeGC) {
1003     heap->old_gen()->verify_object_start_array();
1004     heap->perm_gen()->verify_object_start_array();
1005   }
1006 
1007   DEBUG_ONLY(mark_bitmap()->verify_clear();)
1008   DEBUG_ONLY(summary_data().verify_clear();)
1009 
1010   // Have worker threads release resources the next time they run a task.
1011   gc_task_manager()->release_all_resources();
1012 }
1013 
1014 void PSParallelCompact::post_compact()
1015 {
1016   TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
1017 
1018   for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
1019     // Clear the marking bitmap, summary data and split info.
1020     clear_data_covering_space(SpaceId(id));
1021     // Update top().  Must be done after clearing the bitmap and summary data.
1022     _space_info[id].publish_new_top();
1023   }
1024 
1025   MutableSpace* const eden_space = _space_info[eden_space_id].space();
1026   MutableSpace* const from_space = _space_info[from_space_id].space();
1027   MutableSpace* const to_space   = _space_info[to_space_id].space();
1028 
1029   ParallelScavengeHeap* heap = gc_heap();
1030   bool eden_empty = eden_space->is_empty();
1031   if (!eden_empty) {
1032     eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1033                                             heap->young_gen(), heap->old_gen());
1034   }
1035 
1036   // Update heap occupancy information which is used as input to the soft ref
1037   // clearing policy at the next gc.
1038   Universe::update_heap_info_at_gc();
1039 
1040   bool young_gen_empty = eden_empty && from_space->is_empty() &&
1041     to_space->is_empty();
1042 
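       // If the young gen ended up completely empty there can be no old-to-young
       // pointers, so the card marks covering perm and old can simply be cleared;
       // otherwise dirty (invalidate) them all, since compaction has moved objects
       // and the existing marks can no longer be trusted.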
1043   BarrierSet* bs = heap->barrier_set();
1044   if (bs->is_a(BarrierSet::ModRef)) {
1045     ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
1046     MemRegion old_mr = heap->old_gen()->reserved();
1047     MemRegion perm_mr = heap->perm_gen()->reserved();
1048     assert(perm_mr.end() <= old_mr.start(), "Generations out of order");
1049 
1050     if (young_gen_empty) {
1051       modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
1052     } else {
1053       modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
1054     }
1055   }
1056 
1057   Threads::gc_epilogue();
1058   CodeCache::gc_epilogue();
1059   JvmtiExport::gc_epilogue();
1060 
1061   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1062 
1063   ref_processor()->enqueue_discovered_references(NULL);
1064 
1065   if (ZapUnusedHeapArea) {
1066     heap->gen_mangle_unused_area();
1067   }
1068 
1069   // Update time of last GC
1070   reset_millis_since_last_gc();
1071 }
1072 
1073 HeapWord*
1074 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
1075                                                     bool maximum_compaction)
1076 {
1077   const size_t region_size = ParallelCompactData::RegionSize;
1078   const ParallelCompactData& sd = summary_data();
1079 
1080   const MutableSpace* const space = _space_info[id].space();
1081   HeapWord* const top_aligned_up = sd.region_align_up(space->top());
1082   const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
1083   const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
1084 
1085   // Skip full regions at the beginning of the space--they are necessarily part
1086   // of the dense prefix.
1087   size_t full_count = 0;
1088   const RegionData* cp;
1089   for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
1090     ++full_count;
1091   }
1092 
1093   assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1094   const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1095   const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
1096   if (maximum_compaction || cp == end_cp || interval_ended) {
1097     _maximum_compaction_gc_num = total_invocations();
1098     return sd.region_to_addr(cp);
1099   }
1100 
1101   HeapWord* const new_top = _space_info[id].new_top();
1102   const size_t space_live = pointer_delta(new_top, space->bottom());
1103   const size_t space_used = space->used_in_words();
1104   const size_t space_capacity = space->capacity_in_words();
1105 
1106   const double cur_density = double(space_live) / space_capacity;
1107   const double deadwood_density =
1108     (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
1109   const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
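       // For example, deadwood_density peaks at 0.0625 when cur_density is 0.5 and
       // falls toward zero as the density approaches either 0.0 or 1.0.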
1110 
1111   if (TraceParallelOldGCDensePrefix) {
1112     tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
1113                   cur_density, deadwood_density, deadwood_goal);
1114     tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1115                   "space_cap=" SIZE_FORMAT,
1116                   space_live, space_used,
1117                   space_capacity);
1118   }
1119 
1120   // XXX - Use binary search?
1121   HeapWord* dense_prefix = sd.region_to_addr(cp);
1122   const RegionData* full_cp = cp;
1123   const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
1124   while (cp < end_cp) {
1125     HeapWord* region_destination = cp->destination();
1126     const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
1127     if (TraceParallelOldGCDensePrefix && Verbose) {
1128       tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
1129                     "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
1130                     sd.region(cp), region_destination,
1131                     dense_prefix, cur_deadwood);
1132     }
1133 
1134     if (cur_deadwood >= deadwood_goal) {
1135       // Found the region that has the correct amount of deadwood to the left.
1136       // This typically occurs after crossing a fairly sparse set of regions, so
1137       // iterate backwards over those sparse regions, looking for the region
1138       // that has the lowest density of live objects 'to the right.'
1139       size_t space_to_left = sd.region(cp) * region_size;
1140       size_t live_to_left = space_to_left - cur_deadwood;
1141       size_t space_to_right = space_capacity - space_to_left;
1142       size_t live_to_right = space_live - live_to_left;
1143       double density_to_right = double(live_to_right) / space_to_right;
1144       while (cp > full_cp) {
1145         --cp;
1146         const size_t prev_region_live_to_right = live_to_right -
1147           cp->data_size();
1148         const size_t prev_region_space_to_right = space_to_right + region_size;
1149         double prev_region_density_to_right =
1150           double(prev_region_live_to_right) / prev_region_space_to_right;
1151         if (density_to_right <= prev_region_density_to_right) {
1152           return dense_prefix;
1153         }
1154         if (TraceParallelOldGCDensePrefix && Verbose) {
1155           tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
1156                         "pc_d2r=%10.8f", sd.region(cp), density_to_right,
1157                         prev_region_density_to_right);
1158         }
1159         dense_prefix -= region_size;
1160         live_to_right = prev_region_live_to_right;
1161         space_to_right = prev_region_space_to_right;
1162         density_to_right = prev_region_density_to_right;
1163       }
1164       return dense_prefix;
1165     }
1166 
1167     dense_prefix += region_size;
1168     ++cp;
1169   }
1170 
1171   return dense_prefix;
1172 }
1173 
1174 #ifndef PRODUCT
1175 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
1176                                                  const SpaceId id,
1177                                                  const bool maximum_compaction,
1178                                                  HeapWord* const addr)
1179 {
1180   const size_t region_idx = summary_data().addr_to_region_idx(addr);
1181   RegionData* const cp = summary_data().region(region_idx);
1182   const MutableSpace* const space = _space_info[id].space();
1183   HeapWord* const new_top = _space_info[id].new_top();
1184 
1185   const size_t space_live = pointer_delta(new_top, space->bottom());
1186   const size_t dead_to_left = pointer_delta(addr, cp->destination());
1187   const size_t space_cap = space->capacity_in_words();
1188   const double dead_to_left_pct = double(dead_to_left) / space_cap;
1189   const size_t live_to_right = new_top - cp->destination();
1190   const size_t dead_to_right = space->top() - addr - live_to_right;
1191 
1192   tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
1193                 "spl=" SIZE_FORMAT " "
1194                 "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
1195                 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
1196                 " ratio=%10.8f",
1197                 algorithm, addr, region_idx,
1198                 space_live,
1199                 dead_to_left, dead_to_left_pct,
1200                 dead_to_right, live_to_right,
1201                 double(dead_to_right) / live_to_right);
1202 }
1203 #endif  // #ifndef PRODUCT
1204 
1205 // Return a fraction indicating how much of the generation can be treated as
1206 // "dead wood" (i.e., not reclaimed).  The function uses a normal distribution
1207 // based on the density of live objects in the generation to determine a limit,
1208 // which is then adjusted so the return value is min_percent when the density is
1209 // 1.
1210 //
1211 // The following table shows some return values for different values of the
1212 // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
1213 // min_percent is 1.
1214 //
1215 //                          fraction allowed as dead wood
1216 //         -----------------------------------------------------------------
1217 // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
1218 // ------- ---------- ---------- ---------- ---------- ---------- ----------
1219 // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1220 // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1221 // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1222 // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1223 // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1224 // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1225 // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1226 // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1227 // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1228 // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1229 // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
1230 // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1231 // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1232 // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1233 // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1234 // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1235 // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1236 // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1237 // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1238 // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1239 // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
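     // For example, reading the std_dev=80 column above:  a space that is half live
     // (density 0.50) may keep roughly 9.8% of its capacity as dead wood, and the
     // allowance falls to the 1% minimum as the density approaches 0 or 1.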
1240 
1241 double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
1242 {
1243   assert(_dwl_initialized, "uninitialized");
1244 
1245   // The raw limit is the value of the normal distribution at x = density.
1246   const double raw_limit = normal_distribution(density);
1247 
1248   // Adjust the raw limit so it becomes the minimum when the density is 1.
1249   //
1250   // First subtract the adjustment value (which is simply the precomputed value
1251   // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
1252   // Then add the minimum value, so the minimum is returned when the density is
1253   // 1.  Finally, prevent negative values, which occur when the mean is not 0.5.
1254   const double min = double(min_percent) / 100.0;
1255   const double limit = raw_limit - _dwl_adjustment + min;
1256   return MAX2(limit, 0.0);
1257 }
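
// A minimal, guarded-out sketch showing how the table above can be
// regenerated.  It assumes normal_distribution() is the Gaussian density with
// mean ParallelOldDeadWoodLimiterMean/100 and standard deviation
// ParallelOldDeadWoodLimiterStdDev/100, as set up in
// initialize_dead_wood_limiter(); the helper below is illustrative only and
// is not used by the collector.
#if 0
static double sketch_dead_wood_limit(double density, double mean,
                                     double std_dev, double min_fraction)
{
  const double first_term = 1.0 / (std_dev * sqrt(2.0 * M_PI));
  const double d = (density - mean) / std_dev;
  const double raw_limit = first_term * exp(-0.5 * d * d);   // normal_distribution(density)
  const double e = (1.0 - mean) / std_dev;
  const double adjustment = first_term * exp(-0.5 * e * e);  // normal_distribution(1.0)
  return MAX2(raw_limit - adjustment + min_fraction, 0.0);
}
// E.g., sketch_dead_wood_limit(0.5, 0.5, 0.80, 0.01) is approximately 0.0985,
// matching the std_dev=80 column at density 0.5 in the table above.
#endif  // #if 0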
1258 
1259 ParallelCompactData::RegionData*
1260 PSParallelCompact::first_dead_space_region(const RegionData* beg,
1261                                            const RegionData* end)
1262 {
1263   const size_t region_size = ParallelCompactData::RegionSize;
1264   ParallelCompactData& sd = summary_data();
1265   size_t left = sd.region(beg);
1266   size_t right = end > beg ? sd.region(end) - 1 : left;
1267 
1268   // Binary search.
1269   while (left < right) {
1270     // Equivalent to (left + right) / 2, but does not overflow.
1271     const size_t middle = left + (right - left) / 2;
1272     RegionData* const middle_ptr = sd.region(middle);
1273     HeapWord* const dest = middle_ptr->destination();
1274     HeapWord* const addr = sd.region_to_addr(middle);
1275     assert(dest != NULL, "sanity");
1276     assert(dest <= addr, "must move left");
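    // If this region's destination is to the left of its address, dead space
    // already exists to its left, so the first such region is further left.
    // If the region is completely full, the first region with dead space must
    // be further right.  Otherwise this region is the one sought.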
1277 
1278     if (middle > left && dest < addr) {
1279       right = middle - 1;
1280     } else if (middle < right && middle_ptr->data_size() == region_size) {
1281       left = middle + 1;
1282     } else {
1283       return middle_ptr;
1284     }
1285   }
1286   return sd.region(left);
1287 }
1288 
1289 ParallelCompactData::RegionData*
1290 PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
1291                                           const RegionData* end,
1292                                           size_t dead_words)
1293 {
1294   ParallelCompactData& sd = summary_data();
1295   size_t left = sd.region(beg);
1296   size_t right = end > beg ? sd.region(end) - 1 : left;
1297 
1298   // Binary search.
1299   while (left < right) {
1300     // Equivalent to (left + right) / 2, but does not overflow.
1301     const size_t middle = left + (right - left) / 2;
1302     RegionData* const middle_ptr = sd.region(middle);
1303     HeapWord* const dest = middle_ptr->destination();
1304     HeapWord* const addr = sd.region_to_addr(middle);
1305     assert(dest != NULL, "sanity");
1306     assert(dest <= addr, "must move left");
1307 
1308     const size_t dead_to_left = pointer_delta(addr, dest);
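    // dead_to_left is the dead space that will have accumulated to the left
    // of this region; narrow the search until it brackets dead_words.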
1309     if (middle > left && dead_to_left > dead_words) {
1310       right = middle - 1;
1311     } else if (middle < right && dead_to_left < dead_words) {
1312       left = middle + 1;
1313     } else {
1314       return middle_ptr;
1315     }
1316   }
1317   return sd.region(left);
1318 }
1319 
1320 // The result is valid during the summary phase, after the initial summarization
1321 // of each space into itself, and before final summarization.
1322 inline double
1323 PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
1324                                    HeapWord* const bottom,
1325                                    HeapWord* const top,
1326                                    HeapWord* const new_top)
1327 {
1328   ParallelCompactData& sd = summary_data();
1329 
1330   assert(cp != NULL, "sanity");
1331   assert(bottom != NULL, "sanity");
1332   assert(top != NULL, "sanity");
1333   assert(new_top != NULL, "sanity");
1334   assert(top >= new_top, "summary data problem?");
1335   assert(new_top > bottom, "space is empty; should not be here");
1336   assert(new_top >= cp->destination(), "sanity");
1337   assert(top >= sd.region_to_addr(cp), "sanity");
1338 
1339   HeapWord* const destination = cp->destination();
1340   const size_t dense_prefix_live  = pointer_delta(destination, bottom);
1341   const size_t compacted_region_live = pointer_delta(new_top, destination);
1342   const size_t compacted_region_used = pointer_delta(top,
1343                                                      sd.region_to_addr(cp));
1344   const size_t reclaimable = compacted_region_used - compacted_region_live;
1345 
1346   const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
1347   return double(reclaimable) / divisor;
1348 }
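
// In reclaimed_ratio() above, for example (hypothetical numbers): with
// dense_prefix_live = 400K words, compacted_region_live = 100K words and
// compacted_region_used = 300K words, reclaimable = 200K words and the ratio
// is 200K / (400K + 1.25 * 100K), or about 0.38.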
1349 
1350 // Return the address of the end of the dense prefix, a.k.a. the start of the
1351 // compacted region.  The address is always on a region boundary.
1352 //
1353 // Completely full regions at the left are skipped, since no compaction can
1354 // occur in those regions.  Then the maximum amount of dead wood to allow is
1355 // computed, based on the density (amount live / capacity) of the generation;
1356 // the region with approximately that amount of dead space to the left is
1357 // identified as the limit region.  Regions between the last completely full
1358 // region and the limit region are scanned and the one that has the best
1359 // (maximum) reclaimed_ratio() is selected.
1360 HeapWord*
1361 PSParallelCompact::compute_dense_prefix(const SpaceId id,
1362                                         bool maximum_compaction)
1363 {
1364   if (ParallelOldGCSplitALot) {
1365     if (_space_info[id].dense_prefix() != _space_info[id].space()->bottom()) {
1366       // The value was chosen to provoke splitting a young gen space; use it.
1367       return _space_info[id].dense_prefix();
1368     }
1369   }
1370 
1371   const size_t region_size = ParallelCompactData::RegionSize;
1372   const ParallelCompactData& sd = summary_data();
1373 
1374   const MutableSpace* const space = _space_info[id].space();
1375   HeapWord* const top = space->top();
1376   HeapWord* const top_aligned_up = sd.region_align_up(top);
1377   HeapWord* const new_top = _space_info[id].new_top();
1378   HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
1379   HeapWord* const bottom = space->bottom();
1380   const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
1381   const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
1382   const RegionData* const new_top_cp =
1383     sd.addr_to_region_ptr(new_top_aligned_up);
1384 
1385   // Skip full regions at the beginning of the space--they are necessarily part
1386   // of the dense prefix.
1387   const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
1388   assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
1389          space->is_empty(), "no dead space allowed to the left");
1390   assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
1391          "region must have dead space");
1392 
1393   // The gc number is saved whenever a maximum compaction is done, and used to
1394   // determine when the maximum compaction interval has expired.  This avoids
1395   // successive max compactions for different reasons.
1396   assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1397   const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1398   const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
1399     total_invocations() == HeapFirstMaximumCompactionCount;
1400   if (maximum_compaction || full_cp == top_cp || interval_ended) {
1401     _maximum_compaction_gc_num = total_invocations();
1402     return sd.region_to_addr(full_cp);
1403   }
1404 
1405   const size_t space_live = pointer_delta(new_top, bottom);
1406   const size_t space_used = space->used_in_words();
1407   const size_t space_capacity = space->capacity_in_words();
1408 
1409   const double density = double(space_live) / double(space_capacity);
1410   const size_t min_percent_free =
1411           id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
1412   const double limiter = dead_wood_limiter(density, min_percent_free);
1413   const size_t dead_wood_max = space_used - space_live;
1414   const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
1415                                       dead_wood_max);
1416 
1417   if (TraceParallelOldGCDensePrefix) {
1418     tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1419                   "space_cap=" SIZE_FORMAT,
1420                   space_live, space_used,
1421                   space_capacity);
1422     tty->print_cr("dead_wood_limiter(%6.4f, " SIZE_FORMAT ")=%6.4f "
1423                   "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
1424                   density, min_percent_free, limiter,
1425                   dead_wood_max, dead_wood_limit);
1426   }
1427 
1428   // Locate the region with the desired amount of dead space to the left.
1429   const RegionData* const limit_cp =
1430     dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
1431 
1432   // Scan from the first region with dead space to the limit region and find the
1433   // one with the best (largest) reclaimed ratio.
1434   double best_ratio = 0.0;
1435   const RegionData* best_cp = full_cp;
1436   for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
1437     double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
1438     if (tmp_ratio > best_ratio) {
1439       best_cp = cp;
1440       best_ratio = tmp_ratio;
1441     }
1442   }
1443 
1444 #if     0
1445   // Something to consider:  if the region with the best ratio is 'close to' the
1446   // first region w/free space, choose the first region with free space
1447   // ("first-free").  The first-free region is usually near the start of the
1448   // heap, which means we are copying most of the heap already, so copy a bit
1449   // more to get complete compaction.
1450   if (pointer_delta(best_cp, full_cp, sizeof(RegionData)) < 4) {
1451     _maximum_compaction_gc_num = total_invocations();
1452     best_cp = full_cp;
1453   }
1454 #endif  // #if 0
1455 
1456   return sd.region_to_addr(best_cp);
1457 }
1458 
1459 #ifndef PRODUCT
1460 void
1461 PSParallelCompact::fill_with_live_objects(SpaceId id, HeapWord* const start,
1462                                           size_t words)
1463 {
1464   if (TraceParallelOldGCSummaryPhase) {
1465     tty->print_cr("fill_with_live_objects [" PTR_FORMAT " " PTR_FORMAT ") "
1466                   SIZE_FORMAT, start, start + words, words);
1467   }
1468 
1469   ObjectStartArray* const start_array = _space_info[id].start_array();
1470   CollectedHeap::fill_with_objects(start, words);
1471   for (HeapWord* p = start; p < start + words; p += oop(p)->size()) {
1472     _mark_bitmap.mark_obj(p, oop(p)->size());  // Mark each filler object with its own
1473     _summary_data.add_obj(p, oop(p)->size());  // size; the range may contain several.
1474     start_array->allocate_block(p);
1475   }
1476 }
1477 
1478 void
1479 PSParallelCompact::summarize_new_objects(SpaceId id, HeapWord* start)
1480 {
1481   ParallelCompactData& sd = summary_data();
1482   MutableSpace* space = _space_info[id].space();
1483 
1484   // Find the source and destination start addresses.
1485   HeapWord* const src_addr = sd.region_align_down(start);
1486   HeapWord* dst_addr;
1487   if (src_addr < start) {
1488     dst_addr = sd.addr_to_region_ptr(src_addr)->destination();
1489   } else if (src_addr > space->bottom()) {
1490     // The start (the original top() value) is aligned to a region boundary so
1491     // the associated region does not have a destination.  Compute the
1492     // destination from the previous region.
1493     RegionData* const cp = sd.addr_to_region_ptr(src_addr) - 1;
1494     dst_addr = cp->destination() + cp->data_size();
1495   } else {
1496     // Filling the entire space.
1497     dst_addr = space->bottom();
1498   }
1499   assert(dst_addr != NULL, "sanity");
1500 
1501   // Update the summary data.
1502   bool result = _summary_data.summarize(_space_info[id].split_info(),
1503                                         src_addr, space->top(), NULL,
1504                                         dst_addr, space->end(),
1505                                         _space_info[id].new_top_addr());
1506   assert(result, "should not fail:  bad filler object size");
1507 }
1508 
1509 void
1510 PSParallelCompact::provoke_split_fill_survivor(SpaceId id)
1511 {
1512   if (total_invocations() % (ParallelOldGCSplitInterval * 3) != 0) {
1513     return;
1514   }
1515 
1516   MutableSpace* const space = _space_info[id].space();
1517   if (space->is_empty()) {
1518     HeapWord* b = space->bottom();
1519     HeapWord* t = b + space->capacity_in_words() / 2;
1520     space->set_top(t);
1521     if (ZapUnusedHeapArea) {
1522       space->set_top_for_allocations();
1523     }
1524 
1525     size_t min_size = CollectedHeap::min_fill_size();
1526     size_t obj_len = min_size;
1527     while (b + obj_len <= t) {
1528       CollectedHeap::fill_with_object(b, obj_len);
1529       mark_bitmap()->mark_obj(b, obj_len);
1530       summary_data().add_obj(b, obj_len);
1531       b += obj_len;
1532       obj_len = (obj_len & (min_size*3)) + min_size; // 8 16 24 32 8 16 24 32 ...
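      // (With min_size a power of two, the expression above cycles through
      // min_size, 2*min_size, 3*min_size and 4*min_size.)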
1533     }
1534     if (b < t) {
1535       // The loop didn't completely fill to t (top); adjust top downward.
1536       space->set_top(b);
1537       if (ZapUnusedHeapArea) {
1538         space->set_top_for_allocations();
1539       }
1540     }
1541 
1542     HeapWord** nta = _space_info[id].new_top_addr();
1543     bool result = summary_data().summarize(_space_info[id].split_info(),
1544                                            space->bottom(), space->top(), NULL,
1545                                            space->bottom(), space->end(), nta);
1546     assert(result, "space must fit into itself");
1547   }
1548 }
1549 
1550 void
1551 PSParallelCompact::provoke_split(bool & max_compaction)
1552 {
1553   if (total_invocations() % ParallelOldGCSplitInterval != 0) {
1554     return;
1555   }
1556 
1557   const size_t region_size = ParallelCompactData::RegionSize;
1558   ParallelCompactData& sd = summary_data();
1559 
1560   MutableSpace* const eden_space = _space_info[eden_space_id].space();
1561   MutableSpace* const from_space = _space_info[from_space_id].space();
1562   const size_t eden_live = pointer_delta(eden_space->top(),
1563                                          _space_info[eden_space_id].new_top());
1564   const size_t from_live = pointer_delta(from_space->top(),
1565                                          _space_info[from_space_id].new_top());
1566 
1567   const size_t min_fill_size = CollectedHeap::min_fill_size();
1568   const size_t eden_free = pointer_delta(eden_space->end(), eden_space->top());
1569   const size_t eden_fillable = eden_free >= min_fill_size ? eden_free : 0;
1570   const size_t from_free = pointer_delta(from_space->end(), from_space->top());
1571   const size_t from_fillable = from_free >= min_fill_size ? from_free : 0;
1572 
1573   // Choose the space to split; need at least 2 regions live (or fillable).
1574   SpaceId id;
1575   MutableSpace* space;
1576   size_t live_words;
1577   size_t fill_words;
1578   if (eden_live + eden_fillable >= region_size * 2) {
1579     id = eden_space_id;
1580     space = eden_space;
1581     live_words = eden_live;
1582     fill_words = eden_fillable;
1583   } else if (from_live + from_fillable >= region_size * 2) {
1584     id = from_space_id;
1585     space = from_space;
1586     live_words = from_live;
1587     fill_words = from_fillable;
1588   } else {
1589     return; // Give up.
1590   }
1591   assert(fill_words == 0 || fill_words >= min_fill_size, "sanity");
1592 
1593   if (live_words < region_size * 2) {
1594     // Fill from top() to end() w/live objects of mixed sizes.
1595     HeapWord* const fill_start = space->top();
1596     live_words += fill_words;
1597 
1598     space->set_top(fill_start + fill_words);
1599     if (ZapUnusedHeapArea) {
1600       space->set_top_for_allocations();
1601     }
1602 
1603     HeapWord* cur_addr = fill_start;
1604     while (fill_words > 0) {
1605       const size_t r = (size_t)os::random() % (region_size / 2) + min_fill_size;
1606       size_t cur_size = MIN2(align_object_size_(r), fill_words);
1607       if (fill_words - cur_size < min_fill_size) {
1608         cur_size = fill_words; // Avoid leaving a fragment too small to fill.
1609       }
1610 
1611       CollectedHeap::fill_with_object(cur_addr, cur_size);
1612       mark_bitmap()->mark_obj(cur_addr, cur_size);
1613       sd.add_obj(cur_addr, cur_size);
1614 
1615       cur_addr += cur_size;
1616       fill_words -= cur_size;
1617     }
1618 
1619     summarize_new_objects(id, fill_start);
1620   }
1621 
1622   max_compaction = false;
1623 
1624   // Manipulate the old gen so that it has room for about half of the live data
1625   // in the target young gen space (live_words / 2).
1626   id = old_space_id;
1627   space = _space_info[id].space();
1628   const size_t free_at_end = space->free_in_words();
1629   const size_t free_target = align_object_size(live_words / 2);
1630   const size_t dead = pointer_delta(space->top(), _space_info[id].new_top());
1631 
1632   if (free_at_end >= free_target + min_fill_size) {
1633     // Fill space above top() and set the dense prefix so everything survives.
1634     HeapWord* const fill_start = space->top();
1635     const size_t fill_size = free_at_end - free_target;
1636     space->set_top(space->top() + fill_size);
1637     if (ZapUnusedHeapArea) {
1638       space->set_top_for_allocations();
1639     }
1640     fill_with_live_objects(id, fill_start, fill_size);
1641     summarize_new_objects(id, fill_start);
1642     _space_info[id].set_dense_prefix(sd.region_align_down(space->top()));
1643   } else if (dead + free_at_end > free_target) {
1644     // Find a dense prefix that makes the right amount of space available.
1645     HeapWord* cur = sd.region_align_down(space->top());
1646     HeapWord* cur_destination = sd.addr_to_region_ptr(cur)->destination();
1647     size_t dead_to_right = pointer_delta(space->end(), cur_destination);
1648     while (dead_to_right < free_target) {
1649       cur -= region_size;
1650       cur_destination = sd.addr_to_region_ptr(cur)->destination();
1651       dead_to_right = pointer_delta(space->end(), cur_destination);
1652     }
1653     _space_info[id].set_dense_prefix(cur);
1654   }
1655 }
1656 #endif // #ifndef PRODUCT
1657 
1658 void PSParallelCompact::summarize_spaces_quick()
1659 {
1660   for (unsigned int i = 0; i < last_space_id; ++i) {
1661     const MutableSpace* space = _space_info[i].space();
1662     HeapWord** nta = _space_info[i].new_top_addr();
1663     bool result = _summary_data.summarize(_space_info[i].split_info(),
1664                                           space->bottom(), space->top(), NULL,
1665                                           space->bottom(), space->end(), nta);
1666     assert(result, "space must fit into itself");
1667     _space_info[i].set_dense_prefix(space->bottom());
1668   }
1669 
1670 #ifndef PRODUCT
1671   if (ParallelOldGCSplitALot) {
1672     provoke_split_fill_survivor(to_space_id);
1673   }
1674 #endif // #ifndef PRODUCT
1675 }
1676 
1677 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1678 {
1679   HeapWord* const dense_prefix_end = dense_prefix(id);
1680   const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
1681   const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
1682   if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
1683     // Only enough dead space is filled so that any remaining dead space to the
1684     // left is larger than the minimum filler object.  (The remainder is filled
1685     // during the copy/update phase.)
1686     //
1687     // The size of the dead space to the right of the boundary is not a
1688     // concern, since compaction will be able to use whatever space is
1689     // available.
1690     //
1691     // Here '||' is the boundary, 'x' represents a don't care bit and a box
1692     // surrounds the space to be filled with an object.
1693     //
1694     // In the 32-bit VM, each bit represents two 32-bit words:
1695     //                              +---+
1696     // a) beg_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
1697     //    end_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
1698     //                              +---+
1699     //
1700     // In the 64-bit VM, each bit represents one 64-bit word:
1701     //                              +------------+
1702     // b) beg_bits:  ...  x   x   x | 0   ||   0 | x  x  ...
1703     //    end_bits:  ...  x   x   1 | 0   ||   0 | x  x  ...
1704     //                              +------------+
1705     //                          +-------+
1706     // c) beg_bits:  ...  x   x | 0   0 | ||   0   x  x  ...
1707     //    end_bits:  ...  x   1 | 0   0 | ||   0   x  x  ...
1708     //                          +-------+
1709     //                      +-----------+
1710     // d) beg_bits:  ...  x | 0   0   0 | ||   0   x  x  ...
1711     //    end_bits:  ...  1 | 0   0   0 | ||   0   x  x  ...
1712     //                      +-----------+
1713     //                          +-------+
1714     // e) beg_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
1715     //    end_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
1716     //                          +-------+
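    //
    // For example, in case (d) an object ends in the fourth word to the left
    // of the boundary and the three words that follow it are dead, so a
    // 3-word filler is installed at dense_prefix_end - 3.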
1717 
1718     // Initially assume case a, c or e will apply.
1719     size_t obj_len = CollectedHeap::min_fill_size();
1720     HeapWord* obj_beg = dense_prefix_end - obj_len;
1721 
1722 #ifdef  _LP64
1723     if (MinObjAlignment > 1) { // object alignment > heap word size
1724       // Cases a, c or e.
1725     } else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
1726       // Case b above.
1727       obj_beg = dense_prefix_end - 1;
1728     } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
1729                _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
1730       // Case d above.
1731       obj_beg = dense_prefix_end - 3;
1732       obj_len = 3;
1733     }
1734 #endif  // #ifdef _LP64
1735 
1736     CollectedHeap::fill_with_object(obj_beg, obj_len);
1737     _mark_bitmap.mark_obj(obj_beg, obj_len);
1738     _summary_data.add_obj(obj_beg, obj_len);
1739     assert(start_array(id) != NULL, "sanity");
1740     start_array(id)->allocate_block(obj_beg);
1741   }
1742 }
1743 
1744 void
1745 PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr)
1746 {
1747   RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr);
1748   HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr);
1749   RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up);
1750   for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) {
1751     cur->set_source_region(0);
1752   }
1753 }
1754 
1755 void
1756 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1757 {
1758   assert(id < last_space_id, "id out of range");
1759   assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom() ||
1760          ParallelOldGCSplitALot && id == old_space_id,
1761          "should have been reset in summarize_spaces_quick()");
1762 
1763   const MutableSpace* space = _space_info[id].space();
1764   if (_space_info[id].new_top() != space->bottom()) {
1765     HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1766     _space_info[id].set_dense_prefix(dense_prefix_end);
1767 
1768 #ifndef PRODUCT
1769     if (TraceParallelOldGCDensePrefix) {
1770       print_dense_prefix_stats("ratio", id, maximum_compaction,
1771                                dense_prefix_end);
1772       HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1773       print_dense_prefix_stats("density", id, maximum_compaction, addr);
1774     }
1775 #endif  // #ifndef PRODUCT
1776 
1777     // Recompute the summary data, taking into account the dense prefix.  If
1778     // every last byte will be reclaimed, then the existing summary data which
1779     // compacts everything can be left in place.
1780     if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1781       // If dead space crosses the dense prefix boundary, it is (at least
1782       // partially) filled with a dummy object, marked live and added to the
1783       // summary data.  This simplifies the copy/update phase and must be done
1784       // before the final locations of objects are determined, to prevent
1785       // leaving a fragment of dead space that is too small to fill.
1786       fill_dense_prefix_end(id);
1787 
1788       // Compute the destination of each Region, and thus each object.
1789       _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1790       _summary_data.summarize(_space_info[id].split_info(),
1791                               dense_prefix_end, space->top(), NULL,
1792                               dense_prefix_end, space->end(),
1793                               _space_info[id].new_top_addr());
1794     }
1795   }
1796 
1797   if (TraceParallelOldGCSummaryPhase) {
1798     const size_t region_size = ParallelCompactData::RegionSize;
1799     HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1800     const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1801     const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1802     HeapWord* const new_top = _space_info[id].new_top();
1803     const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1804     const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1805     tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1806                   "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1807                   "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1808                   id, space->capacity_in_words(), dense_prefix_end,
1809                   dp_region, dp_words / region_size,
1810                   cr_words / region_size, new_top);
1811   }
1812 }
1813 
1814 #ifndef PRODUCT
1815 void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1816                                           HeapWord* dst_beg, HeapWord* dst_end,
1817                                           SpaceId src_space_id,
1818                                           HeapWord* src_beg, HeapWord* src_end)
1819 {
1820   if (TraceParallelOldGCSummaryPhase) {
1821     tty->print_cr("summarizing %d [%s] into %d [%s]:  "
1822                   "src=" PTR_FORMAT "-" PTR_FORMAT " "
1823                   SIZE_FORMAT "-" SIZE_FORMAT " "
1824                   "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1825                   SIZE_FORMAT "-" SIZE_FORMAT,
1826                   src_space_id, space_names[src_space_id],
1827                   dst_space_id, space_names[dst_space_id],
1828                   src_beg, src_end,
1829                   _summary_data.addr_to_region_idx(src_beg),
1830                   _summary_data.addr_to_region_idx(src_end),
1831                   dst_beg, dst_end,
1832                   _summary_data.addr_to_region_idx(dst_beg),
1833                   _summary_data.addr_to_region_idx(dst_end));
1834   }
1835 }
1836 #endif  // #ifndef PRODUCT
1837 
1838 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1839                                       bool maximum_compaction)
1840 {
1841   EventMark m("2 summarize");
1842   TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
1843   // trace("2");
1844 
1845 #ifdef  ASSERT
1846   if (TraceParallelOldGCMarkingPhase) {
1847     tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1848                   "add_obj_bytes=" SIZE_FORMAT,
1849                   add_obj_count, add_obj_size * HeapWordSize);
1850     tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1851                   "mark_bitmap_bytes=" SIZE_FORMAT,
1852                   mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1853   }
1854 #endif  // #ifdef ASSERT
1855 
1856   // Quick summarization of each space into itself, to see how much is live.
1857   summarize_spaces_quick();
1858 
1859   if (TraceParallelOldGCSummaryPhase) {
1860     tty->print_cr("summary_phase:  after summarizing each space to self");
1861     Universe::print();
1862     NOT_PRODUCT(print_region_ranges());
1863     if (Verbose) {
1864       NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1865     }
1866   }
1867 
1868   // The amount of live data that will end up in old space (assuming it fits).
1869   size_t old_space_total_live = 0;
1870   assert(perm_space_id < old_space_id, "should not count perm data here");
1871   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1872     old_space_total_live += pointer_delta(_space_info[id].new_top(),
1873                                           _space_info[id].space()->bottom());
1874   }
1875 
1876   MutableSpace* const old_space = _space_info[old_space_id].space();
1877   const size_t old_capacity = old_space->capacity_in_words();
1878   if (old_space_total_live > old_capacity) {
1879     // XXX - should also try to expand
1880     maximum_compaction = true;
1881   }
1882 #ifndef PRODUCT
1883   if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) {
1884     provoke_split(maximum_compaction);
1885   }
1886 #endif // #ifndef PRODUCT
1887 
1888   // Permanent and Old generations.
1889   summarize_space(perm_space_id, maximum_compaction);
1890   summarize_space(old_space_id, maximum_compaction);
1891 
1892   // Summarize the remaining spaces in the young gen.  The initial target space
1893   // is the old gen.  If a space does not fit entirely into the target, then the
1894   // remainder is compacted into the space itself and that space becomes the new
1895   // target.
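  // For example, if eden's live data does not fit entirely in the old gen,
  // the part that fits is summarized into the old gen and the remainder into
  // eden itself; eden then becomes the target for the from and to spaces.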
1896   SpaceId dst_space_id = old_space_id;
1897   HeapWord* dst_space_end = old_space->end();
1898   HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
1899   for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
1900     const MutableSpace* space = _space_info[id].space();
1901     const size_t live = pointer_delta(_space_info[id].new_top(),
1902                                       space->bottom());
1903     const size_t available = pointer_delta(dst_space_end, *new_top_addr);
1904 
1905     NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
1906                                   SpaceId(id), space->bottom(), space->top());)
1907     if (live > 0 && live <= available) {
1908       // All the live data will fit.
1909       bool done = _summary_data.summarize(_space_info[id].split_info(),
1910                                           space->bottom(), space->top(),
1911                                           NULL,
1912                                           *new_top_addr, dst_space_end,
1913                                           new_top_addr);
1914       assert(done, "space must fit into old gen");
1915 
1916       // Reset the new_top value for the space.
1917       _space_info[id].set_new_top(space->bottom());
1918     } else if (live > 0) {
1919       // Attempt to fit part of the source space into the target space.
1920       HeapWord* next_src_addr = NULL;
1921       bool done = _summary_data.summarize(_space_info[id].split_info(),
1922                                           space->bottom(), space->top(),
1923                                           &next_src_addr,
1924                                           *new_top_addr, dst_space_end,
1925                                           new_top_addr);
1926       assert(!done, "space should not fit into old gen");
1927       assert(next_src_addr != NULL, "sanity");
1928 
1929       // The source space becomes the new target, so the remainder is compacted
1930       // within the space itself.
1931       dst_space_id = SpaceId(id);
1932       dst_space_end = space->end();
1933       new_top_addr = _space_info[id].new_top_addr();
1934       NOT_PRODUCT(summary_phase_msg(dst_space_id,
1935                                     space->bottom(), dst_space_end,
1936                                     SpaceId(id), next_src_addr, space->top());)
1937       done = _summary_data.summarize(_space_info[id].split_info(),
1938                                      next_src_addr, space->top(),
1939                                      NULL,
1940                                      space->bottom(), dst_space_end,
1941                                      new_top_addr);
1942       assert(done, "space must fit when compacted into itself");
1943       assert(*new_top_addr <= space->top(), "usage should not grow");
1944     }
1945   }
1946 
1947   if (TraceParallelOldGCSummaryPhase) {
1948     tty->print_cr("summary_phase:  after final summarization");
1949     Universe::print();
1950     NOT_PRODUCT(print_region_ranges());
1951     if (Verbose) {
1952       NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
1953     }
1954   }
1955 }
1956 
1957 // This method should contain all heap-specific policy for invoking a full
1958 // collection.  invoke_no_policy() will only attempt to compact the heap; it
1959 // will do nothing further.  If we need to bail out for policy reasons, scavenge
1960 // before full gc, or any other specialized behavior, it needs to be added here.
1961 //
1962 // Note that this method should only be called from the vm_thread while at a
1963 // safepoint.
1964 //
1965 // Note that the all_soft_refs_clear flag in the collector policy
1966 // may be true because this method can be called without intervening
1967 // activity.  For example, when the heap space is tight and full measures
1968 // are being taken to free space.
1969 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1970   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1971   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1972          "should be in vm thread");
1973 
1974   ParallelScavengeHeap* heap = gc_heap();
1975   GCCause::Cause gc_cause = heap->gc_cause();
1976   assert(!heap->is_gc_active(), "not reentrant");
1977 
1978   PSAdaptiveSizePolicy* policy = heap->size_policy();
1979   IsGCActiveMark mark;
1980 
1981   if (ScavengeBeforeFullGC) {
1982     PSScavenge::invoke_no_policy();
1983   }
1984 
1985   const bool clear_all_soft_refs =
1986     heap->collector_policy()->should_clear_all_soft_refs();
1987 
1988   PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1989                                       maximum_heap_compaction);
1990 }
1991 
1992 bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
1993   size_t addr_region_index = addr_to_region_idx(addr);
1994   return region_index == addr_region_index;
1995 }
1996 
1997 // This method contains no policy. You should probably
1998 // be calling invoke() instead.
1999 void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
2000   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
2001   assert(ref_processor() != NULL, "Sanity");
2002 
2003   if (GC_locker::check_active_before_gc()) {
2004     return;
2005   }
2006 
2007   TimeStamp marking_start;
2008   TimeStamp compaction_start;
2009   TimeStamp collection_exit;
2010 
2011   ParallelScavengeHeap* heap = gc_heap();
2012   GCCause::Cause gc_cause = heap->gc_cause();
2013   PSYoungGen* young_gen = heap->young_gen();
2014   PSOldGen* old_gen = heap->old_gen();
2015   PSPermGen* perm_gen = heap->perm_gen();
2016   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
2017 
2018   // The scope of casr should end after code that can change
2019   // CollectorPolicy::_should_clear_all_soft_refs.
2020   ClearedAllSoftRefs casr(maximum_heap_compaction,
2021                           heap->collector_policy());
2022 
2023   if (ZapUnusedHeapArea) {
2024     // Save information needed to minimize mangling
2025     heap->record_gen_tops_before_GC();
2026   }
2027 
2028   heap->pre_full_gc_dump();
2029 
2030   _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
2031 
2032   // Make sure data structures are sane, make the heap parsable, and do other
2033   // miscellaneous bookkeeping.
2034   PreGCValues pre_gc_values;
2035   pre_compact(&pre_gc_values);
2036 
2037   // Get the compaction manager reserved for the VM thread.
2038   ParCompactionManager* const vmthread_cm =
2039     ParCompactionManager::manager_array(gc_task_manager()->workers());
2040 
2041   // Place after pre_compact() where the number of invocations is incremented.
2042   AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
2043 
2044   {
2045     ResourceMark rm;
2046     HandleMark hm;
2047 
2048     // Set the number of GC threads to be used in this collection
2049     gc_task_manager()->set_active_gang();
2050     gc_task_manager()->task_idle_workers();
2051     heap->set_par_threads(gc_task_manager()->active_workers());
2052 
2053     const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
2054 
2055     // This is useful for debugging, but don't change the output the
2056     // customer sees.
2057     const char* gc_cause_str = "Full GC";
2058     if (is_system_gc && PrintGCDetails) {
2059       gc_cause_str = "Full GC (System)";
2060     }
2061     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
2062     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
2063     TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
2064     TraceCollectorStats tcs(counters());
2065     TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);
2066 
2067     if (TraceGen1Time) accumulated_time()->start();
2068 
2069     // Let the size policy know we're starting
2070     size_policy->major_collection_begin();
2071 
2072     // When collecting the permanent generation methodOops may be moving,
2073     // so we either have to flush all bcp data or convert it into bci.
2074     CodeCache::gc_prologue();
2075     Threads::gc_prologue();
2076 
2077     COMPILER2_PRESENT(DerivedPointerTable::clear());
2078 
2079     ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
2080     ref_processor()->setup_policy(maximum_heap_compaction);
2081 
2082     bool marked_for_unloading = false;
2083 
2084     marking_start.update();
2085     marking_phase(vmthread_cm, maximum_heap_compaction);
2086 
2087 #ifndef PRODUCT
2088     if (TraceParallelOldGCMarkingPhase) {
2089       gclog_or_tty->print_cr("marking_phase: cas_tries %d  cas_retries %d "
2090         "cas_by_another %d",
2091         mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
2092         mark_bitmap()->cas_by_another());
2093     }
2094 #endif  // #ifndef PRODUCT
2095 
2096     bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
2097     summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
2098 
2099     COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
2100     COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
2101 
2102     // adjust_roots() updates Universe::_intArrayKlassObj which is
2103     // needed by the compaction for filling holes in the dense prefix.
2104     adjust_roots();
2105 
2106     compaction_start.update();
2107     // Does the perm gen always have to be done serially because
2108     // klasses are used in the update of an object?
2109     compact_perm(vmthread_cm);
2110 
2111     compact();
2112 
2113     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
2114     // done before resizing.
2115     post_compact();
2116 
2117     // Let the size policy know we're done
2118     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
2119 
2120     if (UseAdaptiveSizePolicy) {
2121       if (PrintAdaptiveSizePolicy) {
2122         gclog_or_tty->print("AdaptiveSizeStart: ");
2123         gclog_or_tty->stamp();
2124         gclog_or_tty->print_cr(" collection: %d ",
2125                        heap->total_collections());
2126         if (Verbose) {
2127           gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
2128             " young_gen_capacity: " SIZE_FORMAT " perm_gen_capacity: " SIZE_FORMAT " ",
2129             old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
2130             perm_gen->capacity_in_bytes());
2131         }
2132       }
2133 
2134       // Don't check if the size_policy is ready here.  Let
2135       // the size_policy check that internally.
2136       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
2137           ((gc_cause != GCCause::_java_lang_system_gc) ||
2138             UseAdaptiveSizePolicyWithSystemGC)) {
2139         // Calculate optimal free space amounts
2140         assert(young_gen->max_size() >
2141           young_gen->from_space()->capacity_in_bytes() +
2142           young_gen->to_space()->capacity_in_bytes(),
2143           "Sizes of space in young gen are out-of-bounds");
2144         size_t max_eden_size = young_gen->max_size() -
2145           young_gen->from_space()->capacity_in_bytes() -
2146           young_gen->to_space()->capacity_in_bytes();
2147         size_policy->compute_generation_free_space(
2148                               young_gen->used_in_bytes(),
2149                               young_gen->eden_space()->used_in_bytes(),
2150                               old_gen->used_in_bytes(),
2151                               perm_gen->used_in_bytes(),
2152                               young_gen->eden_space()->capacity_in_bytes(),
2153                               old_gen->max_gen_size(),
2154                               max_eden_size,
2155                               true /* full gc*/,
2156                               gc_cause,
2157                               heap->collector_policy());
2158 
2159         heap->resize_old_gen(
2160           size_policy->calculated_old_free_size_in_bytes());
2161 
2162         // Don't resize the young generation at a major collection.  A
2163         // desired young generation size may have been calculated but
2164         // resizing the young generation complicates the code because the
2165         // resizing of the old generation may have moved the boundary
2166         // between the young generation and the old generation.  Let the
2167         // young generation resizing happen at the minor collections.
2168       }
2169       if (PrintAdaptiveSizePolicy) {
2170         gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
2171                        heap->total_collections());
2172       }
2173     }
2174 
2175     if (UsePerfData) {
2176       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
2177       counters->update_counters();
2178       counters->update_old_capacity(old_gen->capacity_in_bytes());
2179       counters->update_young_capacity(young_gen->capacity_in_bytes());
2180     }
2181 
2182     heap->resize_all_tlabs();
2183 
2184     // We collected the perm gen, so we'll resize it here.
2185     perm_gen->compute_new_size(pre_gc_values.perm_gen_used());
2186 
2187     if (TraceGen1Time) accumulated_time()->stop();
2188 
2189     if (PrintGC) {
2190       if (PrintGCDetails) {
2191         // No GC timestamp here.  This is after GC so it would be confusing.
2192         young_gen->print_used_change(pre_gc_values.young_gen_used());
2193         old_gen->print_used_change(pre_gc_values.old_gen_used());
2194         heap->print_heap_change(pre_gc_values.heap_used());
2195         // Print perm gen last (print_heap_change() excludes the perm gen).
2196         perm_gen->print_used_change(pre_gc_values.perm_gen_used());
2197       } else {
2198         heap->print_heap_change(pre_gc_values.heap_used());
2199       }
2200     }
2201 
2202     // Track memory usage and detect low memory
2203     MemoryService::track_memory_usage();
2204     heap->update_counters();
2205     gc_task_manager()->release_idle_workers();
2206   }
2207 
2208 #ifdef ASSERT
2209   for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
2210     ParCompactionManager* const cm =
2211       ParCompactionManager::manager_array(int(i));
2212     assert(cm->marking_stack()->is_empty(),       "should be empty");
2213     assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
2214     assert(cm->revisit_klass_stack()->is_empty(), "should be empty");
2215   }
2216 #endif // ASSERT
2217 
2218   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
2219     HandleMark hm;  // Discard invalid handles created during verification
2220     gclog_or_tty->print(" VerifyAfterGC:");
2221     Universe::verify(false);
2222   }
2223 
2224   // Re-verify object start arrays
2225   if (VerifyObjectStartArray &&
2226       VerifyAfterGC) {
2227     old_gen->verify_object_start_array();
2228     perm_gen->verify_object_start_array();
2229   }
2230 
2231   if (ZapUnusedHeapArea) {
2232     old_gen->object_space()->check_mangled_unused_area_complete();
2233     perm_gen->object_space()->check_mangled_unused_area_complete();
2234   }
2235 
2236   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
2237 
2238   collection_exit.update();
2239 
2240   if (PrintHeapAtGC) {
2241     Universe::print_heap_after_gc();
2242   }
2243   if (PrintGCTaskTimeStamps) {
2244     gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
2245                            INT64_FORMAT,
2246                            marking_start.ticks(), compaction_start.ticks(),
2247                            collection_exit.ticks());
2248     gc_task_manager()->print_task_time_stamps();
2249   }
2250 
2251   heap->post_full_gc_dump();
2252 
2253 #ifdef TRACESPINNING
2254   ParallelTaskTerminator::print_termination_counts();
2255 #endif
2256 }
2257 
2258 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
2259                                              PSYoungGen* young_gen,
2260                                              PSOldGen* old_gen) {
2261   MutableSpace* const eden_space = young_gen->eden_space();
2262   assert(!eden_space->is_empty(), "eden must be non-empty");
2263   assert(young_gen->virtual_space()->alignment() ==
2264          old_gen->virtual_space()->alignment(), "alignments do not match");
2265 
2266   if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
2267     return false;
2268   }
2269 
2270   // Both generations must be completely committed.
2271   if (young_gen->virtual_space()->uncommitted_size() != 0) {
2272     return false;
2273   }
2274   if (old_gen->virtual_space()->uncommitted_size() != 0) {
2275     return false;
2276   }
2277 
2278   // Figure out how much to take from eden.  Include the average amount promoted
2279   // in the total; otherwise the next young gen GC will simply bail out to a
2280   // full GC.
2281   const size_t alignment = old_gen->virtual_space()->alignment();
2282   const size_t eden_used = eden_space->used_in_bytes();
2283   const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
2284   const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
2285   const size_t eden_capacity = eden_space->capacity_in_bytes();
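  // For instance (hypothetical values): with eden_used = 30M, a padded
  // average promoted size of 6M and a 64K generation alignment, absorb_size
  // would be align_size_up(36M, 64K) = 36M; the checks below then require
  // that this still leaves room in eden and keeps the shrunken young gen at
  // or above its minimum size.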
2286 
2287   if (absorb_size >= eden_capacity) {
2288     return false; // Must leave some space in eden.
2289   }
2290 
2291   const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
2292   if (new_young_size < young_gen->min_gen_size()) {
2293     return false; // Respect young gen minimum size.
2294   }
2295 
2296   if (TraceAdaptiveGCBoundary && Verbose) {
2297     gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
2298                         "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
2299                         "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
2300                         "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
2301                         absorb_size / K,
2302                         eden_capacity / K, (eden_capacity - absorb_size) / K,
2303                         young_gen->from_space()->used_in_bytes() / K,
2304                         young_gen->to_space()->used_in_bytes() / K,
2305                         young_gen->capacity_in_bytes() / K, new_young_size / K);
2306   }
2307 
2308   // Fill the unused part of the old gen.
2309   MutableSpace* const old_space = old_gen->object_space();
2310   HeapWord* const unused_start = old_space->top();
2311   size_t const unused_words = pointer_delta(old_space->end(), unused_start);
2312 
2313   if (unused_words > 0) {
2314     if (unused_words < CollectedHeap::min_fill_size()) {
2315       return false;  // If the old gen cannot be filled, must give up.
2316     }
2317     CollectedHeap::fill_with_objects(unused_start, unused_words);
2318   }
2319 
2320   // Take the live data from eden and set both top and end in the old gen to
2321   // eden top.  (Need to set end because reset_after_change() mangles the region
2322   // from end to virtual_space->high() in debug builds).
2323   HeapWord* const new_top = eden_space->top();
2324   old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
2325                                         absorb_size);
2326   young_gen->reset_after_change();
2327   old_space->set_top(new_top);
2328   old_space->set_end(new_top);
2329   old_gen->reset_after_change();
2330 
2331   // Update the object start array for the filler object and the data from eden.
2332   ObjectStartArray* const start_array = old_gen->start_array();
2333   for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2334     start_array->allocate_block(p);
2335   }
2336 
2337   // Could update the promoted average here, but it is not typically updated at
2338   // full GCs and the value to use is unclear.  Something like
2339   //
2340   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2341 
2342   size_policy->set_bytes_absorbed_from_eden(absorb_size);
2343   return true;
2344 }
2345 
2346 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2347   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2348     "shouldn't return NULL");
2349   return ParallelScavengeHeap::gc_task_manager();
2350 }
2351 
2352 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2353                                       bool maximum_heap_compaction) {
2354   // Recursively traverse all live objects and mark them
2355   EventMark m("1 mark object");
2356   TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
2357 
2358   ParallelScavengeHeap* heap = gc_heap();
2359   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2360   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2361   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2362   ParallelTaskTerminator terminator(active_gc_threads, qset);
2363 
2364   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2365   PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
2366 
2367   {
2368     TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
2369     ParallelScavengeHeap::ParStrongRootsScope psrs;
2370 
2371     GCTaskQueue* q = GCTaskQueue::create();
2372 
2373     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2374     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2375     // We scan the thread roots in parallel
2376     Threads::create_thread_roots_marking_tasks(q);
2377     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2378     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2379     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2380     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2381     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2382     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2383 
2384     if (active_gc_threads > 1) {
2385       for (uint j = 0; j < active_gc_threads; j++) {
2386         q->enqueue(new StealMarkingTask(&terminator));
2387       }
2388     }
2389 
2390     gc_task_manager()->execute_and_wait(q);
2391   }
2392 
2393   // Process reference objects found during marking
2394   {
2395     TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
2396     if (ref_processor()->processing_is_mt()) {
2397       RefProcTaskExecutor task_executor;
2398       ref_processor()->process_discovered_references(
2399         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
2400         &task_executor);
2401     } else {
2402       ref_processor()->process_discovered_references(
2403         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL);
2404     }
2405   }
2406 
2407   TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
2408   // Follow system dictionary roots and unload classes.
2409   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2410 
2411   // Follow code cache roots.
2412   CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
2413                           purged_class);
2414   cm->follow_marking_stacks(); // Flush marking stack.
2415 
2416   // Update subklass/sibling/implementor links of live klasses
2417   // revisit_klass_stack is used in follow_weak_klass_links().
2418   follow_weak_klass_links();
2419 
2420   // Revisit memoized MDO's and clear any unmarked weak refs
2421   follow_mdo_weak_refs();
2422 
2423   // Visit interned string tables and delete unmarked oops
2424   StringTable::unlink(is_alive_closure());
2425   // Clean up unreferenced symbols in symbol table.
2426   SymbolTable::unlink();
2427 
2428   assert(cm->marking_stacks_empty(), "marking stacks should be empty");
2429 }
2430 
2431 // This should be moved to the shared markSweep code!
2432 class PSAlwaysTrueClosure: public BoolObjectClosure {
2433 public:
2434   void do_object(oop p) { ShouldNotReachHere(); }
2435   bool do_object_b(oop p) { return true; }
2436 };
2437 static PSAlwaysTrueClosure always_true;
2438 
2439 void PSParallelCompact::adjust_roots() {
2440   // Adjust the pointers to reflect the new locations
2441   EventMark m("3 adjust roots");
2442   TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
2443 
2444   // General strong roots.
2445   Universe::oops_do(adjust_root_pointer_closure());
2446   JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
2447   Threads::oops_do(adjust_root_pointer_closure(), NULL);
2448   ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
2449   FlatProfiler::oops_do(adjust_root_pointer_closure());
2450   Management::oops_do(adjust_root_pointer_closure());
2451   JvmtiExport::oops_do(adjust_root_pointer_closure());
2452   // SO_AllClasses
2453   SystemDictionary::oops_do(adjust_root_pointer_closure());
2454 
2455   // Now adjust pointers in remaining weak roots.  (All of which should
2456   // have been cleared if they pointed to non-surviving objects.)
2457   // Global (weak) JNI handles
2458   JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
2459 
2460   CodeCache::oops_do(adjust_pointer_closure());
2461   StringTable::oops_do(adjust_root_pointer_closure());
2462   ref_processor()->weak_oops_do(adjust_root_pointer_closure());
2463   // Roots were visited so references into the young gen in roots
2464   // may have been scanned.  Process them also.
2465   // Should the reference processor have a span that excludes
2466   // young gen objects?
2467   PSScavenge::reference_processor()->weak_oops_do(
2468                                               adjust_root_pointer_closure());
2469 }
2470 
2471 void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
2472   EventMark m("4 compact perm");
2473   TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
2474   // trace("4");
2475 
2476   gc_heap()->perm_gen()->start_array()->reset();
2477   move_and_update(cm, perm_space_id);
2478 }
2479 
2480 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2481                                                       uint parallel_gc_threads)
2482 {
2483   TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
2484 
2485   // Find the threads that are active
2486   unsigned int which = 0;
2487 
2488   const uint task_count = MAX2(parallel_gc_threads, 1U);
2489   for (uint j = 0; j < task_count; j++) {
2490     q->enqueue(new DrainStacksCompactionTask(j));
2491     ParCompactionManager::verify_region_list_empty(j);
2492     // Set the region stack variables to "no" region stack values so
2493     // that the stealing tasks will recognize managers that still need
2494     // a region stack if they do not get one by executing a draining
2495     // task.
2496     ParCompactionManager* cm = ParCompactionManager::manager_array(j);
2497     cm->set_region_stack(NULL);
2498     cm->set_region_stack_index((uint)max_uintx);
2499   }
2500   ParCompactionManager::reset_recycled_stack_index();
2501 
2502   // Find all regions that are available (can be filled immediately) and
2503   // distribute them to the thread stacks.  The iteration is done in reverse
2504   // order (high to low) so the regions will be removed in ascending order.
2505 
2506   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2507 
2508   size_t fillable_regions = 0;   // A count for diagnostic purposes.
2509   // "which" selects the task (created above) whose region list receives
2510   // the next fillable region; it must satisfy 0 <= which < task_count.
2511 
2512   which = 0;
2513   for (unsigned int id = to_space_id; id > perm_space_id; --id) {
2514     SpaceInfo* const space_info = _space_info + id;
2515     MutableSpace* const space = space_info->space();
2516     HeapWord* const new_top = space_info->new_top();
2517 
2518     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2519     const size_t end_region =
2520       sd.addr_to_region_idx(sd.region_align_up(new_top));
2521     assert(end_region > 0, "perm gen cannot be empty");
2522 
2523     for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
2524       if (sd.region(cur)->claim_unsafe()) {
2525         ParCompactionManager::region_list_push(which, cur);
2526 
2527         if (TraceParallelOldGCCompactionPhase && Verbose) {
2528           const size_t count_mod_8 = fillable_regions & 7;
2529           if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
2530           gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
2531           if (count_mod_8 == 7) gclog_or_tty->cr();
2532         }
2533 
2534         NOT_PRODUCT(++fillable_regions;)
2535 
2536         // Assign regions to tasks in round-robin fashion.
2537         if (++which == task_count) {
2538           assert(which <= parallel_gc_threads,
2539             "Inconsistent number of workers");
2540           which = 0;
2541         }
2542       }
2543     }
2544   }
2545 
2546   if (TraceParallelOldGCCompactionPhase) {
2547     if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
2548     gclog_or_tty->print_cr(SIZE_FORMAT " initially fillable regions", fillable_regions);
2549   }
2550 }
2551 
2552 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
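     // The dense prefix update work is split into at most
     // parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING tasks so
     // that threads finishing early can pick up additional chunks.  For
     // example, with 4 GC threads and 100 dense prefix regions, 16 tasks of 6
     // regions each are enqueued plus a final task covering the remaining 4
     // regions.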
2553 
2554 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2555                                                     uint parallel_gc_threads) {
2556   TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
2557 
2558   ParallelCompactData& sd = PSParallelCompact::summary_data();
2559 
2560   // Iterate over all the spaces adding tasks for updating
2561   // regions in the dense prefix.  Assume that 1 gc thread
2562   // will work on opening the gaps and the remaining gc threads
2563   // will work on the dense prefix.
2564   unsigned int space_id;
2565   for (space_id = old_space_id; space_id < last_space_id; ++ space_id) {
2566     HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2567     const MutableSpace* const space = _space_info[space_id].space();
2568 
2569     if (dense_prefix_end == space->bottom()) {
2570       // There is no dense prefix for this space.
2571       continue;
2572     }
2573 
2574     // The dense prefix is before this region.
2575     size_t region_index_end_dense_prefix =
2576         sd.addr_to_region_idx(dense_prefix_end);
2577     RegionData* const dense_prefix_cp =
2578       sd.region(region_index_end_dense_prefix);
2579     assert(dense_prefix_end == space->end() ||
2580            dense_prefix_cp->available() ||
2581            dense_prefix_cp->claimed(),
2582            "The region after the dense prefix should always be ready to fill");
2583 
2584     size_t region_index_start = sd.addr_to_region_idx(space->bottom());
2585 
2586     // Is there dense prefix work?
2587     size_t total_dense_prefix_regions =
2588       region_index_end_dense_prefix - region_index_start;
2589     // How many regions of the dense prefix should be given to
2590     // each thread?
2591     if (total_dense_prefix_regions > 0) {
2592       uint tasks_for_dense_prefix = 1;
2593       if (total_dense_prefix_regions <=
2594           (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
2595         // Don't over partition.  With at most
2596         // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING regions per thread there
2597         // is not enough work to split further; use one task per thread.
2598         tasks_for_dense_prefix = parallel_gc_threads;
2599       } else {
2600         // Over partition
2601         tasks_for_dense_prefix = parallel_gc_threads *
2602           PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2603       }
2604       size_t regions_per_thread = total_dense_prefix_regions /
2605         tasks_for_dense_prefix;
2606       // Give each thread at least 1 region.
2607       if (regions_per_thread == 0) {
2608         regions_per_thread = 1;
2609       }
2610 
2611       for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2612         if (region_index_start >= region_index_end_dense_prefix) {
2613           break;
2614         }
2615         // region_index_end is exclusive; it is not processed by this task.
2616         size_t region_index_end = MIN2(region_index_start + regions_per_thread,
2617                                        region_index_end_dense_prefix);
2618         q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2619                                              region_index_start,
2620                                              region_index_end));
2621         region_index_start = region_index_end;
2622       }
2623     }
2624     // This gets any part of the dense prefix that did not
2625     // fit evenly.
2626     if (region_index_start < region_index_end_dense_prefix) {
2627       q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2628                                            region_index_start,
2629                                            region_index_end_dense_prefix));
2630     }
2631   }
2632 }
2633 
2634 void PSParallelCompact::enqueue_region_stealing_tasks(
2635                                      GCTaskQueue* q,
2636                                      ParallelTaskTerminator* terminator_ptr,
2637                                      uint parallel_gc_threads) {
2638   TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
2639 
2640   // Once a thread has drained its stack, it should try to steal regions from
2641   // other threads.
2642   if (parallel_gc_threads > 1) {
2643     for (uint j = 0; j < parallel_gc_threads; j++) {
2644       q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2645     }
2646   }
2647 }
2648 
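     // The compaction phase proper: enqueue the region-draining tasks (one per
     // active worker), the dense prefix update tasks, and the region-stealing
     // tasks on a single queue, then have the GC task manager execute them.
     // The stealing tasks keep workers busy until the terminator indicates
     // that no work remains.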
2649 void PSParallelCompact::compact() {
2650   EventMark m("5 compact");
2651   // trace("5");
2652   TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
2653 
2654   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2655   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2656   PSOldGen* old_gen = heap->old_gen();
2657   old_gen->start_array()->reset();
2658   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2659   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2660   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2661   ParallelTaskTerminator terminator(active_gc_threads, qset);
2662 
2663   GCTaskQueue* q = GCTaskQueue::create();
2664   enqueue_region_draining_tasks(q, active_gc_threads);
2665   enqueue_dense_prefix_tasks(q, active_gc_threads);
2666   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
2667 
2668   {
2669     TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
2670 
2671     gc_task_manager()->execute_and_wait(q);
2672 
2673 #ifdef  ASSERT
2674     // Verify that all regions have been processed before the deferred updates.
2675     // Note that perm_space_id is skipped; this type of verification is not
2676     // valid until the perm gen is compacted by regions.
2677     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2678       verify_complete(SpaceId(id));
2679     }
2680 #endif
2681   }
2682 
2683   {
2684     // Update the deferred objects, if any.  Any compaction manager can be used.
2685     TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
2686     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2687     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2688       update_deferred_objects(cm, SpaceId(id));
2689     }
2690   }
2691 }
2692 
2693 #ifdef  ASSERT
2694 void PSParallelCompact::verify_complete(SpaceId space_id) {
2695   // All Regions from space bottom() to new_top() should be marked as filled
2696   // and all Regions from new_top() to top() should be available (i.e.,
2697   // should have been emptied).
2698   ParallelCompactData& sd = summary_data();
2699   SpaceInfo si = _space_info[space_id];
2700   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2701   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2702   const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2703   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2704   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2705 
2706   bool issued_a_warning = false;
2707 
2708   size_t cur_region;
2709   for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
2710     const RegionData* const c = sd.region(cur_region);
2711     if (!c->completed()) {
2712       warning("region " SIZE_FORMAT " not filled:  "
2713               "destination_count=" SIZE_FORMAT,
2714               cur_region, c->destination_count());
2715       issued_a_warning = true;
2716     }
2717   }
2718 
2719   for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
2720     const RegionData* const c = sd.region(cur_region);
2721     if (!c->available()) {
2722       warning("region " SIZE_FORMAT " not empty:   "
2723               "destination_count=" SIZE_FORMAT,
2724               cur_region, c->destination_count());
2725       issued_a_warning = true;
2726     }
2727   }
2728 
2729   if (issued_a_warning) {
2730     print_region_ranges();
2731   }
2732 }
2733 #endif  // #ifdef ASSERT
2734 
2735 void
2736 PSParallelCompact::follow_weak_klass_links() {
2737   // All klasses on the revisit stack are marked at this point.
2738   // Update and follow all subklass, sibling and implementor links.
2739   // Check all the stacks here even if not all the workers are active.
2740   // There is no accounting which indicates which stacks might have
2741   // contents to be followed.
2742   if (PrintRevisitStats) {
2743     gclog_or_tty->print_cr("#classes in system dictionary = %d",
2744                            SystemDictionary::number_of_classes());
2745   }
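       // manager_array() has ParallelGCThreads + 1 entries, hence the "+ 1"
       // bound below (and in follow_mdo_weak_refs()).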
2746   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
2747     ParCompactionManager* cm = ParCompactionManager::manager_array(i);
2748     KeepAliveClosure keep_alive_closure(cm);
2749     Stack<Klass*>* const rks = cm->revisit_klass_stack();
2750     if (PrintRevisitStats) {
2751       gclog_or_tty->print_cr("Revisit klass stack[%u] length = " SIZE_FORMAT,
2752                              i, rks->size());
2753     }
2754     while (!rks->is_empty()) {
2755       Klass* const k = rks->pop();
2756       k->follow_weak_klass_links(is_alive_closure(), &keep_alive_closure);
2757     }
2758 
2759     cm->follow_marking_stacks();
2760   }
2761 }
2762 
2763 void
2764 PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
2765   cm->revisit_klass_stack()->push(k);
2766 }
2767 
2768 void PSParallelCompact::revisit_mdo(ParCompactionManager* cm, DataLayout* p) {
2769   cm->revisit_mdo_stack()->push(p);
2770 }
2771 
2772 void PSParallelCompact::follow_mdo_weak_refs() {
2773   // All strongly reachable oops have been marked at this point;
2774   // we can visit and clear any weak references from MDO's which
2775   // we memoized during the strong marking phase.
2776   if (PrintRevisitStats) {
2777     gclog_or_tty->print_cr("#classes in system dictionary = %d",
2778                            SystemDictionary::number_of_classes());
2779   }
2780   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
2781     ParCompactionManager* cm = ParCompactionManager::manager_array(i);
2782     Stack<DataLayout*>* rms = cm->revisit_mdo_stack();
2783     if (PrintRevisitStats) {
2784       gclog_or_tty->print_cr("Revisit MDO stack[%u] size = " SIZE_FORMAT,
2785                              i, rms->size());
2786     }
2787     while (!rms->is_empty()) {
2788       rms->pop()->follow_weak_refs(is_alive_closure());
2789     }
2790 
2791     cm->follow_marking_stacks();
2792   }
2793 }
2794 
2795 
2796 #ifdef VALIDATE_MARK_SWEEP
2797 
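     // Debug-only support, guarded by the ValidateMarkSweep and
     // RecordMarkSweepCompaction flags: the routines below cross-check pointer
     // adjustment and record where live oops were moved by the last compaction.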
2798 void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
2799   if (!ValidateMarkSweep)
2800     return;
2801 
2802   if (!isroot) {
2803     if (_pointer_tracking) {
2804       guarantee(_adjusted_pointers->contains(p), "should have seen this pointer");
2805       _adjusted_pointers->remove(p);
2806     }
2807   } else {
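         // p is a root reference; remove it from _root_refs_stack, swapping the
         // last element into its slot when possible to avoid an O(n) remove.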
2808     ptrdiff_t index = _root_refs_stack->find(p);
2809     if (index != -1) {
2810       int l = _root_refs_stack->length();
2811       if (l > 0 && l - 1 != index) {
2812         void* last = _root_refs_stack->pop();
2813         assert(last != p, "should be different");
2814         _root_refs_stack->at_put(index, last);
2815       } else {
2816         _root_refs_stack->remove(p);
2817       }
2818     }
2819   }
2820 }
2821 
2822 
2823 void PSParallelCompact::check_adjust_pointer(void* p) {
2824   _adjusted_pointers->push(p);
2825 }
2826 
2827 
2828 class AdjusterTracker: public OopClosure {
2829  public:
2830   AdjusterTracker() { }
2831   void do_oop(oop* o)         { PSParallelCompact::check_adjust_pointer(o); }
2832   void do_oop(narrowOop* o)   { PSParallelCompact::check_adjust_pointer(o); }
2833 };
2834 
2835 
2836 void PSParallelCompact::track_interior_pointers(oop obj) {
2837   if (ValidateMarkSweep) {
2838     _adjusted_pointers->clear();
2839     _pointer_tracking = true;
2840 
2841     AdjusterTracker checker;
2842     obj->oop_iterate(&checker);
2843   }
2844 }
2845 
2846 
2847 void PSParallelCompact::check_interior_pointers() {
2848   if (ValidateMarkSweep) {
2849     _pointer_tracking = false;
2850     guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers");
2851   }
2852 }
2853 
2854 
2855 void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
2856   if (ValidateMarkSweep) {
2857     guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
2858     _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
2859   }
2860 }
2861 
2862 
2863 void PSParallelCompact::register_live_oop(oop p, size_t size) {
2864   if (ValidateMarkSweep) {
2865     _live_oops->push(p);
2866     _live_oops_size->push(size);
2867     _live_oops_index++;
2868   }
2869 }
2870 
2871 void PSParallelCompact::validate_live_oop(oop p, size_t size) {
2872   if (ValidateMarkSweep) {
2873     oop obj = _live_oops->at((int)_live_oops_index);
2874     guarantee(obj == p, "should be the same object");
2875     guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size");
2876     _live_oops_index++;
2877   }
2878 }
2879 
2880 void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size,
2881                                   HeapWord* compaction_top) {
2882   assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
2883          "should be moved to forwarded location");
2884   if (ValidateMarkSweep) {
2885     PSParallelCompact::validate_live_oop(oop(q), size);
2886     _live_oops_moved_to->push(oop(compaction_top));
2887   }
2888   if (RecordMarkSweepCompaction) {
2889     _cur_gc_live_oops->push(q);
2890     _cur_gc_live_oops_moved_to->push(compaction_top);
2891     _cur_gc_live_oops_size->push(size);
2892   }
2893 }
2894 
2895 
2896 void PSParallelCompact::compaction_complete() {
2897   if (RecordMarkSweepCompaction) {
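         // Swap the "current GC" arrays into the "last GC" slots so that the
         // data just recorded is what print_new_location_of_heap_address()
         // consults.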
2898     GrowableArray<HeapWord*>* _tmp_live_oops          = _cur_gc_live_oops;
2899     GrowableArray<HeapWord*>* _tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to;
2900     GrowableArray<size_t>   * _tmp_live_oops_size     = _cur_gc_live_oops_size;
2901 
2902     _cur_gc_live_oops           = _last_gc_live_oops;
2903     _cur_gc_live_oops_moved_to  = _last_gc_live_oops_moved_to;
2904     _cur_gc_live_oops_size      = _last_gc_live_oops_size;
2905     _last_gc_live_oops          = _tmp_live_oops;
2906     _last_gc_live_oops_moved_to = _tmp_live_oops_moved_to;
2907     _last_gc_live_oops_size     = _tmp_live_oops_size;
2908   }
2909 }
2910 
2911 
2912 void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
2913   if (!RecordMarkSweepCompaction) {
2914     tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
2915     return;
2916   }
2917 
2918   if (_last_gc_live_oops == NULL) {
2919     tty->print_cr("No compaction information gathered yet");
2920     return;
2921   }
2922 
2923   for (int i = 0; i < _last_gc_live_oops->length(); i++) {
2924     HeapWord* old_oop = _last_gc_live_oops->at(i);
2925     size_t    sz      = _last_gc_live_oops_size->at(i);
2926     if (old_oop <= q && q < (old_oop + sz)) {
2927       HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
2928       size_t offset = (q - old_oop);
2929       tty->print_cr("Address " PTR_FORMAT, q);
2930       tty->print_cr(" Was in oop " PTR_FORMAT ", size " SIZE_FORMAT ", at offset " SIZE_FORMAT, old_oop, sz, offset);
2931       tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
2932       return;
2933     }
2934   }
2935 
2936   tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q);
2937 }
2938 #endif //VALIDATE_MARK_SWEEP
2939 
2940 // Update interior oops in the range of regions [beg_region, end_region) of
     // the dense prefix; dead space encountered is filled (the "deadwood") so
     // the space stays parseable.
2941 void
2942 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2943                                                        SpaceId space_id,
2944                                                        size_t beg_region,
2945                                                        size_t end_region) {
2946   ParallelCompactData& sd = summary_data();
2947   ParMarkBitMap* const mbm = mark_bitmap();
2948 
2949   HeapWord* beg_addr = sd.region_to_addr(beg_region);
2950   HeapWord* const end_addr = sd.region_to_addr(end_region);
2951   assert(beg_region <= end_region, "bad region range");
2952   assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2953 
2954 #ifdef  ASSERT
2955   // Claim the regions to avoid triggering an assert when they are marked as
2956   // filled.
2957   for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
2958     assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
2959   }
2960 #endif  // #ifdef ASSERT
2961 
2962   if (beg_addr != space(space_id)->bottom()) {
2963     // Find the first live object or block of dead space that *starts* in this
2964     // range of regions.  If a partial object crosses onto the region, skip it;
2965     // it will be marked for 'deferred update' when the object head is
2966     // processed.  If dead space crosses onto the region, it is also skipped; it
2967     // will be filled when the prior region is processed.  If neither of those
2968     // apply, the first word in the region is the start of a live object or dead
2969     // space.
2970     assert(beg_addr > space(space_id)->bottom(), "sanity");
2971     const RegionData* const cp = sd.region(beg_region);
2972     if (cp->partial_obj_size() != 0) {
2973       beg_addr = sd.partial_obj_end(beg_region);
2974     } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
2975       beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
2976     }
2977   }
2978 
2979   if (beg_addr < end_addr) {
2980     // A live object or block of dead space starts in this range of Regions.
2981     HeapWord* const dense_prefix_end = dense_prefix(space_id);
2982 
2983     // Create closures and iterate.
2984     UpdateOnlyClosure update_closure(mbm, cm, space_id);
2985     FillClosure fill_closure(cm, space_id);
2986     ParMarkBitMap::IterationStatus status;
2987     status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
2988                           dense_prefix_end);
2989     if (status == ParMarkBitMap::incomplete) {
2990       update_closure.do_addr(update_closure.source());
2991     }
2992   }
2993 
2994   // Mark the regions as filled.
2995   RegionData* const beg_cp = sd.region(beg_region);
2996   RegionData* const end_cp = sd.region(end_region);
2997   for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
2998     cp->set_completed();
2999   }
3000 }
3001 
3002 // Return the SpaceId for the space containing addr.  If addr is not in the
3003 // heap, last_space_id is returned.  In debug mode it expects the address to be
3004 // in the heap and asserts such.
3005 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
3006   assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
3007 
3008   for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
3009     if (_space_info[id].space()->contains(addr)) {
3010       return SpaceId(id);
3011     }
3012   }
3013 
3014   assert(false, "no space contains the addr");
3015   return last_space_id;
3016 }
3017 
3018 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
3019                                                 SpaceId id) {
3020   assert(id < last_space_id, "bad space id");
3021 
3022   ParallelCompactData& sd = summary_data();
3023   const SpaceInfo* const space_info = _space_info + id;
3024   ObjectStartArray* const start_array = space_info->start_array();
3025 
3026   const MutableSpace* const space = space_info->space();
3027   assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
3028   HeapWord* const beg_addr = space_info->dense_prefix();
3029   HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
3030 
3031   const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
3032   const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
3033   const RegionData* cur_region;
3034   for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
3035     HeapWord* const addr = cur_region->deferred_obj_addr();
3036     if (addr != NULL) {
3037       if (start_array != NULL) {
3038         start_array->allocate_block(addr);
3039       }
3040       oop(addr)->update_contents(cm);
3041       assert(oop(addr)->is_oop_or_null(), "should be an oop now");
3042     }
3043   }
3044 }
3045 
3046 // Skip over count live words starting from beg, and return the address of the
3047 // next live word.  Unless marked, the word corresponding to beg is assumed to
3048 // be dead.  Callers must either ensure beg does not correspond to the middle of
3049 // an object, or account for those live words in some other way.  Callers must
3050 // also ensure that there are enough live words in the range [beg, end) to skip.
3051 HeapWord*
3052 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
3053 {
3054   assert(count > 0, "sanity");
3055 
3056   ParMarkBitMap* m = mark_bitmap();
3057   idx_t bits_to_skip = m->words_to_bits(count);
3058   idx_t cur_beg = m->addr_to_bit(beg);
3059   const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
3060 
3061   do {
3062     cur_beg = m->find_obj_beg(cur_beg, search_end);
3063     idx_t cur_end = m->find_obj_end(cur_beg, search_end);
3064     const size_t obj_bits = cur_end - cur_beg + 1;
3065     if (obj_bits > bits_to_skip) {
3066       return m->bit_to_addr(cur_beg + bits_to_skip);
3067     }
3068     bits_to_skip -= obj_bits;
3069     cur_beg = cur_end + 1;
3070   } while (bits_to_skip > 0);
3071 
3072   // Skipping the desired number of words landed just past the end of an object.
3073   // Find the start of the next object.
3074   cur_beg = m->find_obj_beg(cur_beg, search_end);
3075   assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
3076   return m->bit_to_addr(cur_beg);
3077 }
3078 
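     // Return the address of the first word in source region src_region_idx
     // whose new location is dest_addr (the region-aligned start of a
     // destination region), skipping over live words destined for lower
     // addresses.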
3079 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
3080                                             SpaceId src_space_id,
3081                                             size_t src_region_idx)
3082 {
3083   assert(summary_data().is_region_aligned(dest_addr), "not aligned");
3084 
3085   const SplitInfo& split_info = _space_info[src_space_id].split_info();
3086   if (split_info.dest_region_addr() == dest_addr) {
3087     // The partial object ending at the split point contains the first word to
3088     // be copied to dest_addr.
3089     return split_info.first_src_addr();
3090   }
3091 
3092   const ParallelCompactData& sd = summary_data();
3093   ParMarkBitMap* const bitmap = mark_bitmap();
3094   const size_t RegionSize = ParallelCompactData::RegionSize;
3095 
3096   assert(sd.is_region_aligned(dest_addr), "not aligned");
3097   const RegionData* const src_region_ptr = sd.region(src_region_idx);
3098   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
3099   HeapWord* const src_region_destination = src_region_ptr->destination();
3100 
3101   assert(dest_addr >= src_region_destination, "wrong src region");
3102   assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
3103 
3104   HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
3105   HeapWord* const src_region_end = src_region_beg + RegionSize;
3106 
3107   HeapWord* addr = src_region_beg;
3108   if (dest_addr == src_region_destination) {
3109     // Return the first live word in the source region.
3110     if (partial_obj_size == 0) {
3111       addr = bitmap->find_obj_beg(addr, src_region_end);
3112       assert(addr < src_region_end, "no objects start in src region");
3113     }
3114     return addr;
3115   }
3116 
3117   // Must skip some live data.
3118   size_t words_to_skip = dest_addr - src_region_destination;
3119   assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
3120 
3121   if (partial_obj_size >= words_to_skip) {
3122     // All the live words to skip are part of the partial object.
3123     addr += words_to_skip;
3124     if (partial_obj_size == words_to_skip) {
3125       // Find the first live word past the partial object.
3126       addr = bitmap->find_obj_beg(addr, src_region_end);
3127       assert(addr < src_region_end, "wrong src region");
3128     }
3129     return addr;
3130   }
3131 
3132   // Skip over the partial object (if any).
3133   if (partial_obj_size != 0) {
3134     words_to_skip -= partial_obj_size;
3135     addr += partial_obj_size;
3136   }
3137 
3138   // Skip over live words due to objects that start in the region.
3139   addr = skip_live_words(addr, src_region_end, words_to_skip);
3140   assert(addr < src_region_end, "wrong src region");
3141   return addr;
3142 }
3143 
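     // Decrement the destination count of each source region from beg_region
     // up to the region containing end_addr.  A region whose data has been
     // completely copied out becomes available (see RegionData::available());
     // if it also lies below new_top, claim it and push it onto the compaction
     // manager's region stack to be filled.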
3144 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
3145                                                      SpaceId src_space_id,
3146                                                      size_t beg_region,
3147                                                      HeapWord* end_addr)
3148 {
3149   ParallelCompactData& sd = summary_data();
3150 
3151 #ifdef ASSERT
3152   MutableSpace* const src_space = _space_info[src_space_id].space();
3153   HeapWord* const beg_addr = sd.region_to_addr(beg_region);
3154   assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
3155          "src_space_id does not match beg_addr");
3156   assert(src_space->contains(end_addr) || end_addr == src_space->end(),
3157          "src_space_id does not match end_addr");
3158 #endif // #ifdef ASSERT
3159 
3160   RegionData* const beg = sd.region(beg_region);
3161   RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
3162 
3163   // Regions up to new_top() are enqueued if they become available.
3164   HeapWord* const new_top = _space_info[src_space_id].new_top();
3165   RegionData* const enqueue_end =
3166     sd.addr_to_region_ptr(sd.region_align_up(new_top));
3167 
3168   for (RegionData* cur = beg; cur < end; ++cur) {
3169     assert(cur->data_size() > 0, "region must have live data");
3170     cur->decrement_destination_count();
3171     if (cur < enqueue_end && cur->available() && cur->claim()) {
3172       cm->push_region(sd.region(cur));
3173     }
3174   }
3175 }
3176 
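     // Find the next source region for the given closure.  Skip empty regions
     // at or above end_addr in the current source space; if none remain,
     // advance src_space_id and src_space_top to the next space that does not
     // compact into itself and return the index of its first region containing
     // live data.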
3177 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
3178                                           SpaceId& src_space_id,
3179                                           HeapWord*& src_space_top,
3180                                           HeapWord* end_addr)
3181 {
3182   typedef ParallelCompactData::RegionData RegionData;
3183 
3184   ParallelCompactData& sd = PSParallelCompact::summary_data();
3185   const size_t region_size = ParallelCompactData::RegionSize;
3186 
3187   size_t src_region_idx = 0;
3188 
3189   // Skip empty regions (if any) up to the top of the space.
3190   HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
3191   RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
3192   HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
3193   const RegionData* const top_region_ptr =
3194     sd.addr_to_region_ptr(top_aligned_up);
3195   while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
3196     ++src_region_ptr;
3197   }
3198 
3199   if (src_region_ptr < top_region_ptr) {
3200     // The next source region is in the current space.  Update src_region_idx
3201     // and the source address to match src_region_ptr.
3202     src_region_idx = sd.region(src_region_ptr);
3203     HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
3204     if (src_region_addr > closure.source()) {
3205       closure.set_source(src_region_addr);
3206     }
3207     return src_region_idx;
3208   }
3209 
3210   // Switch to a new source space and find the first non-empty region.
3211   unsigned int space_id = src_space_id + 1;
3212   assert(space_id < last_space_id, "not enough spaces");
3213 
3214   HeapWord* const destination = closure.destination();
3215 
3216   do {
3217     MutableSpace* space = _space_info[space_id].space();
3218     HeapWord* const bottom = space->bottom();
3219     const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
3220 
3221     // Iterate over the spaces that do not compact into themselves.
3222     if (bottom_cp->destination() != bottom) {
3223       HeapWord* const top_aligned_up = sd.region_align_up(space->top());
3224       const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
3225 
3226       for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
3227         if (src_cp->live_obj_size() > 0) {
3228           // Found it.
3229           assert(src_cp->destination() == destination,
3230                  "first live obj in the space must match the destination");
3231           assert(src_cp->partial_obj_size() == 0,
3232                  "a space cannot begin with a partial obj");
3233 
3234           src_space_id = SpaceId(space_id);
3235           src_space_top = space->top();
3236           const size_t src_region_idx = sd.region(src_cp);
3237           closure.set_source(sd.region_to_addr(src_region_idx));
3238           return src_region_idx;
3239         } else {
3240           assert(src_cp->data_size() == 0, "sanity");
3241         }
3242       }
3243     }
3244   } while (++space_id < last_space_id);
3245 
3246   assert(false, "no source region was found");
3247   return 0;
3248 }
3249 
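     // Fill destination region region_idx: copy live words into it from one or
     // more source regions, updating interior oops as objects are moved.  An
     // object that does not fit entirely within the region has its interior
     // oop updates deferred (see update_deferred_objects()).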
3250 void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
3251 {
3252   typedef ParMarkBitMap::IterationStatus IterationStatus;
3253   const size_t RegionSize = ParallelCompactData::RegionSize;
3254   ParMarkBitMap* const bitmap = mark_bitmap();
3255   ParallelCompactData& sd = summary_data();
3256   RegionData* const region_ptr = sd.region(region_idx);
3257 
3258   // Get the items needed to construct the closure.
3259   HeapWord* dest_addr = sd.region_to_addr(region_idx);
3260   SpaceId dest_space_id = space_id(dest_addr);
3261   ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
3262   HeapWord* new_top = _space_info[dest_space_id].new_top();
3263   assert(dest_addr < new_top, "sanity");
3264   const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
3265 
3266   // Get the source region and related info.
3267   size_t src_region_idx = region_ptr->source_region();
3268   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
3269   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
3270 
3271   MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3272   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
3273 
3274   // Adjust src_region_idx to prepare for decrementing destination counts (the
3275   // destination count is not decremented when a region is copied to itself).
3276   if (src_region_idx == region_idx) {
3277     src_region_idx += 1;
3278   }
3279 
3280   if (bitmap->is_unmarked(closure.source())) {
3281     // The first source word is in the middle of an object; copy the remainder
3282     // of the object or as much as will fit.  The fact that pointer updates were
3283     // deferred will be noted when the object header is processed.
3284     HeapWord* const old_src_addr = closure.source();
3285     closure.copy_partial_obj();
3286     if (closure.is_full()) {
3287       decrement_destination_counts(cm, src_space_id, src_region_idx,
3288                                    closure.source());
3289       region_ptr->set_deferred_obj_addr(NULL);
3290       region_ptr->set_completed();
3291       return;
3292     }
3293 
3294     HeapWord* const end_addr = sd.region_align_down(closure.source());
3295     if (sd.region_align_down(old_src_addr) != end_addr) {
3296       // The partial object was copied from more than one source region.
3297       decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
3298 
3299       // Move to the next source region, possibly switching spaces as well.  All
3300       // args except end_addr may be modified.
3301       src_region_idx = next_src_region(closure, src_space_id, src_space_top,
3302                                        end_addr);
3303     }
3304   }
3305 
3306   do {
3307     HeapWord* const cur_addr = closure.source();
3308     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
3309                                     src_space_top);
3310     IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
3311 
3312     if (status == ParMarkBitMap::incomplete) {
3313       // The last obj that starts in the source region does not end in the
3314       // region.
3315       assert(closure.source() < end_addr, "sanity");
3316       HeapWord* const obj_beg = closure.source();
3317       HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
3318                                        src_space_top);
3319       HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
3320       if (obj_end < range_end) {
3321         // The end was found; the entire object will fit.
3322         status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
3323         assert(status != ParMarkBitMap::would_overflow, "sanity");
3324       } else {
3325         // The end was not found; the object will not fit.
3326         assert(range_end < src_space_top, "obj cannot cross space boundary");
3327         status = ParMarkBitMap::would_overflow;
3328       }
3329     }
3330 
3331     if (status == ParMarkBitMap::would_overflow) {
3332       // The last object did not fit.  Note that interior oop updates were
3333       // deferred, then copy enough of the object to fill the region.
3334       region_ptr->set_deferred_obj_addr(closure.destination());
3335       status = closure.copy_until_full(); // copies from closure.source()
3336 
3337       decrement_destination_counts(cm, src_space_id, src_region_idx,
3338                                    closure.source());
3339       region_ptr->set_completed();
3340       return;
3341     }
3342 
3343     if (status == ParMarkBitMap::full) {
3344       decrement_destination_counts(cm, src_space_id, src_region_idx,
3345                                    closure.source());
3346       region_ptr->set_deferred_obj_addr(NULL);
3347       region_ptr->set_completed();
3348       return;
3349     }
3350 
3351     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
3352 
3353     // Move to the next source region, possibly switching spaces as well.  All
3354     // args except end_addr may be modified.
3355     src_region_idx = next_src_region(closure, src_space_id, src_space_top,
3356                                      end_addr);
3357   } while (true);
3358 }
3359 
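     // Move and update the live objects in the given space using a single
     // compaction manager: regions in the dense prefix are updated in place
     // (and dead space filled), then the remaining live data is copied to its
     // new location and updated.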
3360 void
3361 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
3362   const MutableSpace* sp = space(space_id);
3363   if (sp->is_empty()) {
3364     return;
3365   }
3366 
3367   ParallelCompactData& sd = PSParallelCompact::summary_data();
3368   ParMarkBitMap* const bitmap = mark_bitmap();
3369   HeapWord* const dp_addr = dense_prefix(space_id);
3370   HeapWord* beg_addr = sp->bottom();
3371   HeapWord* end_addr = sp->top();
3372 
3373   assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
3374 
3375   const size_t beg_region = sd.addr_to_region_idx(beg_addr);
3376   const size_t dp_region = sd.addr_to_region_idx(dp_addr);
3377   if (beg_region < dp_region) {
3378     update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
3379   }
3380 
3381   // The destination of the first live object that starts in the region is one
3382   // past the end of the partial object entering the region (if any).
3383   HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
3384   HeapWord* const new_top = _space_info[space_id].new_top();
3385   assert(new_top >= dest_addr, "bad new_top value");
3386   const size_t words = pointer_delta(new_top, dest_addr);
3387 
3388   if (words > 0) {
3389     ObjectStartArray* start_array = _space_info[space_id].start_array();
3390     MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3391 
3392     ParMarkBitMap::IterationStatus status;
3393     status = bitmap->iterate(&closure, dest_addr, end_addr);
3394     assert(status == ParMarkBitMap::full, "iteration not complete");
3395     assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
3396            "live objects skipped because closure is full");
3397   }
3398 }
3399 
3400 jlong PSParallelCompact::millis_since_last_gc() {
3401   jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
3402   // XXX See note in genCollectedHeap::millis_since_last_gc().
3403   if (ret_val < 0) {
3404     NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
3405     return 0;
3406   }
3407   return ret_val;
3408 }
3409 
3410 void PSParallelCompact::reset_millis_since_last_gc() {
3411   _time_of_last_gc = os::javaTimeMillis();
3412 }
3413 
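     // Copy enough of the current object to fill the remaining space in the
     // destination region; interior oop updates for this object are deferred
     // by the caller.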
3414 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
3415 {
3416   if (source() != destination()) {
3417     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3418     Copy::aligned_conjoint_words(source(), destination(), words_remaining());
3419   }
3420   update_state(words_remaining());
3421   assert(is_full(), "sanity");
3422   return ParMarkBitMap::full;
3423 }
3424 
3425 void MoveAndUpdateClosure::copy_partial_obj()
3426 {
3427   size_t words = words_remaining();
3428 
3429   HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
3430   HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
3431   if (end_addr < range_end) {
3432     words = bitmap()->obj_size(source(), end_addr);
3433   }
3434 
3435   // This test is necessary; if omitted, the pointer updates to a partial object
3436   // that crosses the dense prefix boundary could be overwritten.
3437   if (source() != destination()) {
3438     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3439     Copy::aligned_conjoint_words(source(), destination(), words);
3440   }
3441   update_state(words);
3442 }
3443 
3444 ParMarkBitMapClosure::IterationStatus
3445 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3446   assert(destination() != NULL, "sanity");
3447   assert(bitmap()->obj_size(addr) == words, "bad size");
3448 
3449   _source = addr;
3450   assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
3451          destination(), "wrong destination");
3452 
3453   if (words > words_remaining()) {
3454     return ParMarkBitMap::would_overflow;
3455   }
3456 
3457   // The start_array must be updated even if the object is not moving.
3458   if (_start_array != NULL) {
3459     _start_array->allocate_block(destination());
3460   }
3461 
3462   if (destination() != source()) {
3463     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3464     Copy::aligned_conjoint_words(source(), destination(), words);
3465   }
3466 
3467   oop moved_oop = (oop) destination();
3468   moved_oop->update_contents(compaction_manager());
3469   assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
3470 
3471   update_state(words);
3472   assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
3473   return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
3474 }
3475 
3476 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
3477                                      ParCompactionManager* cm,
3478                                      PSParallelCompact::SpaceId space_id) :
3479   ParMarkBitMapClosure(mbm, cm),
3480   _space_id(space_id),
3481   _start_array(PSParallelCompact::start_array(space_id))
3482 {
3483 }
3484 
3485 // Updates the references in the object to their new values.
3486 ParMarkBitMapClosure::IterationStatus
3487 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
3488   do_addr(addr);
3489   return ParMarkBitMap::incomplete;
3490 }
3491 
3492 // Prepare for compaction.  This method is executed once
3493 // (i.e., by a single thread) before compaction.
3494 // Save the updated location of the intArrayKlassObj for
3495 // filling holes in the dense prefix.
3496 void PSParallelCompact::compact_prologue() {
3497   _updated_int_array_klass_obj = (klassOop)
3498     summary_data().calc_new_pointer(Universe::intArrayKlassObj());
3499 }
3500