rev 4535 : 6725714: par compact - add a table to speed up bitmap searches
Reviewed-by: jmasa, tschatzl
--- old/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ new/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
1 1 /*
2 2 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/symbolTable.hpp"
27 27 #include "classfile/systemDictionary.hpp"
28 28 #include "code/codeCache.hpp"
29 29 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
30 30 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
31 31 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
32 32 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
33 33 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
34 34 #include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
35 35 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
36 36 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
37 37 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
38 38 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
39 39 #include "gc_implementation/parallelScavenge/psPermGen.hpp"
40 40 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
41 41 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
42 42 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
43 43 #include "gc_implementation/shared/gcHeapSummary.hpp"
44 44 #include "gc_implementation/shared/gcTimer.hpp"
45 45 #include "gc_implementation/shared/gcTrace.hpp"
46 46 #include "gc_implementation/shared/gcTraceTime.hpp"
47 47 #include "gc_implementation/shared/isGCActiveMark.hpp"
48 48 #include "gc_interface/gcCause.hpp"
49 49 #include "memory/gcLocker.inline.hpp"
50 50 #include "memory/referencePolicy.hpp"
51 51 #include "memory/referenceProcessor.hpp"
52 52 #include "oops/methodDataOop.hpp"
53 53 #include "oops/oop.inline.hpp"
54 54 #include "oops/oop.pcgc.inline.hpp"
55 55 #include "runtime/fprofiler.hpp"
56 56 #include "runtime/safepoint.hpp"
... 56 lines elided ...
57 57 #include "runtime/vmThread.hpp"
58 58 #include "services/management.hpp"
59 59 #include "services/memoryService.hpp"
60 60 #include "services/memTracker.hpp"
61 61 #include "utilities/events.hpp"
62 62 #include "utilities/stack.inline.hpp"
63 63
64 64 #include <math.h>
65 65
66 66 // All sizes are in HeapWords.
67 -const size_t ParallelCompactData::Log2RegionSize = 9; // 512 words
67 +const size_t ParallelCompactData::Log2RegionSize = 16; // 64K words
68 68 const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize;
69 69 const size_t ParallelCompactData::RegionSizeBytes =
70 70 RegionSize << LogHeapWordSize;
71 71 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
72 72 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
73 -const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask;
73 +const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask;
74 74
75 +const size_t ParallelCompactData::Log2BlockSize = 7; // 128 words
76 +const size_t ParallelCompactData::BlockSize = (size_t)1 << Log2BlockSize;
77 +const size_t ParallelCompactData::BlockSizeBytes =
78 + BlockSize << LogHeapWordSize;
79 +const size_t ParallelCompactData::BlockSizeOffsetMask = BlockSize - 1;
80 +const size_t ParallelCompactData::BlockAddrOffsetMask = BlockSizeBytes - 1;
81 +const size_t ParallelCompactData::BlockAddrMask = ~BlockAddrOffsetMask;
82 +
83 +const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
84 +const size_t ParallelCompactData::Log2BlocksPerRegion =
85 + Log2RegionSize - Log2BlockSize;
86 +
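
A minimal standalone sketch (not part of the change; all names are local to the example) of how the region and block constants above relate, and how a heap-word index maps to a region and a block under the new values (Log2RegionSize = 16, Log2BlockSize = 7):

#include <stddef.h>
#include <stdio.h>

int main() {
  const size_t Log2RegionSize  = 16;                        // 64K heap words per region
  const size_t Log2BlockSize   = 7;                         // 128 heap words per block
  const size_t RegionSize      = (size_t)1 << Log2RegionSize;
  const size_t BlockSize       = (size_t)1 << Log2BlockSize;
  const size_t BlocksPerRegion = RegionSize / BlockSize;    // 512

  // Treat an address as a word index from _region_start, for illustration only.
  const size_t word_index = 200000;
  const size_t region_idx = word_index >> Log2RegionSize;   // region covering the word
  const size_t block_idx  = word_index >> Log2BlockSize;    // block covering the word

  printf("RegionSize=%zu BlockSize=%zu BlocksPerRegion=%zu\n",
         RegionSize, BlockSize, BlocksPerRegion);
  printf("word %zu -> region %zu, block %zu\n", word_index, region_idx, block_idx);
  return 0;
}
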
75 87 const ParallelCompactData::RegionData::region_sz_t
76 88 ParallelCompactData::RegionData::dc_shift = 27;
77 89
78 90 const ParallelCompactData::RegionData::region_sz_t
79 91 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
80 92
81 93 const ParallelCompactData::RegionData::region_sz_t
82 94 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
83 95
84 96 const ParallelCompactData::RegionData::region_sz_t
85 97 ParallelCompactData::RegionData::los_mask = ~dc_mask;
86 98
87 99 const ParallelCompactData::RegionData::region_sz_t
88 100 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
89 101
90 102 const ParallelCompactData::RegionData::region_sz_t
91 103 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
92 104
93 105 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
94 106 bool PSParallelCompact::_print_phases = false;
95 107
96 108 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
97 109 klassOop PSParallelCompact::_updated_int_array_klass_obj = NULL;
98 110
99 111 double PSParallelCompact::_dwl_mean;
100 112 double PSParallelCompact::_dwl_std_dev;
101 113 double PSParallelCompact::_dwl_first_term;
102 114 double PSParallelCompact::_dwl_adjustment;
103 115 #ifdef ASSERT
104 116 bool PSParallelCompact::_dwl_initialized = false;
105 117 #endif // #ifdef ASSERT
106 118
107 119 #ifdef VALIDATE_MARK_SWEEP
108 120 GrowableArray<void*>* PSParallelCompact::_root_refs_stack = NULL;
109 121 GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
110 122 GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
111 123 GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
112 124 size_t PSParallelCompact::_live_oops_index = 0;
113 125 size_t PSParallelCompact::_live_oops_index_at_perm = 0;
114 126 GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL;
115 127 GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL;
116 128 bool PSParallelCompact::_pointer_tracking = false;
117 129 bool PSParallelCompact::_root_tracking = true;
118 130
119 131 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL;
120 132 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL;
121 133 GrowableArray<size_t> * PSParallelCompact::_cur_gc_live_oops_size = NULL;
122 134 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
123 135 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
124 136 GrowableArray<size_t> * PSParallelCompact::_last_gc_live_oops_size = NULL;
125 137 #endif
126 138
127 139 void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
128 140 HeapWord* destination)
129 141 {
130 142 assert(src_region_idx != 0, "invalid src_region_idx");
131 143 assert(partial_obj_size != 0, "invalid partial_obj_size argument");
132 144 assert(destination != NULL, "invalid destination argument");
133 145
134 146 _src_region_idx = src_region_idx;
135 147 _partial_obj_size = partial_obj_size;
136 148 _destination = destination;
137 149
138 150 // These fields may not be updated below, so make sure they're clear.
139 151 assert(_dest_region_addr == NULL, "should have been cleared");
140 152 assert(_first_src_addr == NULL, "should have been cleared");
141 153
142 154 // Determine the number of destination regions for the partial object.
143 155 HeapWord* const last_word = destination + partial_obj_size - 1;
144 156 const ParallelCompactData& sd = PSParallelCompact::summary_data();
145 157 HeapWord* const beg_region_addr = sd.region_align_down(destination);
146 158 HeapWord* const end_region_addr = sd.region_align_down(last_word);
147 159
148 160 if (beg_region_addr == end_region_addr) {
149 161 // One destination region.
150 162 _destination_count = 1;
151 163 if (end_region_addr == destination) {
152 164 // The destination falls on a region boundary, thus the first word of the
153 165 // partial object will be the first word copied to the destination region.
154 166 _dest_region_addr = end_region_addr;
155 167 _first_src_addr = sd.region_to_addr(src_region_idx);
156 168 }
157 169 } else {
158 170 // Two destination regions. When copied, the partial object will cross a
159 171 // destination region boundary, so a word somewhere within the partial
160 172 // object will be the first word copied to the second destination region.
161 173 _destination_count = 2;
162 174 _dest_region_addr = end_region_addr;
163 175 const size_t ofs = pointer_delta(end_region_addr, destination);
164 176 assert(ofs < _partial_obj_size, "sanity");
165 177 _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
166 178 }
167 179 }
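
A small standalone illustration (hypothetical word addresses, not HotSpot code) of the one- versus two-destination-region decision made above: the count is 1 when the first and last words of the partial object land in the same destination region, and 2 when the copy crosses a region boundary.

#include <stddef.h>
#include <stdio.h>

int main() {
  const size_t RegionSize = (size_t)1 << 16;        // 64K words, matching the constants above

  // The partial object is 300 words long and is copied to 'destination',
  // which starts 100 words before a region boundary, so the copy crosses
  // into the next destination region.
  size_t destination      = 3 * RegionSize - 100;
  size_t partial_obj_size = 300;
  size_t last_word        = destination + partial_obj_size - 1;

  size_t beg_region = destination / RegionSize;     // region of the first copied word
  size_t end_region = last_word   / RegionSize;     // region of the last copied word
  unsigned destination_count = (beg_region == end_region) ? 1 : 2;

  printf("beg_region=%zu end_region=%zu destination_count=%u\n",
         beg_region, end_region, destination_count);
  return 0;
}
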
168 180
169 181 void SplitInfo::clear()
170 182 {
171 183 _src_region_idx = 0;
172 184 _partial_obj_size = 0;
173 185 _destination = NULL;
174 186 _destination_count = 0;
175 187 _dest_region_addr = NULL;
176 188 _first_src_addr = NULL;
177 189 assert(!is_valid(), "sanity");
178 190 }
179 191
180 192 #ifdef ASSERT
181 193 void SplitInfo::verify_clear()
182 194 {
183 195 assert(_src_region_idx == 0, "not clear");
184 196 assert(_partial_obj_size == 0, "not clear");
185 197 assert(_destination == NULL, "not clear");
186 198 assert(_destination_count == 0, "not clear");
187 199 assert(_dest_region_addr == NULL, "not clear");
188 200 assert(_first_src_addr == NULL, "not clear");
189 201 }
190 202 #endif // #ifdef ASSERT
191 203
192 204
193 205 #ifndef PRODUCT
194 206 const char* PSParallelCompact::space_names[] = {
195 207 "perm", "old ", "eden", "from", "to "
196 208 };
197 209
198 210 void PSParallelCompact::print_region_ranges()
199 211 {
200 212 tty->print_cr("space bottom top end new_top");
201 213 tty->print_cr("------ ---------- ---------- ---------- ----------");
202 214
203 215 for (unsigned int id = 0; id < last_space_id; ++id) {
204 216 const MutableSpace* space = _space_info[id].space();
205 217 tty->print_cr("%u %s "
206 218 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
207 219 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
208 220 id, space_names[id],
209 221 summary_data().addr_to_region_idx(space->bottom()),
210 222 summary_data().addr_to_region_idx(space->top()),
211 223 summary_data().addr_to_region_idx(space->end()),
212 224 summary_data().addr_to_region_idx(_space_info[id].new_top()));
213 225 }
214 226 }
215 227
216 228 void
217 229 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
218 230 {
219 231 #define REGION_IDX_FORMAT SIZE_FORMAT_W(7)
220 232 #define REGION_DATA_FORMAT SIZE_FORMAT_W(5)
221 233
222 234 ParallelCompactData& sd = PSParallelCompact::summary_data();
223 235 size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
224 236 tty->print_cr(REGION_IDX_FORMAT " " PTR_FORMAT " "
225 237 REGION_IDX_FORMAT " " PTR_FORMAT " "
226 238 REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
227 239 REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
228 240 i, c->data_location(), dci, c->destination(),
229 241 c->partial_obj_size(), c->live_obj_size(),
230 242 c->data_size(), c->source_region(), c->destination_count());
231 243
232 244 #undef REGION_IDX_FORMAT
233 245 #undef REGION_DATA_FORMAT
234 246 }
235 247
236 248 void
237 249 print_generic_summary_data(ParallelCompactData& summary_data,
238 250 HeapWord* const beg_addr,
239 251 HeapWord* const end_addr)
240 252 {
241 253 size_t total_words = 0;
242 254 size_t i = summary_data.addr_to_region_idx(beg_addr);
243 255 const size_t last = summary_data.addr_to_region_idx(end_addr);
244 256 HeapWord* pdest = 0;
245 257
246 258 while (i <= last) {
247 259 ParallelCompactData::RegionData* c = summary_data.region(i);
248 260 if (c->data_size() != 0 || c->destination() != pdest) {
249 261 print_generic_summary_region(i, c);
250 262 total_words += c->data_size();
251 263 pdest = c->destination();
252 264 }
253 265 ++i;
254 266 }
255 267
256 268 tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
257 269 }
258 270
259 271 void
260 272 print_generic_summary_data(ParallelCompactData& summary_data,
261 273 SpaceInfo* space_info)
262 274 {
263 275 for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
264 276 const MutableSpace* space = space_info[id].space();
265 277 print_generic_summary_data(summary_data, space->bottom(),
266 278 MAX2(space->top(), space_info[id].new_top()));
267 279 }
268 280 }
269 281
270 282 void
271 283 print_initial_summary_region(size_t i,
272 284 const ParallelCompactData::RegionData* c,
273 285 bool newline = true)
274 286 {
275 287 tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
276 288 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
277 289 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
278 290 i, c->destination(),
279 291 c->partial_obj_size(), c->live_obj_size(),
280 292 c->data_size(), c->source_region(), c->destination_count());
281 293 if (newline) tty->cr();
282 294 }
283 295
284 296 void
285 297 print_initial_summary_data(ParallelCompactData& summary_data,
286 298 const MutableSpace* space) {
287 299 if (space->top() == space->bottom()) {
288 300 return;
289 301 }
290 302
291 303 const size_t region_size = ParallelCompactData::RegionSize;
292 304 typedef ParallelCompactData::RegionData RegionData;
293 305 HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
294 306 const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
295 307 const RegionData* c = summary_data.region(end_region - 1);
296 308 HeapWord* end_addr = c->destination() + c->data_size();
297 309 const size_t live_in_space = pointer_delta(end_addr, space->bottom());
298 310
299 311 // Print (and count) the full regions at the beginning of the space.
300 312 size_t full_region_count = 0;
301 313 size_t i = summary_data.addr_to_region_idx(space->bottom());
302 314 while (i < end_region && summary_data.region(i)->data_size() == region_size) {
303 315 print_initial_summary_region(i, summary_data.region(i));
304 316 ++full_region_count;
305 317 ++i;
306 318 }
307 319
308 320 size_t live_to_right = live_in_space - full_region_count * region_size;
309 321
310 322 double max_reclaimed_ratio = 0.0;
311 323 size_t max_reclaimed_ratio_region = 0;
312 324 size_t max_dead_to_right = 0;
313 325 size_t max_live_to_right = 0;
314 326
315 327 // Print the 'reclaimed ratio' for regions while there is something live in
316 328 // the region or to the right of it. The remaining regions are empty (and
317 329 // uninteresting), and computing the ratio will result in division by 0.
318 330 while (i < end_region && live_to_right > 0) {
319 331 c = summary_data.region(i);
320 332 HeapWord* const region_addr = summary_data.region_to_addr(i);
321 333 const size_t used_to_right = pointer_delta(space->top(), region_addr);
322 334 const size_t dead_to_right = used_to_right - live_to_right;
323 335 const double reclaimed_ratio = double(dead_to_right) / live_to_right;
324 336
325 337 if (reclaimed_ratio > max_reclaimed_ratio) {
326 338 max_reclaimed_ratio = reclaimed_ratio;
327 339 max_reclaimed_ratio_region = i;
328 340 max_dead_to_right = dead_to_right;
329 341 max_live_to_right = live_to_right;
330 342 }
331 343
332 344 print_initial_summary_region(i, c, false);
333 345 tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
334 346 reclaimed_ratio, dead_to_right, live_to_right);
335 347
336 348 live_to_right -= c->data_size();
337 349 ++i;
338 350 }
339 351
340 352 // Any remaining regions are empty. Print one more if there is one.
341 353 if (i < end_region) {
342 354 print_initial_summary_region(i, summary_data.region(i));
343 355 }
344 356
345 357 tty->print_cr("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
346 358 "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
347 359 max_reclaimed_ratio_region, max_dead_to_right,
348 360 max_live_to_right, max_reclaimed_ratio);
349 361 }
350 362
351 363 void
352 364 print_initial_summary_data(ParallelCompactData& summary_data,
353 365 SpaceInfo* space_info) {
354 366 unsigned int id = PSParallelCompact::perm_space_id;
355 367 const MutableSpace* space;
356 368 do {
357 369 space = space_info[id].space();
358 370 print_initial_summary_data(summary_data, space);
359 371 } while (++id < PSParallelCompact::eden_space_id);
360 372
361 373 do {
362 374 space = space_info[id].space();
363 375 print_generic_summary_data(summary_data, space->bottom(), space->top());
364 376 } while (++id < PSParallelCompact::last_space_id);
365 377 }
366 378 #endif // #ifndef PRODUCT
367 379
368 380 #ifdef ASSERT
369 381 size_t add_obj_count;
370 382 size_t add_obj_size;
371 383 size_t mark_bitmap_count;
... 287 lines elided ...
372 384 size_t mark_bitmap_size;
373 385 #endif // #ifdef ASSERT
374 386
375 387 ParallelCompactData::ParallelCompactData()
376 388 {
377 389 _region_start = 0;
378 390
379 391 _region_vspace = 0;
380 392 _region_data = 0;
381 393 _region_count = 0;
394 +
395 + _block_vspace = 0;
396 + _block_data = 0;
397 + _block_count = 0;
382 398 }
383 399
384 400 bool ParallelCompactData::initialize(MemRegion covered_region)
385 401 {
386 402 _region_start = covered_region.start();
387 403 const size_t region_size = covered_region.word_size();
388 404 DEBUG_ONLY(_region_end = _region_start + region_size;)
389 405
390 406 assert(region_align_down(_region_start) == _region_start,
391 407 "region start not aligned");
392 408 assert((region_size & RegionSizeOffsetMask) == 0,
393 409 "region size not a multiple of RegionSize");
394 410
395 - bool result = initialize_region_data(region_size);
396 -
411 + bool result = initialize_region_data(region_size) && initialize_block_data();
397 412 return result;
398 413 }
399 414
400 415 PSVirtualSpace*
401 416 ParallelCompactData::create_vspace(size_t count, size_t element_size)
402 417 {
403 418 const size_t raw_bytes = count * element_size;
404 419 const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
405 420 const size_t granularity = os::vm_allocation_granularity();
406 421 const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
407 422
408 423 const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
409 424 MAX2(page_sz, granularity);
410 425 ReservedSpace rs(bytes, rs_align, rs_align > 0);
411 426 os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
412 427 rs.size());
413 428
414 429 MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
415 430
416 431 PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
417 432 if (vspace != 0) {
418 433 if (vspace->expand_by(bytes)) {
419 434 return vspace;
420 435 }
421 436 delete vspace;
422 437 // Release memory reserved in the space.
423 438 rs.release();
424 439 }
425 440
426 441 return 0;
427 442 }
428 443
429 444 bool ParallelCompactData::initialize_region_data(size_t region_size)
430 445 {
... 24 lines elided ...
431 446 const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
432 447 _region_vspace = create_vspace(count, sizeof(RegionData));
433 448 if (_region_vspace != 0) {
434 449 _region_data = (RegionData*)_region_vspace->reserved_low_addr();
435 450 _region_count = count;
436 451 return true;
437 452 }
438 453 return false;
439 454 }
440 455
456 +bool ParallelCompactData::initialize_block_data()
457 +{
458 + assert(_region_count != 0, "region data must be initialized first");
459 + const size_t count = _region_count << Log2BlocksPerRegion;
460 + _block_vspace = create_vspace(count, sizeof(BlockData));
461 + if (_block_vspace != 0) {
462 + _block_data = (BlockData*)_block_vspace->reserved_low_addr();
463 + _block_count = count;
464 + return true;
465 + }
466 + return false;
467 +}
468 +
441 469 void ParallelCompactData::clear()
442 470 {
443 471 memset(_region_data, 0, _region_vspace->committed_size());
472 + memset(_block_data, 0, _block_vspace->committed_size());
444 473 }
445 474
446 475 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
447 476 assert(beg_region <= _region_count, "beg_region out of range");
448 477 assert(end_region <= _region_count, "end_region out of range");
478 + assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");
449 479
450 480 const size_t region_cnt = end_region - beg_region;
451 481 memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
482 +
483 + const size_t beg_block = beg_region * BlocksPerRegion;
484 + const size_t block_cnt = region_cnt * BlocksPerRegion;
485 + memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
452 486 }
453 487
454 488 HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
455 489 {
456 490 const RegionData* cur_cp = region(region_idx);
457 491 const RegionData* const end_cp = region(region_count() - 1);
458 492
459 493 HeapWord* result = region_to_addr(region_idx);
460 494 if (cur_cp < end_cp) {
461 495 do {
462 496 result += cur_cp->partial_obj_size();
463 497 } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
464 498 }
465 499 return result;
466 500 }
467 501
468 502 void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
469 503 {
470 504 const size_t obj_ofs = pointer_delta(addr, _region_start);
471 505 const size_t beg_region = obj_ofs >> Log2RegionSize;
472 506 const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
473 507
474 508 DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
475 509 DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
476 510
477 511 if (beg_region == end_region) {
478 512 // All in one region.
479 513 _region_data[beg_region].add_live_obj(len);
480 514 return;
481 515 }
482 516
483 517 // First region.
484 518 const size_t beg_ofs = region_offset(addr);
485 519 _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);
486 520
487 521 klassOop klass = ((oop)addr)->klass();
488 522 // Middle regions--completely spanned by this object.
489 523 for (size_t region = beg_region + 1; region < end_region; ++region) {
490 524 _region_data[region].set_partial_obj_size(RegionSize);
491 525 _region_data[region].set_partial_obj_addr(addr);
492 526 }
493 527
494 528 // Last region.
495 529 const size_t end_ofs = region_offset(addr + len - 1);
496 530 _region_data[end_region].set_partial_obj_size(end_ofs + 1);
497 531 _region_data[end_region].set_partial_obj_addr(addr);
498 532 }
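
The apportioning done above can be checked with a small standalone sketch (hypothetical offsets, not HotSpot code): the first region is credited with the words up to its end, fully spanned middle regions record partial_obj_size == RegionSize, and the last region records the remaining words.

#include <stddef.h>
#include <stdio.h>

int main() {
  const size_t Log2RegionSize = 16;
  const size_t RegionSize     = (size_t)1 << Log2RegionSize;

  size_t obj_ofs = RegionSize - 1000;               // object starts 1000 words before a boundary
  size_t len     = 2 * RegionSize + 5000;           // and spans four regions

  size_t beg_region = obj_ofs >> Log2RegionSize;
  size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
  size_t beg_ofs    = obj_ofs & (RegionSize - 1);
  size_t end_ofs    = (obj_ofs + len - 1) & (RegionSize - 1);

  printf("first region %zu: add_live_obj(%zu)\n", beg_region, RegionSize - beg_ofs);
  for (size_t r = beg_region + 1; r < end_region; ++r) {
    printf("middle region %zu: partial_obj_size = RegionSize (%zu)\n", r, RegionSize);
  }
  printf("last region %zu: partial_obj_size = %zu\n", end_region, end_ofs + 1);
  return 0;
}
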
499 533
500 534 void
501 535 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
502 536 {
503 537 assert(region_offset(beg) == 0, "not RegionSize aligned");
504 538 assert(region_offset(end) == 0, "not RegionSize aligned");
505 539
506 540 size_t cur_region = addr_to_region_idx(beg);
507 541 const size_t end_region = addr_to_region_idx(end);
508 542 HeapWord* addr = beg;
509 543 while (cur_region < end_region) {
510 544 _region_data[cur_region].set_destination(addr);
511 545 _region_data[cur_region].set_destination_count(0);
512 546 _region_data[cur_region].set_source_region(cur_region);
513 547 _region_data[cur_region].set_data_location(addr);
514 548
515 549 // Update live_obj_size so the region appears completely full.
516 550 size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
517 551 _region_data[cur_region].set_live_obj_size(live_size);
518 552
519 553 ++cur_region;
520 554 addr += RegionSize;
521 555 }
522 556 }
523 557
524 558 // Find the point at which a space can be split and, if necessary, record the
525 559 // split point.
526 560 //
527 561 // If the current src region (which overflowed the destination space) doesn't
528 562 // have a partial object, the split point is at the beginning of the current src
529 563 // region (an "easy" split, no extra bookkeeping required).
530 564 //
531 565 // If the current src region has a partial object, the split point is in the
532 566 // region where that partial object starts (call it the split_region). If
533 567 // split_region has a partial object, then the split point is just after that
534 568 // partial object (a "hard" split where we have to record the split data and
535 569 // zero the partial_obj_size field). With a "hard" split, we know that the
536 570 // partial_obj ends within split_region because the partial object that caused
537 571 // the overflow starts in split_region. If split_region doesn't have a partial
538 572 // obj, then the split is at the beginning of split_region (another "easy"
539 573 // split).
540 574 HeapWord*
541 575 ParallelCompactData::summarize_split_space(size_t src_region,
542 576 SplitInfo& split_info,
543 577 HeapWord* destination,
544 578 HeapWord* target_end,
545 579 HeapWord** target_next)
546 580 {
547 581 assert(destination <= target_end, "sanity");
548 582 assert(destination + _region_data[src_region].data_size() > target_end,
549 583 "region should not fit into target space");
550 584 assert(is_region_aligned(target_end), "sanity");
551 585
552 586 size_t split_region = src_region;
553 587 HeapWord* split_destination = destination;
554 588 size_t partial_obj_size = _region_data[src_region].partial_obj_size();
555 589
556 590 if (destination + partial_obj_size > target_end) {
557 591 // The split point is just after the partial object (if any) in the
558 592 // src_region that contains the start of the object that overflowed the
559 593 // destination space.
560 594 //
561 595 // Find the start of the "overflow" object and set split_region to the
562 596 // region containing it.
563 597 HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
564 598 split_region = addr_to_region_idx(overflow_obj);
565 599
566 600 // Clear the source_region field of all destination regions whose first word
567 601 // came from data after the split point (a non-null source_region field
568 602 // implies a region must be filled).
569 603 //
570 604 // An alternative to the simple loop below: clear during post_compact(),
571 605 // which uses memcpy instead of individual stores, and is easy to
572 606 // parallelize. (The downside is that it clears the entire RegionData
573 607 // object as opposed to just one field.)
574 608 //
575 609 // post_compact() would have to clear the summary data up to the highest
576 610 // address that was written during the summary phase, which would be
577 611 //
578 612 // max(top, max(new_top, clear_top))
579 613 //
580 614 // where clear_top is a new field in SpaceInfo. Would have to set clear_top
581 615 // to target_end.
582 616 const RegionData* const sr = region(split_region);
583 617 const size_t beg_idx =
584 618 addr_to_region_idx(region_align_up(sr->destination() +
585 619 sr->partial_obj_size()));
586 620 const size_t end_idx = addr_to_region_idx(target_end);
587 621
588 622 if (TraceParallelOldGCSummaryPhase) {
589 623 gclog_or_tty->print_cr("split: clearing source_region field in ["
590 624 SIZE_FORMAT ", " SIZE_FORMAT ")",
591 625 beg_idx, end_idx);
592 626 }
593 627 for (size_t idx = beg_idx; idx < end_idx; ++idx) {
594 628 _region_data[idx].set_source_region(0);
595 629 }
596 630
597 631 // Set split_destination and partial_obj_size to reflect the split region.
598 632 split_destination = sr->destination();
599 633 partial_obj_size = sr->partial_obj_size();
600 634 }
601 635
602 636 // The split is recorded only if a partial object extends onto the region.
603 637 if (partial_obj_size != 0) {
604 638 _region_data[split_region].set_partial_obj_size(0);
605 639 split_info.record(split_region, partial_obj_size, split_destination);
606 640 }
607 641
608 642 // Setup the continuation addresses.
609 643 *target_next = split_destination + partial_obj_size;
610 644 HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
611 645
612 646 if (TraceParallelOldGCSummaryPhase) {
613 647 const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
614 648 gclog_or_tty->print_cr("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT
615 649 " pos=" SIZE_FORMAT,
616 650 split_type, source_next, split_region,
617 651 partial_obj_size);
618 652 gclog_or_tty->print_cr("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT
619 653 " tn=" PTR_FORMAT,
620 654 split_type, split_destination,
621 655 addr_to_region_idx(split_destination),
622 656 *target_next);
623 657
624 658 if (partial_obj_size != 0) {
625 659 HeapWord* const po_beg = split_info.destination();
626 660 HeapWord* const po_end = po_beg + split_info.partial_obj_size();
627 661 gclog_or_tty->print_cr("%s split: "
628 662 "po_beg=" PTR_FORMAT " " SIZE_FORMAT " "
629 663 "po_end=" PTR_FORMAT " " SIZE_FORMAT,
630 664 split_type,
631 665 po_beg, addr_to_region_idx(po_beg),
632 666 po_end, addr_to_region_idx(po_end));
633 667 }
634 668 }
635 669
636 670 return source_next;
637 671 }
638 672
639 673 bool ParallelCompactData::summarize(SplitInfo& split_info,
640 674 HeapWord* source_beg, HeapWord* source_end,
641 675 HeapWord** source_next,
642 676 HeapWord* target_beg, HeapWord* target_end,
643 677 HeapWord** target_next)
644 678 {
645 679 if (TraceParallelOldGCSummaryPhase) {
646 680 HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
647 681 tty->print_cr("sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
648 682 "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
649 683 source_beg, source_end, source_next_val,
650 684 target_beg, target_end, *target_next);
651 685 }
652 686
653 687 size_t cur_region = addr_to_region_idx(source_beg);
654 688 const size_t end_region = addr_to_region_idx(region_align_up(source_end));
655 689
656 690 HeapWord *dest_addr = target_beg;
657 691 while (cur_region < end_region) {
658 692 // The destination must be set even if the region has no data.
659 693 _region_data[cur_region].set_destination(dest_addr);
660 694
661 695 size_t words = _region_data[cur_region].data_size();
662 696 if (words > 0) {
663 697 // If cur_region does not fit entirely into the target space, find a point
664 698 // at which the source space can be 'split' so that part is copied to the
665 699 // target space and the rest is copied elsewhere.
666 700 if (dest_addr + words > target_end) {
667 701 assert(source_next != NULL, "source_next is NULL when splitting");
668 702 *source_next = summarize_split_space(cur_region, split_info, dest_addr,
669 703 target_end, target_next);
670 704 return false;
671 705 }
672 706
673 707 // Compute the destination_count for cur_region, and if necessary, update
674 708 // source_region for a destination region. The source_region field is
675 709 // updated if cur_region is the first (left-most) region to be copied to a
676 710 // destination region.
677 711 //
678 712 // The destination_count calculation is a bit subtle. A region that has
679 713 // data that compacts into itself does not count itself as a destination.
680 714 // This maintains the invariant that a zero count means the region is
681 715 // available and can be claimed and then filled.
682 716 uint destination_count = 0;
683 717 if (split_info.is_split(cur_region)) {
684 718 // The current region has been split: the partial object will be copied
685 719 // to one destination space and the remaining data will be copied to
686 720 // another destination space. Adjust the initial destination_count and,
687 721 // if necessary, set the source_region field if the partial object will
688 722 // cross a destination region boundary.
689 723 destination_count = split_info.destination_count();
690 724 if (destination_count == 2) {
691 725 size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
692 726 _region_data[dest_idx].set_source_region(cur_region);
693 727 }
694 728 }
695 729
696 730 HeapWord* const last_addr = dest_addr + words - 1;
697 731 const size_t dest_region_1 = addr_to_region_idx(dest_addr);
698 732 const size_t dest_region_2 = addr_to_region_idx(last_addr);
699 733
700 734 // Initially assume that the destination regions will be the same and
701 735 // adjust the value below if necessary. Under this assumption, if
702 736 // cur_region == dest_region_2, then cur_region will be compacted
703 737 // completely into itself.
704 738 destination_count += cur_region == dest_region_2 ? 0 : 1;
705 739 if (dest_region_1 != dest_region_2) {
706 740 // Destination regions differ; adjust destination_count.
707 741 destination_count += 1;
708 742 // Data from cur_region will be copied to the start of dest_region_2.
709 743 _region_data[dest_region_2].set_source_region(cur_region);
710 744 } else if (region_offset(dest_addr) == 0) {
711 745 // Data from cur_region will be copied to the start of the destination
712 746 // region.
713 747 _region_data[dest_region_1].set_source_region(cur_region);
714 748 }
715 749
716 750 _region_data[cur_region].set_destination_count(destination_count);
717 751 _region_data[cur_region].set_data_location(region_to_addr(cur_region));
718 752 dest_addr += words;
719 753 }
... 258 lines elided ...
720 754
721 755 ++cur_region;
722 756 }
723 757
724 758 *target_next = dest_addr;
725 759 return true;
726 760 }
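
The destination_count invariant described above can be illustrated with a small standalone sketch (hypothetical region indices; the split case handled by SplitInfo is ignored): a region whose data compacts entirely into itself counts no destinations, so a zero count marks it as immediately claimable and fillable.

#include <stddef.h>
#include <stdio.h>

// cur is the source region; dest_1/dest_2 are the regions holding the first
// and last words of its copied data.
static unsigned count_destinations(size_t cur, size_t dest_1, size_t dest_2) {
  unsigned count = (cur == dest_2) ? 0 : 1;   // self-compaction is not counted
  if (dest_1 != dest_2) {
    count += 1;                               // data straddles two destination regions
  }
  return count;
}

int main() {
  printf("compacts into itself:    %u\n", count_destinations(10, 10, 10)); // 0
  printf("one destination region:  %u\n", count_destinations(10, 8, 8));   // 1
  printf("two destination regions: %u\n", count_destinations(10, 8, 9));   // 2
  return 0;
}
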
727 761
728 762 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
729 763 assert(addr != NULL, "Should detect NULL oop earlier");
730 - assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
731 -#ifdef ASSERT
732 - if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
733 - gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
734 - }
735 -#endif
736 - assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
764 + assert(PSParallelCompact::gc_heap()->is_in(addr), "not in heap");
765 + assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
737 766
738 767 // Region covering the object.
739 - size_t region_index = addr_to_region_idx(addr);
740 - const RegionData* const region_ptr = region(region_index);
741 - HeapWord* const region_addr = region_align_down(addr);
742 -
743 - assert(addr < region_addr + RegionSize, "Region does not cover object");
744 - assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");
745 -
768 + RegionData* const region_ptr = addr_to_region_ptr(addr);
746 769 HeapWord* result = region_ptr->destination();
747 770
748 - // If all the data in the region is live, then the new location of the object
749 - // can be calculated from the destination of the region plus the offset of the
750 - // object in the region.
771 + // If the entire Region is live, the new location is region->destination + the
772 + // offset of the object within in the Region.
773 +
774 + // Run some performance tests to determine if this special case pays off. It
775 + // is worth it for pointers into the dense prefix. If the optimization to
776 + // avoid pointer updates in regions that only point to the dense prefix is
777 + // ever implemented, this should be revisited.
751 778 if (region_ptr->data_size() == RegionSize) {
752 - result += pointer_delta(addr, region_addr);
753 - DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
779 + result += region_offset(addr);
754 780 return result;
755 781 }
756 782
757 - // The new location of the object is
758 - // region destination +
759 - // size of the partial object extending onto the region +
760 - // sizes of the live objects in the Region that are to the left of addr
761 - const size_t partial_obj_size = region_ptr->partial_obj_size();
762 - HeapWord* const search_start = region_addr + partial_obj_size;
783 + // Otherwise, the new location is region->destination + block offset + the
784 + // number of live words in the Block that are (a) to the left of addr and (b)
785 + // due to objects that start in the Block.
763 786
764 - const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
765 - size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
787 + // Fill in the block table if necessary. This is unsynchronized, so multiple
788 + // threads may fill the block table for a region (harmless, since it is
789 + // idempotent).
790 + if (!region_ptr->blocks_filled()) {
791 + PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
792 + region_ptr->set_blocks_filled();
793 + }
766 794
767 - result += partial_obj_size + live_to_left;
768 - DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
795 + HeapWord* const search_start = block_align_down(addr);
796 + const size_t block_offset = addr_to_block_ptr(addr)->offset();
797 +
798 + const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
799 + const size_t live = bitmap->live_words_in_range(search_start, oop(addr));
800 + result += block_offset + live;
801 + DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
769 802 return result;
770 803 }
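
A schematic of the forwarding computation above, with hypothetical values (not HotSpot code): the block table limits the bitmap scan to at most one 128-word block, since live_words_in_range() now starts at block_align_down(addr) instead of at the end of the region's partial object.

#include <stddef.h>
#include <stdio.h>

int main() {
  // All values are hypothetical word indices.
  size_t region_destination = 0x10000;  // region_ptr->destination()
  size_t block_offset       = 350;      // addr_to_block_ptr(addr)->offset(), filled by fill_blocks()
  size_t live_before_addr   = 12;       // live words in the block to the left of addr, from
                                        // bitmap->live_words_in_range(search_start, oop(addr))
  size_t new_location = region_destination + block_offset + live_before_addr;
  printf("new location = %#zx\n", new_location);
  return 0;
}
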
771 804
772 805 klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
773 806 klassOop updated_klass;
774 807 if (PSParallelCompact::should_update_klass(old_klass)) {
775 808 updated_klass = (klassOop) calc_new_pointer(old_klass);
776 809 } else {
777 810 updated_klass = old_klass;
778 811 }
779 812
780 813 return updated_klass;
781 814 }
782 815
783 -#ifdef ASSERT
816 +#ifdef ASSERT
784 817 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
785 818 {
786 819 const size_t* const beg = (const size_t*)vspace->committed_low_addr();
787 820 const size_t* const end = (const size_t*)vspace->committed_high_addr();
788 821 for (const size_t* p = beg; p < end; ++p) {
789 822 assert(*p == 0, "not zero");
790 823 }
791 824 }
792 825
793 826 void ParallelCompactData::verify_clear()
794 827 {
795 828 verify_clear(_region_vspace);
829 + verify_clear(_block_vspace);
796 830 }
797 831 #endif // #ifdef ASSERT
798 832
799 -#ifdef NOT_PRODUCT
800 -ParallelCompactData::RegionData* debug_region(size_t region_index) {
801 - ParallelCompactData& sd = PSParallelCompact::summary_data();
802 - return sd.region(region_index);
803 -}
804 -#endif
805 -
806 833 STWGCTimer PSParallelCompact::_gc_timer;
807 834 ParallelOldTracer PSParallelCompact::_gc_tracer;
808 835 elapsedTimer PSParallelCompact::_accumulated_time;
809 836 unsigned int PSParallelCompact::_total_invocations = 0;
810 837 unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
811 838 jlong PSParallelCompact::_time_of_last_gc = 0;
812 839 CollectorCounters* PSParallelCompact::_counters = NULL;
813 840 ParMarkBitMap PSParallelCompact::_mark_bitmap;
814 841 ParallelCompactData PSParallelCompact::_summary_data;
815 842
816 843 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
817 844
818 845 void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
819 846 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
820 847
821 848 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
822 849 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
823 850
824 851 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
825 852 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
826 853
827 854 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
828 855 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
829 856
830 857 void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
831 858
832 859 void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
833 860 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
834 861
835 862 void PSParallelCompact::post_initialize() {
836 863 ParallelScavengeHeap* heap = gc_heap();
837 864 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
838 865
839 866 MemRegion mr = heap->reserved_region();
840 867 _ref_processor =
841 868 new ReferenceProcessor(mr, // span
842 869 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
843 870 (int) ParallelGCThreads, // mt processing degree
844 871 true, // mt discovery
845 872 (int) ParallelGCThreads, // mt discovery degree
846 873 true, // atomic_discovery
847 874 &_is_alive_closure, // non-header is alive closure
848 875 false); // write barrier for next field updates
849 876 _counters = new CollectorCounters("PSParallelCompact", 1);
850 877
851 878 // Initialize static fields in ParCompactionManager.
852 879 ParCompactionManager::initialize(mark_bitmap());
853 880 }
854 881
855 882 bool PSParallelCompact::initialize() {
856 883 ParallelScavengeHeap* heap = gc_heap();
857 884 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
858 885 MemRegion mr = heap->reserved_region();
859 886
860 887 // Did the old gen get allocated successfully?
861 888 if (!heap->old_gen()->is_allocated()) {
862 889 return false;
863 890 }
864 891
865 892 initialize_space_info();
866 893 initialize_dead_wood_limiter();
867 894
868 895 if (!_mark_bitmap.initialize(mr)) {
869 896 vm_shutdown_during_initialization("Unable to allocate bit map for "
870 897 "parallel garbage collection for the requested heap size.");
871 898 return false;
872 899 }
873 900
874 901 if (!_summary_data.initialize(mr)) {
875 902 vm_shutdown_during_initialization("Unable to allocate tables for "
876 903 "parallel garbage collection for the requested heap size.");
877 904 return false;
878 905 }
879 906
880 907 return true;
881 908 }
882 909
883 910 void PSParallelCompact::initialize_space_info()
884 911 {
885 912 memset(&_space_info, 0, sizeof(_space_info));
886 913
887 914 ParallelScavengeHeap* heap = gc_heap();
888 915 PSYoungGen* young_gen = heap->young_gen();
889 916 MutableSpace* perm_space = heap->perm_gen()->object_space();
890 917
891 918 _space_info[perm_space_id].set_space(perm_space);
892 919 _space_info[old_space_id].set_space(heap->old_gen()->object_space());
893 920 _space_info[eden_space_id].set_space(young_gen->eden_space());
894 921 _space_info[from_space_id].set_space(young_gen->from_space());
895 922 _space_info[to_space_id].set_space(young_gen->to_space());
896 923
897 924 _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
898 925 _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
899 926
900 927 _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
901 928 if (TraceParallelOldGCDensePrefix) {
902 929 tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
903 930 _space_info[perm_space_id].min_dense_prefix());
904 931 }
905 932 }
906 933
907 934 void PSParallelCompact::initialize_dead_wood_limiter()
908 935 {
909 936 const size_t max = 100;
910 937 _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
911 938 _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
912 939 _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
913 940 DEBUG_ONLY(_dwl_initialized = true;)
914 941 _dwl_adjustment = normal_distribution(1.0);
915 942 }
916 943
917 944 // Simple class for storing info about the heap at the start of GC, to be used
918 945 // after GC for comparison/printing.
919 946 class PreGCValues {
920 947 public:
921 948 PreGCValues() { }
922 949 PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }
923 950
924 951 void fill(ParallelScavengeHeap* heap) {
925 952 _heap_used = heap->used();
926 953 _young_gen_used = heap->young_gen()->used_in_bytes();
927 954 _old_gen_used = heap->old_gen()->used_in_bytes();
928 955 _perm_gen_used = heap->perm_gen()->used_in_bytes();
929 956 };
930 957
931 958 size_t heap_used() const { return _heap_used; }
932 959 size_t young_gen_used() const { return _young_gen_used; }
933 960 size_t old_gen_used() const { return _old_gen_used; }
934 961 size_t perm_gen_used() const { return _perm_gen_used; }
935 962
936 963 private:
937 964 size_t _heap_used;
938 965 size_t _young_gen_used;
939 966 size_t _old_gen_used;
940 967 size_t _perm_gen_used;
941 968 };
942 969
943 970 void
944 971 PSParallelCompact::clear_data_covering_space(SpaceId id)
945 972 {
946 973 // At this point, top is the value before GC, new_top() is the value that will
947 974 // be set at the end of GC. The marking bitmap is cleared to top; nothing
948 975 // should be marked above top. The summary data is cleared to the larger of
949 976 // top & new_top.
950 977 MutableSpace* const space = _space_info[id].space();
951 978 HeapWord* const bot = space->bottom();
952 979 HeapWord* const top = space->top();
953 980 HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
954 981
955 982 const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
956 983 const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
957 984 _mark_bitmap.clear_range(beg_bit, end_bit);
958 985
959 986 const size_t beg_region = _summary_data.addr_to_region_idx(bot);
960 987 const size_t end_region =
961 988 _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
962 989 _summary_data.clear_range(beg_region, end_region);
963 990
964 991 // Clear the data used to 'split' regions.
965 992 SplitInfo& split_info = _space_info[id].split_info();
966 993 if (split_info.is_valid()) {
967 994 split_info.clear();
968 995 }
969 996 DEBUG_ONLY(split_info.verify_clear();)
970 997 }
971 998
972 999 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
973 1000 {
974 1001 // Update the from & to space pointers in space_info, since they are swapped
975 1002 // at each young gen gc. Do the update unconditionally (even though a
976 1003 // promotion failure does not swap spaces) because an unknown number of minor
977 1004 // collections will have swapped the spaces an unknown number of times.
978 1005 GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
979 1006 ParallelScavengeHeap* heap = gc_heap();
980 1007 _space_info[from_space_id].set_space(heap->young_gen()->from_space());
981 1008 _space_info[to_space_id].set_space(heap->young_gen()->to_space());
982 1009
983 1010 pre_gc_values->fill(heap);
984 1011
985 1012 ParCompactionManager::reset();
986 1013 NOT_PRODUCT(_mark_bitmap.reset_counters());
987 1014 DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
988 1015 DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
989 1016
990 1017 // Increment the invocation count
991 1018 heap->increment_total_collections(true);
992 1019
993 1020 // We need to track unique mark sweep invocations as well.
994 1021 _total_invocations++;
995 1022
996 1023 heap->print_heap_before_gc();
997 1024 heap->trace_heap_before_gc(&_gc_tracer);
998 1025
999 1026 // Fill in TLABs
1000 1027 heap->accumulate_statistics_all_tlabs();
1001 1028 heap->ensure_parsability(true); // retire TLABs
1002 1029
1003 1030 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
1004 1031 HandleMark hm; // Discard invalid handles created during verification
1005 1032 gclog_or_tty->print(" VerifyBeforeGC:");
1006 1033 Universe::verify();
1007 1034 }
1008 1035
1009 1036 // Verify object start arrays
1010 1037 if (VerifyObjectStartArray &&
1011 1038 VerifyBeforeGC) {
1012 1039 heap->old_gen()->verify_object_start_array();
1013 1040 heap->perm_gen()->verify_object_start_array();
1014 1041 }
1015 1042
1016 1043 DEBUG_ONLY(mark_bitmap()->verify_clear();)
1017 1044 DEBUG_ONLY(summary_data().verify_clear();)
1018 1045
1019 1046 // Have worker threads release resources the next time they run a task.
1020 1047 gc_task_manager()->release_all_resources();
1021 1048 }
1022 1049
1023 1050 void PSParallelCompact::post_compact()
1024 1051 {
1025 1052 GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
1026 1053
1027 1054 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
1028 1055 // Clear the marking bitmap, summary data and split info.
1029 1056 clear_data_covering_space(SpaceId(id));
1030 1057 // Update top(). Must be done after clearing the bitmap and summary data.
1031 1058 _space_info[id].publish_new_top();
1032 1059 }
1033 1060
1034 1061 MutableSpace* const eden_space = _space_info[eden_space_id].space();
1035 1062 MutableSpace* const from_space = _space_info[from_space_id].space();
1036 1063 MutableSpace* const to_space = _space_info[to_space_id].space();
1037 1064
1038 1065 ParallelScavengeHeap* heap = gc_heap();
1039 1066 bool eden_empty = eden_space->is_empty();
1040 1067 if (!eden_empty) {
1041 1068 eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1042 1069 heap->young_gen(), heap->old_gen());
1043 1070 }
1044 1071
1045 1072 // Update heap occupancy information which is used as input to the soft ref
1046 1073 // clearing policy at the next gc.
1047 1074 Universe::update_heap_info_at_gc();
1048 1075
1049 1076 bool young_gen_empty = eden_empty && from_space->is_empty() &&
1050 1077 to_space->is_empty();
1051 1078
1052 1079 BarrierSet* bs = heap->barrier_set();
1053 1080 if (bs->is_a(BarrierSet::ModRef)) {
1054 1081 ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
1055 1082 MemRegion old_mr = heap->old_gen()->reserved();
1056 1083 MemRegion perm_mr = heap->perm_gen()->reserved();
1057 1084 assert(perm_mr.end() <= old_mr.start(), "Generations out of order");
1058 1085
1059 1086 if (young_gen_empty) {
1060 1087 modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
1061 1088 } else {
1062 1089 modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
1063 1090 }
1064 1091 }
1065 1092
1066 1093 Threads::gc_epilogue();
1067 1094 CodeCache::gc_epilogue();
1068 1095 JvmtiExport::gc_epilogue();
1069 1096
1070 1097 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1071 1098
1072 1099 ref_processor()->enqueue_discovered_references(NULL);
1073 1100
1074 1101 if (ZapUnusedHeapArea) {
1075 1102 heap->gen_mangle_unused_area();
1076 1103 }
1077 1104
1078 1105 // Update time of last GC
1079 1106 reset_millis_since_last_gc();
1080 1107 }
1081 1108
1082 1109 HeapWord*
1083 1110 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
1084 1111 bool maximum_compaction)
1085 1112 {
1086 1113 const size_t region_size = ParallelCompactData::RegionSize;
1087 1114 const ParallelCompactData& sd = summary_data();
1088 1115
1089 1116 const MutableSpace* const space = _space_info[id].space();
1090 1117 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
1091 1118 const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
1092 1119 const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
1093 1120
1094 1121 // Skip full regions at the beginning of the space--they are necessarily part
1095 1122 // of the dense prefix.
1096 1123 size_t full_count = 0;
1097 1124 const RegionData* cp;
1098 1125 for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
1099 1126 ++full_count;
1100 1127 }
1101 1128
1102 1129 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1103 1130 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1104 1131 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
1105 1132 if (maximum_compaction || cp == end_cp || interval_ended) {
1106 1133 _maximum_compaction_gc_num = total_invocations();
1107 1134 return sd.region_to_addr(cp);
1108 1135 }
1109 1136
1110 1137 HeapWord* const new_top = _space_info[id].new_top();
1111 1138 const size_t space_live = pointer_delta(new_top, space->bottom());
1112 1139 const size_t space_used = space->used_in_words();
1113 1140 const size_t space_capacity = space->capacity_in_words();
1114 1141
1115 1142 const double cur_density = double(space_live) / space_capacity;
1116 1143 const double deadwood_density =
1117 1144 (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
1118 1145 const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
1119 1146
1120 1147 if (TraceParallelOldGCDensePrefix) {
1121 1148 tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
1122 1149 cur_density, deadwood_density, deadwood_goal);
1123 1150 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1124 1151 "space_cap=" SIZE_FORMAT,
1125 1152 space_live, space_used,
1126 1153 space_capacity);
1127 1154 }
1128 1155
1129 1156 // XXX - Use binary search?
1130 1157 HeapWord* dense_prefix = sd.region_to_addr(cp);
1131 1158 const RegionData* full_cp = cp;
1132 1159 const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
1133 1160 while (cp < end_cp) {
1134 1161 HeapWord* region_destination = cp->destination();
1135 1162 const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
1136 1163 if (TraceParallelOldGCDensePrefix && Verbose) {
1137 1164 tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
1138 1165 "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
1139 1166 sd.region(cp), region_destination,
1140 1167 dense_prefix, cur_deadwood);
1141 1168 }
1142 1169
1143 1170 if (cur_deadwood >= deadwood_goal) {
1144 1171 // Found the region that has the correct amount of deadwood to the left.
1145 1172 // This typically occurs after crossing a fairly sparse set of regions, so
1146 1173 // iterate backwards over those sparse regions, looking for the region
1147 1174 // that has the lowest density of live objects 'to the right.'
1148 1175 size_t space_to_left = sd.region(cp) * region_size;
1149 1176 size_t live_to_left = space_to_left - cur_deadwood;
1150 1177 size_t space_to_right = space_capacity - space_to_left;
1151 1178 size_t live_to_right = space_live - live_to_left;
1152 1179 double density_to_right = double(live_to_right) / space_to_right;
1153 1180 while (cp > full_cp) {
1154 1181 --cp;
1155 1182 const size_t prev_region_live_to_right = live_to_right -
1156 1183 cp->data_size();
1157 1184 const size_t prev_region_space_to_right = space_to_right + region_size;
1158 1185 double prev_region_density_to_right =
1159 1186 double(prev_region_live_to_right) / prev_region_space_to_right;
1160 1187 if (density_to_right <= prev_region_density_to_right) {
1161 1188 return dense_prefix;
1162 1189 }
1163 1190 if (TraceParallelOldGCDensePrefix && Verbose) {
1164 1191 tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
1165 1192 "pc_d2r=%10.8f", sd.region(cp), density_to_right,
1166 1193 prev_region_density_to_right);
1167 1194 }
1168 1195 dense_prefix -= region_size;
1169 1196 live_to_right = prev_region_live_to_right;
1170 1197 space_to_right = prev_region_space_to_right;
1171 1198 density_to_right = prev_region_density_to_right;
1172 1199 }
1173 1200 return dense_prefix;
1174 1201 }
1175 1202
1176 1203 dense_prefix += region_size;
1177 1204 ++cp;
1178 1205 }
1179 1206
1180 1207 return dense_prefix;
1181 1208 }
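
The dead-wood goal used above follows directly from the live density d: deadwood_density = (1 - d)^2 * d^2, which peaks at d = 0.5. A standalone sketch (hypothetical capacity, not HotSpot code) of the resulting goals:

#include <stdio.h>

int main() {
  const double space_capacity = 1 << 20;                       // hypothetical, in heap words
  for (double d = 0.1; d < 1.0; d += 0.2) {
    double deadwood_density = (1.0 - d) * (1.0 - d) * d * d;   // (1-d)^2 * d^2
    double deadwood_goal    = space_capacity * deadwood_density;
    printf("live density %.1f -> deadwood goal %.0f words\n", d, deadwood_goal);
  }
  return 0;
}
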
1182 1209
1183 1210 #ifndef PRODUCT
1184 1211 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
1185 1212 const SpaceId id,
1186 1213 const bool maximum_compaction,
1187 1214 HeapWord* const addr)
1188 1215 {
1189 1216 const size_t region_idx = summary_data().addr_to_region_idx(addr);
1190 1217 RegionData* const cp = summary_data().region(region_idx);
1191 1218 const MutableSpace* const space = _space_info[id].space();
1192 1219 HeapWord* const new_top = _space_info[id].new_top();
1193 1220
1194 1221 const size_t space_live = pointer_delta(new_top, space->bottom());
1195 1222 const size_t dead_to_left = pointer_delta(addr, cp->destination());
1196 1223 const size_t space_cap = space->capacity_in_words();
1197 1224 const double dead_to_left_pct = double(dead_to_left) / space_cap;
1198 1225 const size_t live_to_right = new_top - cp->destination();
1199 1226 const size_t dead_to_right = space->top() - addr - live_to_right;
1200 1227
1201 1228 tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
1202 1229 "spl=" SIZE_FORMAT " "
1203 1230 "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
1204 1231 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
1205 1232 " ratio=%10.8f",
1206 1233 algorithm, addr, region_idx,
1207 1234 space_live,
1208 1235 dead_to_left, dead_to_left_pct,
1209 1236 dead_to_right, live_to_right,
1210 1237 double(dead_to_right) / live_to_right);
1211 1238 }
1212 1239 #endif // #ifndef PRODUCT
1213 1240
1214 1241 // Return a fraction indicating how much of the generation can be treated as
1215 1242 // "dead wood" (i.e., not reclaimed). The function uses a normal distribution
1216 1243 // based on the density of live objects in the generation to determine a limit,
1217 1244 // which is then adjusted so the return value is min_percent (as a fraction)
1218 1245 // when the density is 1.
1219 1246 //
1220 1247 // The following table shows some return values for different values of the
1221 1248 // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
1222 1249 // min_percent is 1.
1223 1250 //
1224 1251 // fraction allowed as dead wood
1225 1252 // -----------------------------------------------------------------
1226 1253 // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
1227 1254 // ------- ---------- ---------- ---------- ---------- ---------- ----------
1228 1255 // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1229 1256 // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1230 1257 // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1231 1258 // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1232 1259 // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1233 1260 // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1234 1261 // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1235 1262 // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1236 1263 // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1237 1264 // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1238 1265 // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
1239 1266 // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1240 1267 // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1241 1268 // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1242 1269 // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1243 1270 // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1244 1271 // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1245 1272 // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1246 1273 // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1247 1274 // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1248 1275 // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1249 1276
1250 1277 double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
1251 1278 {
1252 1279 assert(_dwl_initialized, "uninitialized");
1253 1280
1254 1281 // The raw limit is the value of the normal distribution at x = density.
1255 1282 const double raw_limit = normal_distribution(density);
1256 1283
1257 1284 // Adjust the raw limit so it becomes the minimum when the density is 1.
1258 1285 //
1259 1286 // First subtract the adjustment value (which is simply the precomputed value
1260 1287 // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
1261 1288 // Then add the minimum value, so the minimum is returned when the density is
1262 1289 // 1. Finally, prevent negative values, which occur when the mean is not 0.5.
1263 1290 const double min = double(min_percent) / 100.0;
1264 1291 const double limit = raw_limit - _dwl_adjustment + min;
1265 1292 return MAX2(limit, 0.0);
1266 1293 }
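#if 0
// Illustrative sketch only; not part of this change.  It restates the limit
// computed by dead_wood_limiter() as a standalone function (the name is made
// up), assuming the mean is 0.5 and that the std dev flag is interpreted as a
// percentage (80 means 0.80).  With std_dev = 0.80 and min_percent = 1 it
// returns ~0.0985 for a density of 0.5, matching the table above.  Uses only
// exp/sqrt/M_PI from <math.h>.
static double example_dead_wood_limit(double density, double std_dev,
                                      size_t min_percent) {
  const double mean = 0.5;
  const double first_term = 1.0 / (std_dev * sqrt(2.0 * M_PI));

  // Normal distribution evaluated at the given density and at density 1.0.
  const double d0 = (density - mean) / std_dev;
  const double raw_limit = first_term * exp(-0.5 * d0 * d0);
  const double d1 = (1.0 - mean) / std_dev;
  const double adjustment = first_term * exp(-0.5 * d1 * d1);

  // Shift the curve so it bottoms out at min_percent, then clamp at zero.
  const double limit = raw_limit - adjustment + double(min_percent) / 100.0;
  return limit > 0.0 ? limit : 0.0;
}
#endif // #if 0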
1267 1294
1268 1295 ParallelCompactData::RegionData*
1269 1296 PSParallelCompact::first_dead_space_region(const RegionData* beg,
1270 1297 const RegionData* end)
1271 1298 {
1272 1299 const size_t region_size = ParallelCompactData::RegionSize;
1273 1300 ParallelCompactData& sd = summary_data();
1274 1301 size_t left = sd.region(beg);
1275 1302 size_t right = end > beg ? sd.region(end) - 1 : left;
1276 1303
1277 1304 // Binary search.
1278 1305 while (left < right) {
1279 1306 // Equivalent to (left + right) / 2, but does not overflow.
1280 1307 const size_t middle = left + (right - left) / 2;
1281 1308 RegionData* const middle_ptr = sd.region(middle);
1282 1309 HeapWord* const dest = middle_ptr->destination();
1283 1310 HeapWord* const addr = sd.region_to_addr(middle);
1284 1311 assert(dest != NULL, "sanity");
1285 1312 assert(dest <= addr, "must move left");
1286 1313
1287 1314 if (middle > left && dest < addr) {
1288 1315 right = middle - 1;
1289 1316 } else if (middle < right && middle_ptr->data_size() == region_size) {
1290 1317 left = middle + 1;
1291 1318 } else {
1292 1319 return middle_ptr;
1293 1320 }
1294 1321 }
1295 1322 return sd.region(left);
1296 1323 }
1297 1324
1298 1325 ParallelCompactData::RegionData*
1299 1326 PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
1300 1327 const RegionData* end,
1301 1328 size_t dead_words)
1302 1329 {
1303 1330 ParallelCompactData& sd = summary_data();
1304 1331 size_t left = sd.region(beg);
1305 1332 size_t right = end > beg ? sd.region(end) - 1 : left;
1306 1333
1307 1334 // Binary search.
1308 1335 while (left < right) {
1309 1336 // Equivalent to (left + right) / 2, but does not overflow.
1310 1337 const size_t middle = left + (right - left) / 2;
1311 1338 RegionData* const middle_ptr = sd.region(middle);
1312 1339 HeapWord* const dest = middle_ptr->destination();
1313 1340 HeapWord* const addr = sd.region_to_addr(middle);
1314 1341 assert(dest != NULL, "sanity");
1315 1342 assert(dest <= addr, "must move left");
1316 1343
1317 1344 const size_t dead_to_left = pointer_delta(addr, dest);
1318 1345 if (middle > left && dead_to_left > dead_words) {
1319 1346 right = middle - 1;
1320 1347 } else if (middle < right && dead_to_left < dead_words) {
1321 1348 left = middle + 1;
1322 1349 } else {
1323 1350 return middle_ptr;
1324 1351 }
1325 1352 }
1326 1353 return sd.region(left);
1327 1354 }
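#if 0
// Illustrative sketch only; not part of this change.  The search above is
// essentially a lower-bound search: the dead space to the left of a region
// never decreases as the region index grows, so the loop homes in on the
// first region whose accumulated dead space reaches dead_words.  The same
// idea on a plain array of cumulative values, for comparison (names made up):
static size_t example_lower_bound(const size_t* cumulative_dead, size_t count,
                                  size_t dead_words) {
  size_t left = 0;
  size_t right = count > 0 ? count - 1 : 0;
  while (left < right) {
    const size_t middle = left + (right - left) / 2;  // no overflow
    if (cumulative_dead[middle] < dead_words) {
      left = middle + 1;   // not enough dead space yet; look to the right
    } else {
      right = middle;      // enough dead space; keep looking to the left
    }
  }
  return left;             // first index with cumulative_dead[i] >= dead_words
}
#endif // #if 0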
1328 1355
1329 1356 // The result is valid during the summary phase, after the initial summarization
1330 1357 // of each space into itself, and before final summarization.
1331 1358 inline double
1332 1359 PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
1333 1360 HeapWord* const bottom,
1334 1361 HeapWord* const top,
1335 1362 HeapWord* const new_top)
1336 1363 {
1337 1364 ParallelCompactData& sd = summary_data();
1338 1365
1339 1366 assert(cp != NULL, "sanity");
1340 1367 assert(bottom != NULL, "sanity");
1341 1368 assert(top != NULL, "sanity");
1342 1369 assert(new_top != NULL, "sanity");
1343 1370 assert(top >= new_top, "summary data problem?");
1344 1371 assert(new_top > bottom, "space is empty; should not be here");
1345 1372 assert(new_top >= cp->destination(), "sanity");
1346 1373 assert(top >= sd.region_to_addr(cp), "sanity");
1347 1374
1348 1375 HeapWord* const destination = cp->destination();
1349 1376 const size_t dense_prefix_live = pointer_delta(destination, bottom);
1350 1377 const size_t compacted_region_live = pointer_delta(new_top, destination);
1351 1378 const size_t compacted_region_used = pointer_delta(top,
1352 1379 sd.region_to_addr(cp));
1353 1380 const size_t reclaimable = compacted_region_used - compacted_region_live;
1354 1381
1355 1382 const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
1356 1383 return double(reclaimable) / divisor;
1357 1384 }
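// Illustrative example only; not part of this change.  With made-up numbers:
// if 100 words are live in the dense prefix, 200 words are live in the
// compacted region and 300 words are used there, then reclaimable = 300 - 200
// = 100 and the divisor is 100 + 1.25 * 200 = 350, giving a ratio of about
// 0.29.  The 1.25 weight makes live data that must be copied count more
// heavily than live data left in place in the dense prefix.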
1358 1385
1359 1386 // Return the address of the end of the dense prefix, a.k.a. the start of the
1360 1387 // compacted region. The address is always on a region boundary.
1361 1388 //
1362 1389 // Completely full regions at the left are skipped, since no compaction can
1363 1390 // occur in those regions. Then the maximum amount of dead wood to allow is
1364 1391 // computed, based on the density (amount live / capacity) of the generation;
1365 1392 // the region with approximately that amount of dead space to the left is
1366 1393 // identified as the limit region. Regions between the last completely full
1367 1394 // region and the limit region are scanned and the one that has the best
1368 1395 // (maximum) reclaimed_ratio() is selected.
1369 1396 HeapWord*
1370 1397 PSParallelCompact::compute_dense_prefix(const SpaceId id,
1371 1398 bool maximum_compaction)
1372 1399 {
1373 1400 if (ParallelOldGCSplitALot) {
1374 1401 if (_space_info[id].dense_prefix() != _space_info[id].space()->bottom()) {
1375 1402 // The value was chosen to provoke splitting a young gen space; use it.
1376 1403 return _space_info[id].dense_prefix();
1377 1404 }
1378 1405 }
1379 1406
1380 1407 const size_t region_size = ParallelCompactData::RegionSize;
1381 1408 const ParallelCompactData& sd = summary_data();
1382 1409
1383 1410 const MutableSpace* const space = _space_info[id].space();
1384 1411 HeapWord* const top = space->top();
1385 1412 HeapWord* const top_aligned_up = sd.region_align_up(top);
1386 1413 HeapWord* const new_top = _space_info[id].new_top();
1387 1414 HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
1388 1415 HeapWord* const bottom = space->bottom();
1389 1416 const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
1390 1417 const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
1391 1418 const RegionData* const new_top_cp =
1392 1419 sd.addr_to_region_ptr(new_top_aligned_up);
1393 1420
1394 1421 // Skip full regions at the beginning of the space--they are necessarily part
1395 1422 // of the dense prefix.
1396 1423 const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
1397 1424 assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
1398 1425 space->is_empty(), "no dead space allowed to the left");
1399 1426 assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
1400 1427 "region must have dead space");
1401 1428
1402 1429 // The gc number is saved whenever a maximum compaction is done, and used to
1403 1430 // determine when the maximum compaction interval has expired. This avoids
1404 1431 // successive max compactions for different reasons.
1405 1432 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1406 1433 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1407 1434 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
1408 1435 total_invocations() == HeapFirstMaximumCompactionCount;
1409 1436 if (maximum_compaction || full_cp == top_cp || interval_ended) {
1410 1437 _maximum_compaction_gc_num = total_invocations();
1411 1438 return sd.region_to_addr(full_cp);
1412 1439 }
1413 1440
1414 1441 const size_t space_live = pointer_delta(new_top, bottom);
1415 1442 const size_t space_used = space->used_in_words();
1416 1443 const size_t space_capacity = space->capacity_in_words();
1417 1444
1418 1445 const double density = double(space_live) / double(space_capacity);
1419 1446 const size_t min_percent_free =
1420 1447 id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
1421 1448 const double limiter = dead_wood_limiter(density, min_percent_free);
1422 1449 const size_t dead_wood_max = space_used - space_live;
1423 1450 const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
1424 1451 dead_wood_max);
1425 1452
1426 1453 if (TraceParallelOldGCDensePrefix) {
1427 1454 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1428 1455 "space_cap=" SIZE_FORMAT,
1429 1456 space_live, space_used,
1430 1457 space_capacity);
1431 1458 tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f "
1432 1459 "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
1433 1460 density, min_percent_free, limiter,
1434 1461 dead_wood_max, dead_wood_limit);
1435 1462 }
1436 1463
1437 1464 // Locate the region with the desired amount of dead space to the left.
1438 1465 const RegionData* const limit_cp =
1439 1466 dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
1440 1467
1441 1468 // Scan from the first region with dead space to the limit region and find the
1442 1469 // one with the best (largest) reclaimed ratio.
1443 1470 double best_ratio = 0.0;
1444 1471 const RegionData* best_cp = full_cp;
1445 1472 for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
1446 1473 double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
1447 1474 if (tmp_ratio > best_ratio) {
1448 1475 best_cp = cp;
1449 1476 best_ratio = tmp_ratio;
1450 1477 }
1451 1478 }
1452 1479
1453 1480 #if 0
1454 1481 // Something to consider: if the region with the best ratio is 'close to' the
1455 1482 // first region w/free space, choose the first region with free space
1456 1483 // ("first-free"). The first-free region is usually near the start of the
1457 1484 // heap, which means we are copying most of the heap already, so copy a bit
1458 1485 // more to get complete compaction.
1459 1486 if (pointer_delta(best_cp, full_cp, sizeof(RegionData)) < 4) {
1460 1487 _maximum_compaction_gc_num = total_invocations();
1461 1488 best_cp = full_cp;
1462 1489 }
1463 1490 #endif // #if 0
1464 1491
1465 1492 return sd.region_to_addr(best_cp);
1466 1493 }
1467 1494
1468 1495 #ifndef PRODUCT
1469 1496 void
1470 1497 PSParallelCompact::fill_with_live_objects(SpaceId id, HeapWord* const start,
1471 1498 size_t words)
1472 1499 {
1473 1500 if (TraceParallelOldGCSummaryPhase) {
1474 1501 tty->print_cr("fill_with_live_objects [" PTR_FORMAT " " PTR_FORMAT ") "
1475 1502 SIZE_FORMAT, start, start + words, words);
1476 1503 }
1477 1504
1478 1505 ObjectStartArray* const start_array = _space_info[id].start_array();
1479 1506 CollectedHeap::fill_with_objects(start, words);
1480 1507 for (HeapWord* p = start; p < start + words; p += oop(p)->size()) {
1481 1508 _mark_bitmap.mark_obj(p, words);
1482 1509 _summary_data.add_obj(p, words);
1483 1510 start_array->allocate_block(p);
1484 1511 }
1485 1512 }
1486 1513
1487 1514 void
1488 1515 PSParallelCompact::summarize_new_objects(SpaceId id, HeapWord* start)
1489 1516 {
1490 1517 ParallelCompactData& sd = summary_data();
1491 1518 MutableSpace* space = _space_info[id].space();
1492 1519
1493 1520 // Find the source and destination start addresses.
1494 1521 HeapWord* const src_addr = sd.region_align_down(start);
1495 1522 HeapWord* dst_addr;
1496 1523 if (src_addr < start) {
1497 1524 dst_addr = sd.addr_to_region_ptr(src_addr)->destination();
1498 1525 } else if (src_addr > space->bottom()) {
1499 1526 // The start (the original top() value) is aligned to a region boundary so
1500 1527 // the associated region does not have a destination. Compute the
1501 1528 // destination from the previous region.
1502 1529 RegionData* const cp = sd.addr_to_region_ptr(src_addr) - 1;
1503 1530 dst_addr = cp->destination() + cp->data_size();
1504 1531 } else {
1505 1532 // Filling the entire space.
1506 1533 dst_addr = space->bottom();
1507 1534 }
1508 1535 assert(dst_addr != NULL, "sanity");
1509 1536
1510 1537 // Update the summary data.
1511 1538 bool result = _summary_data.summarize(_space_info[id].split_info(),
1512 1539 src_addr, space->top(), NULL,
1513 1540 dst_addr, space->end(),
1514 1541 _space_info[id].new_top_addr());
1515 1542 assert(result, "should not fail: bad filler object size");
1516 1543 }
1517 1544
1518 1545 void
1519 1546 PSParallelCompact::provoke_split_fill_survivor(SpaceId id)
1520 1547 {
1521 1548 if (total_invocations() % (ParallelOldGCSplitInterval * 3) != 0) {
1522 1549 return;
1523 1550 }
1524 1551
1525 1552 MutableSpace* const space = _space_info[id].space();
1526 1553 if (space->is_empty()) {
1527 1554 HeapWord* b = space->bottom();
1528 1555 HeapWord* t = b + space->capacity_in_words() / 2;
1529 1556 space->set_top(t);
1530 1557 if (ZapUnusedHeapArea) {
1531 1558 space->set_top_for_allocations();
1532 1559 }
1533 1560
1534 1561 size_t min_size = CollectedHeap::min_fill_size();
1535 1562 size_t obj_len = min_size;
1536 1563 while (b + obj_len <= t) {
1537 1564 CollectedHeap::fill_with_object(b, obj_len);
1538 1565 mark_bitmap()->mark_obj(b, obj_len);
1539 1566 summary_data().add_obj(b, obj_len);
1540 1567 b += obj_len;
1541 1568 obj_len = (obj_len & (min_size*3)) + min_size; // 8 16 24 32 8 16 24 32 ...
1542 1569 }
1543 1570 if (b < t) {
1544 1571 // The loop didn't completely fill to t (top); adjust top downward.
1545 1572 space->set_top(b);
1546 1573 if (ZapUnusedHeapArea) {
1547 1574 space->set_top_for_allocations();
1548 1575 }
1549 1576 }
1550 1577
1551 1578 HeapWord** nta = _space_info[id].new_top_addr();
1552 1579 bool result = summary_data().summarize(_space_info[id].split_info(),
1553 1580 space->bottom(), space->top(), NULL,
1554 1581 space->bottom(), space->end(), nta);
1555 1582 assert(result, "space must fit into itself");
1556 1583 }
1557 1584 }
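#if 0
// Illustrative sketch only; not part of this change.  The size-cycling
// expression used in the fill loop above, shown in isolation (the function
// name is made up).  Assuming min_size is a power of two (8 in the inline
// comment), obj_len cycles through min_size, 2*min_size, 3*min_size,
// 4*min_size and then wraps:
static void example_fill_size_cycle(size_t min_size, size_t* out, int count) {
  size_t obj_len = min_size;
  for (int i = 0; i < count; ++i) {
    out[i] = obj_len;                                 // 8, 16, 24, 32, 8, ...
    obj_len = (obj_len & (min_size * 3)) + min_size;  // same expression as above
  }
}
#endif // #if 0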
1558 1585
1559 1586 void
1560 1587 PSParallelCompact::provoke_split(bool & max_compaction)
1561 1588 {
1562 1589 if (total_invocations() % ParallelOldGCSplitInterval != 0) {
1563 1590 return;
1564 1591 }
1565 1592
1566 1593 const size_t region_size = ParallelCompactData::RegionSize;
1567 1594 ParallelCompactData& sd = summary_data();
1568 1595
1569 1596 MutableSpace* const eden_space = _space_info[eden_space_id].space();
1570 1597 MutableSpace* const from_space = _space_info[from_space_id].space();
1571 1598 const size_t eden_live = pointer_delta(eden_space->top(),
1572 1599 _space_info[eden_space_id].new_top());
1573 1600 const size_t from_live = pointer_delta(from_space->top(),
1574 1601 _space_info[from_space_id].new_top());
1575 1602
1576 1603 const size_t min_fill_size = CollectedHeap::min_fill_size();
1577 1604 const size_t eden_free = pointer_delta(eden_space->end(), eden_space->top());
1578 1605 const size_t eden_fillable = eden_free >= min_fill_size ? eden_free : 0;
1579 1606 const size_t from_free = pointer_delta(from_space->end(), from_space->top());
1580 1607 const size_t from_fillable = from_free >= min_fill_size ? from_free : 0;
1581 1608
1582 1609 // Choose the space to split; need at least 2 regions live (or fillable).
1583 1610 SpaceId id;
1584 1611 MutableSpace* space;
1585 1612 size_t live_words;
1586 1613 size_t fill_words;
1587 1614 if (eden_live + eden_fillable >= region_size * 2) {
1588 1615 id = eden_space_id;
1589 1616 space = eden_space;
1590 1617 live_words = eden_live;
1591 1618 fill_words = eden_fillable;
1592 1619 } else if (from_live + from_fillable >= region_size * 2) {
1593 1620 id = from_space_id;
1594 1621 space = from_space;
1595 1622 live_words = from_live;
1596 1623 fill_words = from_fillable;
1597 1624 } else {
1598 1625 return; // Give up.
1599 1626 }
1600 1627 assert(fill_words == 0 || fill_words >= min_fill_size, "sanity");
1601 1628
1602 1629 if (live_words < region_size * 2) {
1603 1630 // Fill from top() to end() w/live objects of mixed sizes.
1604 1631 HeapWord* const fill_start = space->top();
1605 1632 live_words += fill_words;
1606 1633
1607 1634 space->set_top(fill_start + fill_words);
1608 1635 if (ZapUnusedHeapArea) {
1609 1636 space->set_top_for_allocations();
1610 1637 }
1611 1638
1612 1639 HeapWord* cur_addr = fill_start;
1613 1640 while (fill_words > 0) {
1614 1641 const size_t r = (size_t)os::random() % (region_size / 2) + min_fill_size;
1615 1642 size_t cur_size = MIN2(align_object_size_(r), fill_words);
1616 1643 if (fill_words - cur_size < min_fill_size) {
1617 1644 cur_size = fill_words; // Avoid leaving a fragment too small to fill.
1618 1645 }
1619 1646
1620 1647 CollectedHeap::fill_with_object(cur_addr, cur_size);
1621 1648 mark_bitmap()->mark_obj(cur_addr, cur_size);
1622 1649 sd.add_obj(cur_addr, cur_size);
1623 1650
1624 1651 cur_addr += cur_size;
1625 1652 fill_words -= cur_size;
1626 1653 }
1627 1654
1628 1655 summarize_new_objects(id, fill_start);
1629 1656 }
1630 1657
1631 1658 max_compaction = false;
1632 1659
1633 1660 // Manipulate the old gen so that it has room for about half of the live data
1634 1661 // in the target young gen space (live_words / 2).
1635 1662 id = old_space_id;
1636 1663 space = _space_info[id].space();
1637 1664 const size_t free_at_end = space->free_in_words();
1638 1665 const size_t free_target = align_object_size(live_words / 2);
1639 1666 const size_t dead = pointer_delta(space->top(), _space_info[id].new_top());
1640 1667
1641 1668 if (free_at_end >= free_target + min_fill_size) {
1642 1669 // Fill space above top() and set the dense prefix so everything survives.
1643 1670 HeapWord* const fill_start = space->top();
1644 1671 const size_t fill_size = free_at_end - free_target;
1645 1672 space->set_top(space->top() + fill_size);
1646 1673 if (ZapUnusedHeapArea) {
1647 1674 space->set_top_for_allocations();
1648 1675 }
1649 1676 fill_with_live_objects(id, fill_start, fill_size);
1650 1677 summarize_new_objects(id, fill_start);
1651 1678 _space_info[id].set_dense_prefix(sd.region_align_down(space->top()));
1652 1679 } else if (dead + free_at_end > free_target) {
1653 1680 // Find a dense prefix that makes the right amount of space available.
1654 1681 HeapWord* cur = sd.region_align_down(space->top());
1655 1682 HeapWord* cur_destination = sd.addr_to_region_ptr(cur)->destination();
1656 1683 size_t dead_to_right = pointer_delta(space->end(), cur_destination);
1657 1684 while (dead_to_right < free_target) {
1658 1685 cur -= region_size;
1659 1686 cur_destination = sd.addr_to_region_ptr(cur)->destination();
1660 1687 dead_to_right = pointer_delta(space->end(), cur_destination);
1661 1688 }
1662 1689 _space_info[id].set_dense_prefix(cur);
1663 1690 }
1664 1691 }
1665 1692 #endif // #ifndef PRODUCT
1666 1693
1667 1694 void PSParallelCompact::summarize_spaces_quick()
1668 1695 {
1669 1696 for (unsigned int i = 0; i < last_space_id; ++i) {
1670 1697 const MutableSpace* space = _space_info[i].space();
1671 1698 HeapWord** nta = _space_info[i].new_top_addr();
1672 1699 bool result = _summary_data.summarize(_space_info[i].split_info(),
1673 1700 space->bottom(), space->top(), NULL,
1674 1701 space->bottom(), space->end(), nta);
1675 1702 assert(result, "space must fit into itself");
1676 1703 _space_info[i].set_dense_prefix(space->bottom());
1677 1704 }
1678 1705
1679 1706 #ifndef PRODUCT
1680 1707 if (ParallelOldGCSplitALot) {
1681 1708 provoke_split_fill_survivor(to_space_id);
1682 1709 }
1683 1710 #endif // #ifndef PRODUCT
1684 1711 }
1685 1712
1686 1713 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1687 1714 {
1688 1715 HeapWord* const dense_prefix_end = dense_prefix(id);
1689 1716 const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
1690 1717 const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
1691 1718 if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
1692 1719 // Only enough dead space is filled so that any remaining dead space to the
1693 1720 // left is larger than the minimum filler object. (The remainder is filled
1694 1721 // during the copy/update phase.)
1695 1722 //
1696 1723 // The size of the dead space to the right of the boundary is not a
1697 1724 // concern, since compaction will be able to use whatever space is
1698 1725 // available.
1699 1726 //
1700 1727 // Here '||' is the boundary, 'x' represents a don't care bit and a box
1701 1728 // surrounds the space to be filled with an object.
1702 1729 //
1703 1730 // In the 32-bit VM, each bit represents two 32-bit words:
1704 1731 // +---+
1705 1732 // a) beg_bits: ... x x x | 0 | || 0 x x ...
1706 1733 // end_bits: ... x x x | 0 | || 0 x x ...
1707 1734 // +---+
1708 1735 //
1709 1736 // In the 64-bit VM, each bit represents one 64-bit word:
1710 1737 // +------------+
1711 1738 // b) beg_bits: ... x x x | 0 || 0 | x x ...
1712 1739 // end_bits: ... x x 1 | 0 || 0 | x x ...
1713 1740 // +------------+
1714 1741 // +-------+
1715 1742 // c) beg_bits: ... x x | 0 0 | || 0 x x ...
1716 1743 // end_bits: ... x 1 | 0 0 | || 0 x x ...
1717 1744 // +-------+
1718 1745 // +-----------+
1719 1746 // d) beg_bits: ... x | 0 0 0 | || 0 x x ...
1720 1747 // end_bits: ... 1 | 0 0 0 | || 0 x x ...
1721 1748 // +-----------+
1722 1749 // +-------+
1723 1750 // e) beg_bits: ... 0 0 | 0 0 | || 0 x x ...
1724 1751 // end_bits: ... 0 0 | 0 0 | || 0 x x ...
1725 1752 // +-------+
1726 1753
1727 1754 // Initially assume case a, c or e will apply.
1728 1755 size_t obj_len = CollectedHeap::min_fill_size();
1729 1756 HeapWord* obj_beg = dense_prefix_end - obj_len;
1730 1757
1731 1758 #ifdef _LP64
1732 1759 if (MinObjAlignment > 1) { // object alignment > heap word size
1733 1760 // Cases a, c or e.
1734 1761 } else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
1735 1762 // Case b above.
1736 1763 obj_beg = dense_prefix_end - 1;
1737 1764 } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
1738 1765 _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
1739 1766 // Case d above.
1740 1767 obj_beg = dense_prefix_end - 3;
1741 1768 obj_len = 3;
1742 1769 }
1743 1770 #endif // #ifdef _LP64
1744 1771
1745 1772 CollectedHeap::fill_with_object(obj_beg, obj_len);
1746 1773 _mark_bitmap.mark_obj(obj_beg, obj_len);
1747 1774 _summary_data.add_obj(obj_beg, obj_len);
1748 1775 assert(start_array(id) != NULL, "sanity");
1749 1776 start_array(id)->allocate_block(obj_beg);
1750 1777 }
1751 1778 }
1752 1779
1753 1780 void
1754 1781 PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr)
1755 1782 {
1756 1783 RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr);
1757 1784 HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr);
1758 1785 RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up);
1759 1786 for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) {
1760 1787 cur->set_source_region(0);
1761 1788 }
1762 1789 }
1763 1790
1764 1791 void
1765 1792 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1766 1793 {
1767 1794 assert(id < last_space_id, "id out of range");
1768 1795 assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom() ||
1769 1796 ParallelOldGCSplitALot && id == old_space_id,
1770 1797 "should have been reset in summarize_spaces_quick()");
1771 1798
1772 1799 const MutableSpace* space = _space_info[id].space();
1773 1800 if (_space_info[id].new_top() != space->bottom()) {
1774 1801 HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1775 1802 _space_info[id].set_dense_prefix(dense_prefix_end);
1776 1803
1777 1804 #ifndef PRODUCT
1778 1805 if (TraceParallelOldGCDensePrefix) {
1779 1806 print_dense_prefix_stats("ratio", id, maximum_compaction,
1780 1807 dense_prefix_end);
1781 1808 HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1782 1809 print_dense_prefix_stats("density", id, maximum_compaction, addr);
1783 1810 }
1784 1811 #endif // #ifndef PRODUCT
1785 1812
1786 1813 // Recompute the summary data, taking into account the dense prefix. If
1787 1814 // every last byte will be reclaimed, then the existing summary data which
1788 1815 // compacts everything can be left in place.
1789 1816 if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1790 1817 // If dead space crosses the dense prefix boundary, it is (at least
1791 1818 // partially) filled with a dummy object, marked live and added to the
1792 1819 // summary data. This simplifies the copy/update phase and must be done
1793 1820 // before the final locations of objects are determined, to prevent
1794 1821 // leaving a fragment of dead space that is too small to fill.
1795 1822 fill_dense_prefix_end(id);
1796 1823
1797 1824 // Compute the destination of each Region, and thus each object.
1798 1825 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1799 1826 _summary_data.summarize(_space_info[id].split_info(),
1800 1827 dense_prefix_end, space->top(), NULL,
1801 1828 dense_prefix_end, space->end(),
1802 1829 _space_info[id].new_top_addr());
1803 1830 }
1804 1831 }
1805 1832
1806 1833 if (TraceParallelOldGCSummaryPhase) {
1807 1834 const size_t region_size = ParallelCompactData::RegionSize;
1808 1835 HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1809 1836 const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1810 1837 const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1811 1838 HeapWord* const new_top = _space_info[id].new_top();
1812 1839 const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1813 1840 const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1814 1841 tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1815 1842 "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1816 1843 "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1817 1844 id, space->capacity_in_words(), dense_prefix_end,
1818 1845 dp_region, dp_words / region_size,
1819 1846 cr_words / region_size, new_top);
1820 1847 }
1821 1848 }
1822 1849
1823 1850 #ifndef PRODUCT
1824 1851 void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1825 1852 HeapWord* dst_beg, HeapWord* dst_end,
1826 1853 SpaceId src_space_id,
1827 1854 HeapWord* src_beg, HeapWord* src_end)
1828 1855 {
1829 1856 if (TraceParallelOldGCSummaryPhase) {
1830 1857 tty->print_cr("summarizing %d [%s] into %d [%s]: "
1831 1858 "src=" PTR_FORMAT "-" PTR_FORMAT " "
1832 1859 SIZE_FORMAT "-" SIZE_FORMAT " "
1833 1860 "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1834 1861 SIZE_FORMAT "-" SIZE_FORMAT,
1835 1862 src_space_id, space_names[src_space_id],
1836 1863 dst_space_id, space_names[dst_space_id],
1837 1864 src_beg, src_end,
1838 1865 _summary_data.addr_to_region_idx(src_beg),
1839 1866 _summary_data.addr_to_region_idx(src_end),
1840 1867 dst_beg, dst_end,
1841 1868 _summary_data.addr_to_region_idx(dst_beg),
1842 1869 _summary_data.addr_to_region_idx(dst_end));
1843 1870 }
1844 1871 }
1845 1872 #endif // #ifndef PRODUCT
1846 1873
1847 1874 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1848 1875 bool maximum_compaction)
1849 1876 {
1850 1877 GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
1851 1878 // trace("2");
1852 1879
1853 1880 #ifdef ASSERT
1854 1881 if (TraceParallelOldGCMarkingPhase) {
1855 1882 tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1856 1883 "add_obj_bytes=" SIZE_FORMAT,
1857 1884 add_obj_count, add_obj_size * HeapWordSize);
1858 1885 tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1859 1886 "mark_bitmap_bytes=" SIZE_FORMAT,
1860 1887 mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1861 1888 }
1862 1889 #endif // #ifdef ASSERT
1863 1890
1864 1891 // Quick summarization of each space into itself, to see how much is live.
1865 1892 summarize_spaces_quick();
1866 1893
1867 1894 if (TraceParallelOldGCSummaryPhase) {
1868 1895 tty->print_cr("summary_phase: after summarizing each space to self");
1869 1896 Universe::print();
1870 1897 NOT_PRODUCT(print_region_ranges());
1871 1898 if (Verbose) {
1872 1899 NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1873 1900 }
1874 1901 }
1875 1902
1876 1903 // The amount of live data that will end up in old space (assuming it fits).
1877 1904 size_t old_space_total_live = 0;
1878 1905 assert(perm_space_id < old_space_id, "should not count perm data here");
1879 1906 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1880 1907 old_space_total_live += pointer_delta(_space_info[id].new_top(),
1881 1908 _space_info[id].space()->bottom());
1882 1909 }
1883 1910
1884 1911 MutableSpace* const old_space = _space_info[old_space_id].space();
1885 1912 const size_t old_capacity = old_space->capacity_in_words();
1886 1913 if (old_space_total_live > old_capacity) {
1887 1914 // XXX - should also try to expand
1888 1915 maximum_compaction = true;
1889 1916 }
1890 1917 #ifndef PRODUCT
1891 1918 if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) {
1892 1919 provoke_split(maximum_compaction);
1893 1920 }
1894 1921 #endif // #ifndef PRODUCT
1895 1922
1896 1923 // Permanent and Old generations.
1897 1924 summarize_space(perm_space_id, maximum_compaction);
1898 1925 summarize_space(old_space_id, maximum_compaction);
1899 1926
1900 1927 // Summarize the remaining spaces in the young gen. The initial target space
1901 1928 // is the old gen. If a space does not fit entirely into the target, then the
1902 1929 // remainder is compacted into the space itself and that space becomes the new
1903 1930 // target.
1904 1931 SpaceId dst_space_id = old_space_id;
1905 1932 HeapWord* dst_space_end = old_space->end();
1906 1933 HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
1907 1934 for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
1908 1935 const MutableSpace* space = _space_info[id].space();
1909 1936 const size_t live = pointer_delta(_space_info[id].new_top(),
1910 1937 space->bottom());
1911 1938 const size_t available = pointer_delta(dst_space_end, *new_top_addr);
1912 1939
1913 1940 NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
1914 1941 SpaceId(id), space->bottom(), space->top());)
1915 1942 if (live > 0 && live <= available) {
1916 1943 // All the live data will fit.
1917 1944 bool done = _summary_data.summarize(_space_info[id].split_info(),
1918 1945 space->bottom(), space->top(),
1919 1946 NULL,
1920 1947 *new_top_addr, dst_space_end,
1921 1948 new_top_addr);
1922 1949 assert(done, "space must fit into old gen");
1923 1950
1924 1951 // Reset the new_top value for the space.
1925 1952 _space_info[id].set_new_top(space->bottom());
1926 1953 } else if (live > 0) {
1927 1954 // Attempt to fit part of the source space into the target space.
1928 1955 HeapWord* next_src_addr = NULL;
1929 1956 bool done = _summary_data.summarize(_space_info[id].split_info(),
1930 1957 space->bottom(), space->top(),
1931 1958 &next_src_addr,
1932 1959 *new_top_addr, dst_space_end,
1933 1960 new_top_addr);
1934 1961 assert(!done, "space should not fit into old gen");
1935 1962 assert(next_src_addr != NULL, "sanity");
1936 1963
1937 1964 // The source space becomes the new target, so the remainder is compacted
1938 1965 // within the space itself.
1939 1966 dst_space_id = SpaceId(id);
1940 1967 dst_space_end = space->end();
1941 1968 new_top_addr = _space_info[id].new_top_addr();
1942 1969 NOT_PRODUCT(summary_phase_msg(dst_space_id,
1943 1970 space->bottom(), dst_space_end,
1944 1971 SpaceId(id), next_src_addr, space->top());)
1945 1972 done = _summary_data.summarize(_space_info[id].split_info(),
1946 1973 next_src_addr, space->top(),
1947 1974 NULL,
1948 1975 space->bottom(), dst_space_end,
1949 1976 new_top_addr);
1950 1977 assert(done, "space must fit when compacted into itself");
1951 1978 assert(*new_top_addr <= space->top(), "usage should not grow");
1952 1979 }
1953 1980 }
1954 1981
1955 1982 if (TraceParallelOldGCSummaryPhase) {
1956 1983 tty->print_cr("summary_phase: after final summarization");
1957 1984 Universe::print();
1958 1985 NOT_PRODUCT(print_region_ranges());
1959 1986 if (Verbose) {
1960 1987 NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
1961 1988 }
1962 1989 }
1963 1990 }
1964 1991
1965 1992 // This method should contain all heap-specific policy for invoking a full
1966 1993 // collection. invoke_no_policy() will only attempt to compact the heap; it
1967 1994 // will do nothing further. Any policy-based bailout, scavenge before full gc,
1968 1995 // or other specialized behavior needs to be added here.
1969 1996 //
1970 1997 // Note that this method should only be called from the vm_thread while at a
1971 1998 // safepoint.
1972 1999 //
1973 2000 // Note that the all_soft_refs_clear flag in the collector policy
1974 2001 // may be true because this method can be called without intervening
1975 2002 // activity. For example when the heap space is tight and full measure
1976 2003 // are being taken to free space.
1977 2004 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1978 2005 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1979 2006 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1980 2007 "should be in vm thread");
1981 2008
1982 2009 ParallelScavengeHeap* heap = gc_heap();
1983 2010 GCCause::Cause gc_cause = heap->gc_cause();
1984 2011 assert(!heap->is_gc_active(), "not reentrant");
1985 2012
1986 2013 PSAdaptiveSizePolicy* policy = heap->size_policy();
1987 2014 IsGCActiveMark mark;
1988 2015
1989 2016 if (ScavengeBeforeFullGC) {
1990 2017 PSScavenge::invoke_no_policy();
1991 2018 }
1992 2019
1993 2020 const bool clear_all_soft_refs =
1994 2021 heap->collector_policy()->should_clear_all_soft_refs();
1995 2022
1996 2023 PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1997 2024 maximum_heap_compaction);
1998 2025 }
1999 2026
2000 -bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
2001 - size_t addr_region_index = addr_to_region_idx(addr);
2002 - return region_index == addr_region_index;
2003 -}
2004 -
2005 2027 // This method contains no policy. You should probably
2006 2028 // be calling invoke() instead.
2007 2029 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
2008 2030 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
2009 2031 assert(ref_processor() != NULL, "Sanity");
2010 2032
2011 2033 if (GC_locker::check_active_before_gc()) {
2012 2034 return false;
2013 2035 }
2014 2036
2015 2037 ParallelScavengeHeap* heap = gc_heap();
2016 2038
2017 2039 _gc_timer.register_gc_start(os::elapsed_counter());
2018 2040 _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
2019 2041
2020 2042 TimeStamp marking_start;
2021 2043 TimeStamp compaction_start;
2022 2044 TimeStamp collection_exit;
2023 2045
2024 2046 GCCause::Cause gc_cause = heap->gc_cause();
2025 2047 PSYoungGen* young_gen = heap->young_gen();
2026 2048 PSOldGen* old_gen = heap->old_gen();
2027 2049 PSPermGen* perm_gen = heap->perm_gen();
2028 2050 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
2029 2051
2030 2052 // The scope of casr should end after code that can change
2031 2053 // CollectorPolicy::_should_clear_all_soft_refs.
2032 2054 ClearedAllSoftRefs casr(maximum_heap_compaction,
2033 2055 heap->collector_policy());
2034 2056
2035 2057 if (ZapUnusedHeapArea) {
2036 2058 // Save information needed to minimize mangling
2037 2059 heap->record_gen_tops_before_GC();
2038 2060 }
2039 2061
2040 2062 heap->pre_full_gc_dump(&_gc_timer);
2041 2063
2042 2064 _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
2043 2065
2044 2066 // Make sure data structures are sane, make the heap parsable, and do other
2045 2067 // miscellaneous bookkeeping.
2046 2068 PreGCValues pre_gc_values;
2047 2069 pre_compact(&pre_gc_values);
2048 2070
2049 2071 // Get the compaction manager reserved for the VM thread.
2050 2072 ParCompactionManager* const vmthread_cm =
2051 2073 ParCompactionManager::manager_array(gc_task_manager()->workers());
2052 2074
2053 2075 // Place after pre_compact() where the number of invocations is incremented.
2054 2076 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
2055 2077
2056 2078 {
2057 2079 ResourceMark rm;
2058 2080 HandleMark hm;
2059 2081
2060 2082 // Set the number of GC threads to be used in this collection
2061 2083 gc_task_manager()->set_active_gang();
2062 2084 gc_task_manager()->task_idle_workers();
2063 2085 heap->set_par_threads(gc_task_manager()->active_workers());
2064 2086
2065 2087 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
2066 2088 GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
2067 2089 TraceCollectorStats tcs(counters());
2068 2090 TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
2069 2091
2070 2092 if (TraceGen1Time) accumulated_time()->start();
2071 2093
2072 2094 // Let the size policy know we're starting
2073 2095 size_policy->major_collection_begin();
2074 2096
2075 2097 // When collecting the permanent generation methodOops may be moving,
2076 2098 // so we either have to flush all bcp data or convert it into bci.
2077 2099 CodeCache::gc_prologue();
2078 2100 Threads::gc_prologue();
2079 2101
2080 2102 COMPILER2_PRESENT(DerivedPointerTable::clear());
2081 2103
2082 2104 ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
2083 2105 ref_processor()->setup_policy(maximum_heap_compaction);
2084 2106
2085 2107 bool marked_for_unloading = false;
2086 2108
2087 2109 marking_start.update();
2088 2110 marking_phase(vmthread_cm, maximum_heap_compaction);
2089 2111
2090 2112 #ifndef PRODUCT
2091 2113 if (TraceParallelOldGCMarkingPhase) {
2092 2114 gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d "
2093 2115 "cas_by_another %d",
2094 2116 mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
2095 2117 mark_bitmap()->cas_by_another());
2096 2118 }
2097 2119 #endif // #ifndef PRODUCT
2098 2120
2099 2121 bool max_on_system_gc = UseMaximumCompactionOnSystemGC
2100 2122 && gc_cause == GCCause::_java_lang_system_gc;
2101 2123 summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
2102 2124
2103 2125 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
2104 2126 COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
2105 2127
2106 2128 // adjust_roots() updates Universe::_intArrayKlassObj which is
2107 2129 // needed by the compaction for filling holes in the dense prefix.
2108 2130 adjust_roots();
2109 2131
2110 2132 compaction_start.update();
2111 2133 // Does the perm gen always have to be done serially because
2112 2134 // klasses are used in the update of an object?
2113 2135 compact_perm(vmthread_cm);
2114 2136
2115 2137 compact();
2116 2138
2117 2139 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
2118 2140 // done before resizing.
2119 2141 post_compact();
2120 2142
2121 2143 // Let the size policy know we're done
2122 2144 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
2123 2145
2124 2146 if (UseAdaptiveSizePolicy) {
2125 2147 if (PrintAdaptiveSizePolicy) {
2126 2148 gclog_or_tty->print("AdaptiveSizeStart: ");
2127 2149 gclog_or_tty->stamp();
2128 2150 gclog_or_tty->print_cr(" collection: %d ",
2129 2151 heap->total_collections());
2130 2152 if (Verbose) {
2131 2153 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
2132 2154 " perm_gen_capacity: %d ",
2133 2155 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
2134 2156 perm_gen->capacity_in_bytes());
2135 2157 }
2136 2158 }
2137 2159
2138 2160 // Don't check if the size_policy is ready here. Let
2139 2161 // the size_policy check that internally.
2140 2162 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
2141 2163 ((gc_cause != GCCause::_java_lang_system_gc) ||
2142 2164 UseAdaptiveSizePolicyWithSystemGC)) {
2143 2165 // Calculate optimal free space amounts
2144 2166 assert(young_gen->max_size() >
2145 2167 young_gen->from_space()->capacity_in_bytes() +
2146 2168 young_gen->to_space()->capacity_in_bytes(),
2147 2169 "Sizes of space in young gen are out-of-bounds");
2148 2170 size_t max_eden_size = young_gen->max_size() -
2149 2171 young_gen->from_space()->capacity_in_bytes() -
2150 2172 young_gen->to_space()->capacity_in_bytes();
2151 2173 size_policy->compute_generation_free_space(
2152 2174 young_gen->used_in_bytes(),
2153 2175 young_gen->eden_space()->used_in_bytes(),
2154 2176 old_gen->used_in_bytes(),
2155 2177 perm_gen->used_in_bytes(),
2156 2178 young_gen->eden_space()->capacity_in_bytes(),
2157 2179 old_gen->max_gen_size(),
2158 2180 max_eden_size,
2159 2181 true /* full gc*/,
2160 2182 gc_cause,
2161 2183 heap->collector_policy());
2162 2184
2163 2185 heap->resize_old_gen(
2164 2186 size_policy->calculated_old_free_size_in_bytes());
2165 2187
2166 2188 // Don't resize the young generation at a major collection. A
2167 2189 // desired young generation size may have been calculated but
2168 2190 // resizing the young generation complicates the code because the
2169 2191 // resizing of the old generation may have moved the boundary
2170 2192 // between the young generation and the old generation. Let the
2171 2193 // young generation resizing happen at the minor collections.
2172 2194 }
2173 2195 if (PrintAdaptiveSizePolicy) {
2174 2196 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
2175 2197 heap->total_collections());
2176 2198 }
2177 2199 }
2178 2200
2179 2201 if (UsePerfData) {
2180 2202 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
2181 2203 counters->update_counters();
2182 2204 counters->update_old_capacity(old_gen->capacity_in_bytes());
2183 2205 counters->update_young_capacity(young_gen->capacity_in_bytes());
2184 2206 }
2185 2207
2186 2208 heap->resize_all_tlabs();
2187 2209
2188 2210 // We collected the perm gen, so we'll resize it here.
2189 2211 perm_gen->compute_new_size(pre_gc_values.perm_gen_used());
2190 2212
2191 2213 if (TraceGen1Time) accumulated_time()->stop();
2192 2214
2193 2215 if (PrintGC) {
2194 2216 if (PrintGCDetails) {
2195 2217 // No GC timestamp here. This is after GC so it would be confusing.
2196 2218 young_gen->print_used_change(pre_gc_values.young_gen_used());
2197 2219 old_gen->print_used_change(pre_gc_values.old_gen_used());
2198 2220 heap->print_heap_change(pre_gc_values.heap_used());
2199 2221 // Print perm gen last (print_heap_change() excludes the perm gen).
2200 2222 perm_gen->print_used_change(pre_gc_values.perm_gen_used());
2201 2223 } else {
2202 2224 heap->print_heap_change(pre_gc_values.heap_used());
2203 2225 }
2204 2226 }
2205 2227
2206 2228 // Track memory usage and detect low memory
2207 2229 MemoryService::track_memory_usage();
2208 2230 heap->update_counters();
2209 2231 gc_task_manager()->release_idle_workers();
2210 2232 }
2211 2233
2212 2234 #ifdef ASSERT
2213 2235 for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
2214 2236 ParCompactionManager* const cm =
2215 2237 ParCompactionManager::manager_array(int(i));
2216 2238 assert(cm->marking_stack()->is_empty(), "should be empty");
2217 2239 assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
2218 2240 assert(cm->revisit_klass_stack()->is_empty(), "should be empty");
2219 2241 }
2220 2242 #endif // ASSERT
2221 2243
2222 2244 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
2223 2245 HandleMark hm; // Discard invalid handles created during verification
2224 2246 gclog_or_tty->print(" VerifyAfterGC:");
2225 2247 Universe::verify();
2226 2248 }
2227 2249
2228 2250 // Re-verify object start arrays
2229 2251 if (VerifyObjectStartArray &&
2230 2252 VerifyAfterGC) {
2231 2253 old_gen->verify_object_start_array();
2232 2254 perm_gen->verify_object_start_array();
2233 2255 }
2234 2256
2235 2257 if (ZapUnusedHeapArea) {
2236 2258 old_gen->object_space()->check_mangled_unused_area_complete();
2237 2259 perm_gen->object_space()->check_mangled_unused_area_complete();
2238 2260 }
2239 2261
2240 2262 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
2241 2263
2242 2264 collection_exit.update();
2243 2265
2244 2266 heap->print_heap_after_gc();
2245 2267 heap->trace_heap_after_gc(&_gc_tracer);
2246 2268
2247 2269 if (PrintGCTaskTimeStamps) {
2248 2270 gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
2249 2271 INT64_FORMAT,
2250 2272 marking_start.ticks(), compaction_start.ticks(),
2251 2273 collection_exit.ticks());
2252 2274 gc_task_manager()->print_task_time_stamps();
2253 2275 }
2254 2276
2255 2277 heap->post_full_gc_dump(&_gc_timer);
2256 2278
2257 2279 #ifdef TRACESPINNING
2258 2280 ParallelTaskTerminator::print_termination_counts();
2259 2281 #endif
2260 2282
2261 2283 _gc_timer.register_gc_end(os::elapsed_counter());
2262 2284
2263 2285 _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
2264 2286 _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
2265 2287
2266 2288 return true;
2267 2289 }
2268 2290
2269 2291 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
2270 2292 PSYoungGen* young_gen,
2271 2293 PSOldGen* old_gen) {
2272 2294 MutableSpace* const eden_space = young_gen->eden_space();
2273 2295 assert(!eden_space->is_empty(), "eden must be non-empty");
2274 2296 assert(young_gen->virtual_space()->alignment() ==
2275 2297 old_gen->virtual_space()->alignment(), "alignments do not match");
2276 2298
2277 2299 if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
2278 2300 return false;
2279 2301 }
2280 2302
2281 2303 // Both generations must be completely committed.
2282 2304 if (young_gen->virtual_space()->uncommitted_size() != 0) {
2283 2305 return false;
2284 2306 }
2285 2307 if (old_gen->virtual_space()->uncommitted_size() != 0) {
2286 2308 return false;
2287 2309 }
2288 2310
2289 2311 // Figure out how much to take from eden. Include the average amount promoted
2290 2312 // in the total; otherwise the next young gen GC will simply bail out to a
2291 2313 // full GC.
2292 2314 const size_t alignment = old_gen->virtual_space()->alignment();
2293 2315 const size_t eden_used = eden_space->used_in_bytes();
2294 2316 const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
2295 2317 const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
2296 2318 const size_t eden_capacity = eden_space->capacity_in_bytes();
2297 2319
2298 2320 if (absorb_size >= eden_capacity) {
2299 2321 return false; // Must leave some space in eden.
2300 2322 }
2301 2323
2302 2324 const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
2303 2325 if (new_young_size < young_gen->min_gen_size()) {
2304 2326 return false; // Respect young gen minimum size.
2305 2327 }
2306 2328
2307 2329 if (TraceAdaptiveGCBoundary && Verbose) {
2308 2330 gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
2309 2331 "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
2310 2332 "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
2311 2333 "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
2312 2334 absorb_size / K,
2313 2335 eden_capacity / K, (eden_capacity - absorb_size) / K,
2314 2336 young_gen->from_space()->used_in_bytes() / K,
2315 2337 young_gen->to_space()->used_in_bytes() / K,
2316 2338 young_gen->capacity_in_bytes() / K, new_young_size / K);
2317 2339 }
2318 2340
2319 2341 // Fill the unused part of the old gen.
2320 2342 MutableSpace* const old_space = old_gen->object_space();
2321 2343 HeapWord* const unused_start = old_space->top();
2322 2344 size_t const unused_words = pointer_delta(old_space->end(), unused_start);
2323 2345
2324 2346 if (unused_words > 0) {
2325 2347 if (unused_words < CollectedHeap::min_fill_size()) {
2326 2348 return false; // If the old gen cannot be filled, must give up.
2327 2349 }
2328 2350 CollectedHeap::fill_with_objects(unused_start, unused_words);
2329 2351 }
2330 2352
2331 2353 // Take the live data from eden and set both top and end in the old gen to
2332 2354 // eden top. (Need to set end because reset_after_change() mangles the region
2333 2355 // from end to virtual_space->high() in debug builds).
2334 2356 HeapWord* const new_top = eden_space->top();
2335 2357 old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
2336 2358 absorb_size);
2337 2359 young_gen->reset_after_change();
2338 2360 old_space->set_top(new_top);
2339 2361 old_space->set_end(new_top);
2340 2362 old_gen->reset_after_change();
2341 2363
2342 2364 // Update the object start array for the filler object and the data from eden.
2343 2365 ObjectStartArray* const start_array = old_gen->start_array();
2344 2366 for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2345 2367 start_array->allocate_block(p);
2346 2368 }
2347 2369
2348 2370 // Could update the promoted average here, but it is not typically updated at
2349 2371 // full GCs and the value to use is unclear. Something like
2350 2372 //
2351 2373 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2352 2374
2353 2375 size_policy->set_bytes_absorbed_from_eden(absorb_size);
2354 2376 return true;
2355 2377 }
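// Illustrative example only; not part of this change.  With made-up numbers:
// if eden has 10M used, the padded average promoted is 2M and the generation
// alignment is 64K, then absorb_size = align_size_up(10M + 2M, 64K) = 12M.
// The old gen expands into the young gen by that amount and the young gen
// shrinks by the same amount, so the next scavenge still has headroom to
// promote without forcing another full GC.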
2356 2378
2357 2379 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2358 2380 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2359 2381 "shouldn't return NULL");
2360 2382 return ParallelScavengeHeap::gc_task_manager();
2361 2383 }
2362 2384
2363 2385 void PSParallelCompact::marking_phase(ParCompactionManager* cm, bool maximum_heap_compaction) {
2364 2386 // Recursively traverse all live objects and mark them
2365 2387 GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
2366 2388
2367 2389 ParallelScavengeHeap* heap = gc_heap();
2368 2390 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2369 2391 uint active_gc_threads = heap->gc_task_manager()->active_workers();
2370 2392 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2371 2393 ParallelTaskTerminator terminator(active_gc_threads, qset);
2372 2394
2373 2395 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2374 2396 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
2375 2397
2376 2398 {
2377 2399 GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
2378 2400
2379 2401 ParallelScavengeHeap::ParStrongRootsScope psrs;
2380 2402
2381 2403 GCTaskQueue* q = GCTaskQueue::create();
2382 2404
2383 2405 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2384 2406 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2385 2407 // We scan the thread roots in parallel
2386 2408 Threads::create_thread_roots_marking_tasks(q);
2387 2409 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2388 2410 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2389 2411 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2390 2412 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2391 2413 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2392 2414 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2393 2415
2394 2416 if (active_gc_threads > 1) {
2395 2417 for (uint j = 0; j < active_gc_threads; j++) {
2396 2418 q->enqueue(new StealMarkingTask(&terminator));
2397 2419 }
2398 2420 }
2399 2421
2400 2422 gc_task_manager()->execute_and_wait(q);
2401 2423 }
2402 2424
2403 2425 // Process reference objects found during marking
2404 2426 {
2405 2427 GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
2406 2428
2407 2429 ReferenceProcessorStats stats;
2408 2430 if (ref_processor()->processing_is_mt()) {
2409 2431 RefProcTaskExecutor task_executor;
2410 2432 stats = ref_processor()->process_discovered_references(
2411 2433 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
2412 2434 &task_executor, &_gc_timer);
2413 2435 } else {
2414 2436 stats = ref_processor()->process_discovered_references(
2415 2437 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
2416 2438 &_gc_timer);
2417 2439 }
2418 2440
2419 2441 _gc_tracer.report_gc_reference_stats(stats);
2420 2442 }
2421 2443
2422 2444 GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
2423 2445
2424 2446 // Follow system dictionary roots and unload classes.
2425 2447 bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2426 2448
2427 2449 // Follow code cache roots.
2428 2450 CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
2429 2451 purged_class);
2430 2452 cm->follow_marking_stacks(); // Flush marking stack.
2431 2453
2432 2454 // Update subklass/sibling/implementor links of live klasses
2433 2455 // revisit_klass_stack is used in follow_weak_klass_links().
2434 2456 follow_weak_klass_links();
2435 2457
2436 2458 // Revisit memoized MDO's and clear any unmarked weak refs
2437 2459 follow_mdo_weak_refs();
2438 2460
2439 2461 // Visit interned string tables and delete unmarked oops
2440 2462 StringTable::unlink(is_alive_closure());
2441 2463 // Clean up unreferenced symbols in symbol table.
2442 2464 SymbolTable::unlink();
2443 2465
2444 2466 assert(cm->marking_stacks_empty(), "marking stacks should be empty");
2445 2467 _gc_tracer.report_object_count_after_gc(is_alive_closure());
2446 2468 }
2447 2469
2448 2470 // This should be moved to the shared markSweep code!
2449 2471 class PSAlwaysTrueClosure: public BoolObjectClosure {
2450 2472 public:
2451 2473 void do_object(oop p) { ShouldNotReachHere(); }
2452 2474 bool do_object_b(oop p) { return true; }
2453 2475 };
2454 2476 static PSAlwaysTrueClosure always_true;
2455 2477
2456 2478 void PSParallelCompact::adjust_roots() {
2457 2479 // Adjust the pointers to reflect the new locations
2458 2480 GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
2459 2481
2460 2482 // General strong roots.
2461 2483 Universe::oops_do(adjust_root_pointer_closure());
2462 2484 JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
2463 2485 Threads::oops_do(adjust_root_pointer_closure(), NULL);
2464 2486 ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
2465 2487 FlatProfiler::oops_do(adjust_root_pointer_closure());
2466 2488 Management::oops_do(adjust_root_pointer_closure());
2467 2489 JvmtiExport::oops_do(adjust_root_pointer_closure());
2468 2490 // SO_AllClasses
2469 2491 SystemDictionary::oops_do(adjust_root_pointer_closure());
2470 2492
2471 2493 // Now adjust pointers in remaining weak roots. (All of which should
2472 2494 // have been cleared if they pointed to non-surviving objects.)
2473 2495 // Global (weak) JNI handles
2474 2496 JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
2475 2497
2476 2498 CodeCache::oops_do(adjust_pointer_closure());
2477 2499 StringTable::oops_do(adjust_root_pointer_closure());
2478 2500 ref_processor()->weak_oops_do(adjust_root_pointer_closure());
2479 2501 // Roots were visited so references into the young gen in roots
2480 2502 // may have been scanned. Process them also.
2481 2503 // Should the reference processor have a span that excludes
2482 2504 // young gen objects?
2483 2505 PSScavenge::reference_processor()->weak_oops_do(
2484 2506 adjust_root_pointer_closure());
2485 2507 }
2486 2508
2487 2509 void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
2488 2510 GCTraceTime tm("compact perm gen", print_phases(), true, &_gc_timer);
2489 2511 // trace("4");
2490 2512
2491 2513 gc_heap()->perm_gen()->start_array()->reset();
2492 2514 move_and_update(cm, perm_space_id);
2493 2515 }
2494 2516
2495 2517 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2496 2518 uint parallel_gc_threads)
2497 2519 {
2498 2520 GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
2499 2521
2500 2522 // Find the threads that are active
2501 2523 unsigned int which = 0;
2502 2524
2503 2525 const uint task_count = MAX2(parallel_gc_threads, 1U);
2504 2526 for (uint j = 0; j < task_count; j++) {
2505 2527 q->enqueue(new DrainStacksCompactionTask(j));
2506 2528 ParCompactionManager::verify_region_list_empty(j);
2507 2529 // Set the region stack variables to "no region stack" values
2508 2530 // so that, in the stealing tasks, they will be recognized as
2509 2531 // needing a region stack if they do not get one by executing
2510 2532 // a draining task.
2511 2533 ParCompactionManager* cm = ParCompactionManager::manager_array(j);
2512 2534 cm->set_region_stack(NULL);
2513 2535 cm->set_region_stack_index((uint)max_uintx);
2514 2536 }
2515 2537 ParCompactionManager::reset_recycled_stack_index();
2516 2538
2517 2539 // Find all regions that are available (can be filled immediately) and
2518 2540 // distribute them to the thread stacks. The iteration is done in reverse
2519 2541 // order (high to low) so the regions will be removed in ascending order.
2520 2542
2521 2543 const ParallelCompactData& sd = PSParallelCompact::summary_data();
2522 2544
2523 2545 size_t fillable_regions = 0; // A count for diagnostic purposes.
2524 2546 // A region index which corresponds to the tasks created above.
2525 2547 // "which" must be 0 <= which < task_count
2526 2548
2527 2549 which = 0;
2528 2550 for (unsigned int id = to_space_id; id > perm_space_id; --id) {
2529 2551 SpaceInfo* const space_info = _space_info + id;
2530 2552 MutableSpace* const space = space_info->space();
2531 2553 HeapWord* const new_top = space_info->new_top();
2532 2554
2533 2555 const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2534 2556 const size_t end_region =
2535 2557 sd.addr_to_region_idx(sd.region_align_up(new_top));
2536 2558 assert(end_region > 0, "perm gen cannot be empty");
2537 2559
2538 2560 for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
2539 2561 if (sd.region(cur)->claim_unsafe()) {
2540 2562 ParCompactionManager::region_list_push(which, cur);
2541 2563
2542 2564 if (TraceParallelOldGCCompactionPhase && Verbose) {
2543 2565 const size_t count_mod_8 = fillable_regions & 7;
2544 2566 if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
2545 2567 gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
2546 2568 if (count_mod_8 == 7) gclog_or_tty->cr();
2547 2569 }
2548 2570
2549 2571 NOT_PRODUCT(++fillable_regions;)
2550 2572
2551 2573 // Assign regions to tasks in round-robin fashion.
2552 2574 if (++which == task_count) {
2553 2575 assert(which <= parallel_gc_threads,
2554 2576 "Inconsistent number of workers");
2555 2577 which = 0;
2556 2578 }
2557 2579 }
2558 2580 }
2559 2581 }
2560 2582
2561 2583 if (TraceParallelOldGCCompactionPhase) {
2562 2584 if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
2563 2585 gclog_or_tty->print_cr("%u initially fillable regions", fillable_regions);
2564 2586 }
2565 2587 }
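
A minimal standalone sketch (editor's illustration, not part of this webrev) of the
round-robin distribution used above: each claimable region index is pushed onto the
next task's list in turn, wrapping back to task 0. The container-based names are
hypothetical stand-ins for region_list_push() and the per-task region lists.

    #include <cstddef>
    #include <vector>

    // Distribute fillable region indices over task_count lists, round-robin.
    std::vector< std::vector<size_t> >
    distribute_round_robin(const std::vector<size_t>& fillable, size_t task_count) {
      std::vector< std::vector<size_t> > lists(task_count);
      size_t which = 0;
      for (size_t i = 0; i < fillable.size(); ++i) {
        lists[which].push_back(fillable[i]);   // cf. region_list_push(which, cur)
        if (++which == task_count) {           // wrap around to the first task
          which = 0;
        }
      }
      return lists;
    }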
2566 2588
2567 2589 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2568 2590
2569 2591 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2570 2592 uint parallel_gc_threads) {
2571 2593 GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
2572 2594
2573 2595 ParallelCompactData& sd = PSParallelCompact::summary_data();
2574 2596
2575 2597 // Iterate over all the spaces adding tasks for updating
2576 2598 // regions in the dense prefix. Assume that 1 gc thread
2577 2599 // will work on opening the gaps and the remaining gc threads
2578 2600 // will work on the dense prefix.
2579 2601 unsigned int space_id;
2580 2602 for (space_id = old_space_id; space_id < last_space_id; ++ space_id) {
2581 2603 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2582 2604 const MutableSpace* const space = _space_info[space_id].space();
2583 2605
2584 2606 if (dense_prefix_end == space->bottom()) {
2585 2607 // There is no dense prefix for this space.
2586 2608 continue;
2587 2609 }
2588 2610
2589 2611 // The dense prefix is before this region.
2590 2612 size_t region_index_end_dense_prefix =
2591 2613 sd.addr_to_region_idx(dense_prefix_end);
2592 2614 RegionData* const dense_prefix_cp =
2593 2615 sd.region(region_index_end_dense_prefix);
2594 2616 assert(dense_prefix_end == space->end() ||
2595 2617 dense_prefix_cp->available() ||
2596 2618 dense_prefix_cp->claimed(),
2597 2619 "The region after the dense prefix should always be ready to fill");
2598 2620
2599 2621 size_t region_index_start = sd.addr_to_region_idx(space->bottom());
2600 2622
2601 2623 // Is there dense prefix work?
2602 2624 size_t total_dense_prefix_regions =
2603 2625 region_index_end_dense_prefix - region_index_start;
2604 2626 // How many regions of the dense prefix should be given to
2605 2627 // each thread?
2606 2628 if (total_dense_prefix_regions > 0) {
2607 2629 uint tasks_for_dense_prefix = 1;
2608 2630 if (total_dense_prefix_regions <=
2609 2631 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
2610 2632 // Don't over partition. This assumes that
2611 2633 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
2612 2634 // so there are not many regions to process.
2613 2635 tasks_for_dense_prefix = parallel_gc_threads;
2614 2636 } else {
2615 2637 // Over partition
2616 2638 tasks_for_dense_prefix = parallel_gc_threads *
2617 2639 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2618 2640 }
2619 2641 size_t regions_per_thread = total_dense_prefix_regions /
2620 2642 tasks_for_dense_prefix;
2621 2643 // Give each thread at least 1 region.
2622 2644 if (regions_per_thread == 0) {
2623 2645 regions_per_thread = 1;
2624 2646 }
2625 2647
2626 2648 for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2627 2649 if (region_index_start >= region_index_end_dense_prefix) {
2628 2650 break;
2629 2651 }
2630 2652 // region_index_end is not processed
2631 2653 size_t region_index_end = MIN2(region_index_start + regions_per_thread,
2632 2654 region_index_end_dense_prefix);
2633 2655 q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2634 2656 region_index_start,
2635 2657 region_index_end));
2636 2658 region_index_start = region_index_end;
2637 2659 }
2638 2660 }
2639 2661 // This gets any part of the dense prefix that did not
2640 2662 // fit evenly.
2641 2663 if (region_index_start < region_index_end_dense_prefix) {
2642 2664 q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2643 2665 region_index_start,
2644 2666 region_index_end_dense_prefix));
2645 2667 }
2646 2668 }
2647 2669 }
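
A standalone sketch (editor's illustration; all names are hypothetical) of the
partitioning arithmetic above: the dense-prefix regions are split into either
parallel_gc_threads or parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING
chunks, each chunk covers at least one region, and a final task picks up whatever
did not fit evenly.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    void plan_dense_prefix_tasks(size_t beg_region, size_t end_region,
                                 unsigned threads, unsigned over_partitioning) {
      const size_t total = end_region - beg_region;
      if (total == 0) return;                        // no dense prefix work
      const unsigned tasks = (total <= (size_t)threads * over_partitioning)
                             ? threads
                             : threads * over_partitioning;
      const size_t per_task = std::max(total / tasks, (size_t)1);
      size_t start = beg_region;
      for (unsigned k = 0; k < tasks && start < end_region; ++k) {
        const size_t chunk_end = std::min(start + per_task, end_region);
        std::printf("task %u: regions [%lu, %lu)\n",
                    k, (unsigned long)start, (unsigned long)chunk_end);
        start = chunk_end;
      }
      if (start < end_region) {                      // the part that did not fit evenly
        std::printf("tail task: regions [%lu, %lu)\n",
                    (unsigned long)start, (unsigned long)end_region);
      }
    }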
2648 2670
2649 2671 void PSParallelCompact::enqueue_region_stealing_tasks(
2650 2672 GCTaskQueue* q,
2651 2673 ParallelTaskTerminator* terminator_ptr,
2652 2674 uint parallel_gc_threads) {
2653 2675 GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
2654 2676
2655 2677 // Once a thread has drained its stack, it should try to steal regions from
2656 2678 // other threads.
2657 2679 if (parallel_gc_threads > 1) {
2658 2680 for (uint j = 0; j < parallel_gc_threads; j++) {
2659 2681 q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2660 2682 }
2661 2683 }
2662 2684 }
2663 2685
2686 +#ifdef ASSERT
2687 +// Write a histogram of the number of times the block table was filled for a
2688 +// region.
2689 +void PSParallelCompact::write_block_fill_histogram(outputStream* const out)
2690 +{
2691 + if (!TraceParallelOldGCCompactionPhase) return;
2692 +
2693 + typedef ParallelCompactData::RegionData rd_t;
2694 + ParallelCompactData& sd = summary_data();
2695 +
2696 + for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2697 + MutableSpace* const spc = _space_info[id].space();
2698 + if (spc->bottom() != spc->top()) {
2699 + const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2700 + HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2701 + const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2702 +
2703 + size_t histo[5] = { 0, 0, 0, 0, 0 };
2704 + const size_t histo_len = sizeof(histo) / sizeof(size_t);
2705 + const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2706 +
2707 + for (const rd_t* cur = beg; cur < end; ++cur) {
2708 + ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2709 + }
2710 + out->print("%u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2711 + for (size_t i = 0; i < histo_len; ++i) {
2712 + out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2713 + histo[i], 100.0 * histo[i] / region_cnt);
2714 + }
2715 + out->cr();
2716 + }
2717 + }
2718 +}
2719 +#endif // #ifdef ASSERT
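
A small sketch (editor's addition, hypothetical names) of the bucketing used by the
histogram above: fill counts 0 through 3 land in their own bucket and anything larger
saturates into the final "4 or more" bucket, which is what MIN2(count, histo_len - 1)
achieves in the new code.

    #include <algorithm>
    #include <cstddef>

    // histo must have 5 zero-initialized elements.
    void bucket_fill_counts(const size_t* counts, size_t n, size_t histo[5]) {
      const size_t histo_len = 5;
      for (size_t i = 0; i < n; ++i) {
        ++histo[std::min(counts[i], histo_len - 1)];
      }
    }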
2720 +
2664 2721 void PSParallelCompact::compact() {
2665 2722 // trace("5");
2666 2723 GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
2667 2724
2668 2725 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2669 2726 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2670 2727 PSOldGen* old_gen = heap->old_gen();
2671 2728 old_gen->start_array()->reset();
2672 2729 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2673 2730 uint active_gc_threads = heap->gc_task_manager()->active_workers();
2674 2731 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2675 2732 ParallelTaskTerminator terminator(active_gc_threads, qset);
2676 2733
2677 2734 GCTaskQueue* q = GCTaskQueue::create();
2678 2735 enqueue_region_draining_tasks(q, active_gc_threads);
2679 2736 enqueue_dense_prefix_tasks(q, active_gc_threads);
2680 2737 enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
2681 2738
2682 2739 {
2683 2740 GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
2684 2741
2685 2742 gc_task_manager()->execute_and_wait(q);
2686 2743
2687 2744 #ifdef ASSERT
2688 2745 // Verify that all regions have been processed before the deferred updates.
2689 2746 // Note that perm_space_id is skipped; this type of verification is not
2690 2747 // valid until the perm gen is compacted by regions.
2691 2748 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2692 2749 verify_complete(SpaceId(id));
2693 2750 }
2694 2751 #endif
2695 2752 }
2696 2753
2697 2754 {
2698 2755 // Update the deferred objects, if any. Any compaction manager can be used.
2699 2756 GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
2700 2757 ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2701 2758 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2702 2759 update_deferred_objects(cm, SpaceId(id));
2703 2760 }
2704 2761 }
2762 +
2763 + DEBUG_ONLY(write_block_fill_histogram(gclog_or_tty));
2705 2764 }
2706 2765
2707 2766 #ifdef ASSERT
2708 2767 void PSParallelCompact::verify_complete(SpaceId space_id) {
2709 2768 // All Regions between space bottom() to new_top() should be marked as filled
2710 2769 // and all Regions between new_top() and top() should be available (i.e.,
2711 2770 // should have been emptied).
2712 2771 ParallelCompactData& sd = summary_data();
2713 2772 SpaceInfo si = _space_info[space_id];
2714 2773 HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2715 2774 HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2716 2775 const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2717 2776 const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2718 2777 const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2719 2778
2720 2779 bool issued_a_warning = false;
2721 2780
2722 2781 size_t cur_region;
2723 2782 for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
2724 2783 const RegionData* const c = sd.region(cur_region);
2725 2784 if (!c->completed()) {
2726 2785 warning("region " SIZE_FORMAT " not filled: "
2727 2786 "destination_count=" SIZE_FORMAT,
2728 2787 cur_region, c->destination_count());
2729 2788 issued_a_warning = true;
2730 2789 }
2731 2790 }
2732 2791
2733 2792 for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
2734 2793 const RegionData* const c = sd.region(cur_region);
2735 2794 if (!c->available()) {
2736 2795 warning("region " SIZE_FORMAT " not empty: "
2737 2796 "destination_count=" SIZE_FORMAT,
2738 2797 cur_region, c->destination_count());
2739 2798 issued_a_warning = true;
2740 2799 }
2741 2800 }
2742 2801
2743 2802 if (issued_a_warning) {
2744 2803 print_region_ranges();
2745 2804 }
2746 2805 }
2747 2806 #endif // #ifdef ASSERT
2748 2807
2749 2808 void
2750 2809 PSParallelCompact::follow_weak_klass_links() {
2751 2810 // All klasses on the revisit stack are marked at this point.
2752 2811 // Update and follow all subklass, sibling and implementor links.
2753 2812 // Check all the stacks here even if not all the workers are active.
2754 2813 // There is no accounting which indicates which stacks might have
2755 2814 // contents to be followed.
2756 2815 if (PrintRevisitStats) {
2757 2816 gclog_or_tty->print_cr("#classes in system dictionary = %d",
2758 2817 SystemDictionary::number_of_classes());
2759 2818 }
2760 2819 for (uint i = 0; i < ParallelGCThreads + 1; i++) {
2761 2820 ParCompactionManager* cm = ParCompactionManager::manager_array(i);
2762 2821 KeepAliveClosure keep_alive_closure(cm);
2763 2822 Stack<Klass*, mtGC>* const rks = cm->revisit_klass_stack();
2764 2823 if (PrintRevisitStats) {
2765 2824 gclog_or_tty->print_cr("Revisit klass stack[%u] length = " SIZE_FORMAT,
2766 2825 i, rks->size());
2767 2826 }
2768 2827 while (!rks->is_empty()) {
2769 2828 Klass* const k = rks->pop();
2770 2829 k->follow_weak_klass_links(is_alive_closure(), &keep_alive_closure);
2771 2830 }
2772 2831
2773 2832 cm->follow_marking_stacks();
2774 2833 }
2775 2834 }
2776 2835
2777 2836 void
2778 2837 PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
2779 2838 cm->revisit_klass_stack()->push(k);
2780 2839 }
2781 2840
2782 2841 void PSParallelCompact::revisit_mdo(ParCompactionManager* cm, DataLayout* p) {
2783 2842 cm->revisit_mdo_stack()->push(p);
2784 2843 }
2785 2844
2786 2845 void PSParallelCompact::follow_mdo_weak_refs() {
2787 2846 // All strongly reachable oops have been marked at this point;
2788 2847 // we can visit and clear any weak references from MDO's which
2789 2848 // we memoized during the strong marking phase.
2790 2849 if (PrintRevisitStats) {
2791 2850 gclog_or_tty->print_cr("#classes in system dictionary = %d",
2792 2851 SystemDictionary::number_of_classes());
2793 2852 }
2794 2853 for (uint i = 0; i < ParallelGCThreads + 1; i++) {
2795 2854 ParCompactionManager* cm = ParCompactionManager::manager_array(i);
2796 2855 Stack<DataLayout*, mtGC>* rms = cm->revisit_mdo_stack();
2797 2856 if (PrintRevisitStats) {
2798 2857 gclog_or_tty->print_cr("Revisit MDO stack[%u] size = " SIZE_FORMAT,
2799 2858 i, rms->size());
2800 2859 }
2801 2860 while (!rms->is_empty()) {
2802 2861 rms->pop()->follow_weak_refs(is_alive_closure());
2803 2862 }
2804 2863
2805 2864 cm->follow_marking_stacks();
2806 2865 }
2807 2866 }
2808 2867
2809 2868
2810 2869 #ifdef VALIDATE_MARK_SWEEP
2811 2870
2812 2871 void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
2813 2872 if (!ValidateMarkSweep)
2814 2873 return;
2815 2874
2816 2875 if (!isroot) {
2817 2876 if (_pointer_tracking) {
2818 2877 guarantee(_adjusted_pointers->contains(p), "should have seen this pointer");
2819 2878 _adjusted_pointers->remove(p);
2820 2879 }
2821 2880 } else {
2822 2881 ptrdiff_t index = _root_refs_stack->find(p);
2823 2882 if (index != -1) {
2824 2883 int l = _root_refs_stack->length();
2825 2884 if (l > 0 && l - 1 != index) {
2826 2885 void* last = _root_refs_stack->pop();
2827 2886 assert(last != p, "should be different");
2828 2887 _root_refs_stack->at_put(index, last);
2829 2888 } else {
2830 2889 _root_refs_stack->remove(p);
2831 2890 }
2832 2891 }
2833 2892 }
2834 2893 }
2835 2894
2836 2895
2837 2896 void PSParallelCompact::check_adjust_pointer(void* p) {
2838 2897 _adjusted_pointers->push(p);
2839 2898 }
2840 2899
2841 2900
2842 2901 class AdjusterTracker: public OopClosure {
2843 2902 public:
2844 2903 AdjusterTracker() {};
2845 2904 void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
2846 2905 void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); }
2847 2906 };
2848 2907
2849 2908
2850 2909 void PSParallelCompact::track_interior_pointers(oop obj) {
2851 2910 if (ValidateMarkSweep) {
2852 2911 _adjusted_pointers->clear();
2853 2912 _pointer_tracking = true;
2854 2913
2855 2914 AdjusterTracker checker;
2856 2915 obj->oop_iterate(&checker);
2857 2916 }
2858 2917 }
2859 2918
2860 2919
2861 2920 void PSParallelCompact::check_interior_pointers() {
2862 2921 if (ValidateMarkSweep) {
2863 2922 _pointer_tracking = false;
2864 2923 guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers");
2865 2924 }
2866 2925 }
2867 2926
2868 2927
2869 2928 void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
2870 2929 if (ValidateMarkSweep) {
2871 2930 guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
2872 2931 _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
2873 2932 }
2874 2933 }
2875 2934
2876 2935
2877 2936 void PSParallelCompact::register_live_oop(oop p, size_t size) {
2878 2937 if (ValidateMarkSweep) {
2879 2938 _live_oops->push(p);
2880 2939 _live_oops_size->push(size);
2881 2940 _live_oops_index++;
2882 2941 }
2883 2942 }
2884 2943
2885 2944 void PSParallelCompact::validate_live_oop(oop p, size_t size) {
2886 2945 if (ValidateMarkSweep) {
2887 2946 oop obj = _live_oops->at((int)_live_oops_index);
2888 2947 guarantee(obj == p, "should be the same object");
2889 2948 guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size");
2890 2949 _live_oops_index++;
2891 2950 }
2892 2951 }
2893 2952
2894 2953 void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size,
2895 2954 HeapWord* compaction_top) {
2896 2955 assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
2897 2956 "should be moved to forwarded location");
2898 2957 if (ValidateMarkSweep) {
2899 2958 PSParallelCompact::validate_live_oop(oop(q), size);
2900 2959 _live_oops_moved_to->push(oop(compaction_top));
2901 2960 }
2902 2961 if (RecordMarkSweepCompaction) {
2903 2962 _cur_gc_live_oops->push(q);
2904 2963 _cur_gc_live_oops_moved_to->push(compaction_top);
2905 2964 _cur_gc_live_oops_size->push(size);
2906 2965 }
2907 2966 }
2908 2967
2909 2968
2910 2969 void PSParallelCompact::compaction_complete() {
2911 2970 if (RecordMarkSweepCompaction) {
2912 2971 GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops;
2913 2972 GrowableArray<HeapWord*>* _tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to;
2914 2973 GrowableArray<size_t> * _tmp_live_oops_size = _cur_gc_live_oops_size;
2915 2974
2916 2975 _cur_gc_live_oops = _last_gc_live_oops;
2917 2976 _cur_gc_live_oops_moved_to = _last_gc_live_oops_moved_to;
2918 2977 _cur_gc_live_oops_size = _last_gc_live_oops_size;
2919 2978 _last_gc_live_oops = _tmp_live_oops;
2920 2979 _last_gc_live_oops_moved_to = _tmp_live_oops_moved_to;
2921 2980 _last_gc_live_oops_size = _tmp_live_oops_size;
2922 2981 }
2923 2982 }
2924 2983
2925 2984
2926 2985 void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
2927 2986 if (!RecordMarkSweepCompaction) {
2928 2987 tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
2929 2988 return;
2930 2989 }
2931 2990
2932 2991 if (_last_gc_live_oops == NULL) {
2933 2992 tty->print_cr("No compaction information gathered yet");
2934 2993 return;
2935 2994 }
2936 2995
2937 2996 for (int i = 0; i < _last_gc_live_oops->length(); i++) {
2938 2997 HeapWord* old_oop = _last_gc_live_oops->at(i);
2939 2998 size_t sz = _last_gc_live_oops_size->at(i);
2940 2999 if (old_oop <= q && q < (old_oop + sz)) {
2941 3000 HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
2942 3001 size_t offset = (q - old_oop);
2943 3002 tty->print_cr("Address " PTR_FORMAT, q);
2944 3003 tty->print_cr(" Was in oop " PTR_FORMAT ", size %d, at offset %d", old_oop, sz, offset);
2945 3004 tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
2946 3005 return;
2947 3006 }
2948 3007 }
2949 3008
2950 3009 tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q);
2951 3010 }
2952 3011 #endif //VALIDATE_MARK_SWEEP
2953 3012
2954 3013 // Update interior oops in the ranges of regions [beg_region, end_region).
2955 3014 void
2956 3015 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2957 3016 SpaceId space_id,
2958 3017 size_t beg_region,
2959 3018 size_t end_region) {
2960 3019 ParallelCompactData& sd = summary_data();
2961 3020 ParMarkBitMap* const mbm = mark_bitmap();
2962 3021
2963 3022 HeapWord* beg_addr = sd.region_to_addr(beg_region);
2964 3023 HeapWord* const end_addr = sd.region_to_addr(end_region);
2965 3024 assert(beg_region <= end_region, "bad region range");
2966 3025 assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2967 3026
2968 3027 #ifdef ASSERT
2969 3028 // Claim the regions to avoid triggering an assert when they are marked as
2970 3029 // filled.
2971 3030 for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
2972 3031 assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
2973 3032 }
2974 3033 #endif // #ifdef ASSERT
2975 3034
2976 3035 if (beg_addr != space(space_id)->bottom()) {
2977 3036 // Find the first live object or block of dead space that *starts* in this
2978 3037 // range of regions. If a partial object crosses onto the region, skip it;
2979 3038 // it will be marked for 'deferred update' when the object head is
2980 3039 // processed. If dead space crosses onto the region, it is also skipped; it
2981 3040 // will be filled when the prior region is processed. If neither of those
2982 3041 // apply, the first word in the region is the start of a live object or dead
2983 3042 // space.
2984 3043 assert(beg_addr > space(space_id)->bottom(), "sanity");
2985 3044 const RegionData* const cp = sd.region(beg_region);
2986 3045 if (cp->partial_obj_size() != 0) {
2987 3046 beg_addr = sd.partial_obj_end(beg_region);
2988 3047 } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
2989 3048 beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
2990 3049 }
2991 3050 }
2992 3051
2993 3052 if (beg_addr < end_addr) {
2994 3053 // A live object or block of dead space starts in this range of Regions.
2995 3054 HeapWord* const dense_prefix_end = dense_prefix(space_id);
2996 3055
2997 3056 // Create closures and iterate.
2998 3057 UpdateOnlyClosure update_closure(mbm, cm, space_id);
2999 3058 FillClosure fill_closure(cm, space_id);
3000 3059 ParMarkBitMap::IterationStatus status;
3001 3060 status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
3002 3061 dense_prefix_end);
3003 3062 if (status == ParMarkBitMap::incomplete) {
3004 3063 update_closure.do_addr(update_closure.source());
3005 3064 }
3006 3065 }
3007 3066
3008 3067 // Mark the regions as filled.
3009 3068 RegionData* const beg_cp = sd.region(beg_region);
3010 3069 RegionData* const end_cp = sd.region(end_region);
3011 3070 for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
3012 3071 cp->set_completed();
3013 3072 }
3014 3073 }
3015 3074
3016 3075 // Return the SpaceId for the space containing addr. If addr is not in the
3017 3076 // heap, last_space_id is returned. In debug mode it expects the address to be
3018 3077 // in the heap and asserts such.
3019 3078 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
3020 3079 assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
3021 3080
3022 3081 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
3023 3082 if (_space_info[id].space()->contains(addr)) {
3024 3083 return SpaceId(id);
3025 3084 }
3026 3085 }
3027 3086
3028 3087 assert(false, "no space contains the addr");
3029 3088 return last_space_id;
3030 3089 }
3031 3090
3032 3091 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
3033 3092 SpaceId id) {
3034 3093 assert(id < last_space_id, "bad space id");
3035 3094
3036 3095 ParallelCompactData& sd = summary_data();
3037 3096 const SpaceInfo* const space_info = _space_info + id;
3038 3097 ObjectStartArray* const start_array = space_info->start_array();
3039 3098
3040 3099 const MutableSpace* const space = space_info->space();
3041 3100 assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
3042 3101 HeapWord* const beg_addr = space_info->dense_prefix();
3043 3102 HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
3044 3103
3045 3104 const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
3046 3105 const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
3047 3106 const RegionData* cur_region;
3048 3107 for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
3049 3108 HeapWord* const addr = cur_region->deferred_obj_addr();
3050 3109 if (addr != NULL) {
3051 3110 if (start_array != NULL) {
3052 3111 start_array->allocate_block(addr);
3053 3112 }
3054 3113 oop(addr)->update_contents(cm);
3055 3114 assert(oop(addr)->is_oop_or_null(), "should be an oop now");
3056 3115 }
3057 3116 }
3058 3117 }
3059 3118
3060 3119 // Skip over count live words starting from beg, and return the address of the
3061 3120 // next live word. Unless marked, the word corresponding to beg is assumed to
3062 3121 // be dead. Callers must either ensure beg does not correspond to the middle of
3063 3122 // an object, or account for those live words in some other way. Callers must
3064 3123 // also ensure that there are enough live words in the range [beg, end) to skip.
3065 3124 HeapWord*
3066 3125 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
3067 3126 {
3068 3127 assert(count > 0, "sanity");
3069 3128
3070 3129 ParMarkBitMap* m = mark_bitmap();
3071 3130 idx_t bits_to_skip = m->words_to_bits(count);
3072 3131 idx_t cur_beg = m->addr_to_bit(beg);
3073 3132 const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
3074 3133
3075 3134 do {
3076 3135 cur_beg = m->find_obj_beg(cur_beg, search_end);
3077 3136 idx_t cur_end = m->find_obj_end(cur_beg, search_end);
3078 3137 const size_t obj_bits = cur_end - cur_beg + 1;
3079 3138 if (obj_bits > bits_to_skip) {
3080 3139 return m->bit_to_addr(cur_beg + bits_to_skip);
3081 3140 }
3082 3141 bits_to_skip -= obj_bits;
3083 3142 cur_beg = cur_end + 1;
3084 3143 } while (bits_to_skip > 0);
3085 3144
3086 3145 // Skipping the desired number of words landed just past the end of an object.
3087 3146 // Find the start of the next object.
3088 3147 cur_beg = m->find_obj_beg(cur_beg, search_end);
3089 3148 assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
3090 3149 return m->bit_to_addr(cur_beg);
3091 3150 }
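
A toy model (editor's sketch, not the HotSpot implementation) of skip_live_words():
live words are represented here as a plain boolean array instead of the mark bitmap's
object-begin/end bits, and the function returns the index of the next live word after
skipping count of them. The word at beg counts only if it is itself marked live,
loosely mirroring the "assumed dead unless marked" rule described above.

    #include <cassert>
    #include <cstddef>

    size_t skip_live_words_toy(const bool* live, size_t beg, size_t end, size_t count) {
      assert(count > 0);
      for (size_t i = beg; i < end; ++i) {
        if (live[i]) {
          if (count == 0) {
            return i;            // skipped enough; this is the next live word
          }
          --count;
        }
      }
      assert(false && "not enough live words in [beg, end) to skip");
      return end;
    }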
3092 3151
3093 3152 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
3094 3153 SpaceId src_space_id,
3095 3154 size_t src_region_idx)
3096 3155 {
3097 3156 assert(summary_data().is_region_aligned(dest_addr), "not aligned");
3098 3157
3099 3158 const SplitInfo& split_info = _space_info[src_space_id].split_info();
3100 3159 if (split_info.dest_region_addr() == dest_addr) {
3101 3160 // The partial object ending at the split point contains the first word to
3102 3161 // be copied to dest_addr.
3103 3162 return split_info.first_src_addr();
3104 3163 }
3105 3164
3106 3165 const ParallelCompactData& sd = summary_data();
3107 3166 ParMarkBitMap* const bitmap = mark_bitmap();
3108 3167 const size_t RegionSize = ParallelCompactData::RegionSize;
3109 3168
3110 3169 assert(sd.is_region_aligned(dest_addr), "not aligned");
3111 3170 const RegionData* const src_region_ptr = sd.region(src_region_idx);
3112 3171 const size_t partial_obj_size = src_region_ptr->partial_obj_size();
3113 3172 HeapWord* const src_region_destination = src_region_ptr->destination();
3114 3173
3115 3174 assert(dest_addr >= src_region_destination, "wrong src region");
3116 3175 assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
3117 3176
3118 3177 HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
3119 3178 HeapWord* const src_region_end = src_region_beg + RegionSize;
3120 3179
3121 3180 HeapWord* addr = src_region_beg;
3122 3181 if (dest_addr == src_region_destination) {
3123 3182 // Return the first live word in the source region.
3124 3183 if (partial_obj_size == 0) {
3125 3184 addr = bitmap->find_obj_beg(addr, src_region_end);
3126 3185 assert(addr < src_region_end, "no objects start in src region");
3127 3186 }
3128 3187 return addr;
3129 3188 }
3130 3189
3131 3190 // Must skip some live data.
3132 3191 size_t words_to_skip = dest_addr - src_region_destination;
3133 3192 assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
3134 3193
3135 3194 if (partial_obj_size >= words_to_skip) {
3136 3195 // All the live words to skip are part of the partial object.
3137 3196 addr += words_to_skip;
3138 3197 if (partial_obj_size == words_to_skip) {
3139 3198 // Find the first live word past the partial object.
3140 3199 addr = bitmap->find_obj_beg(addr, src_region_end);
3141 3200 assert(addr < src_region_end, "wrong src region");
3142 3201 }
3143 3202 return addr;
3144 3203 }
3145 3204
3146 3205 // Skip over the partial object (if any).
3147 3206 if (partial_obj_size != 0) {
3148 3207 words_to_skip -= partial_obj_size;
3149 3208 addr += partial_obj_size;
3150 3209 }
3151 3210
3152 3211 // Skip over live words due to objects that start in the region.
3153 3212 addr = skip_live_words(addr, src_region_end, words_to_skip);
3154 3213 assert(addr < src_region_end, "wrong src region");
3155 3214 return addr;
3156 3215 }
3157 3216
3158 3217 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
3159 3218 SpaceId src_space_id,
3160 3219 size_t beg_region,
3161 3220 HeapWord* end_addr)
3162 3221 {
3163 3222 ParallelCompactData& sd = summary_data();
3164 3223
3165 3224 #ifdef ASSERT
3166 3225 MutableSpace* const src_space = _space_info[src_space_id].space();
3167 3226 HeapWord* const beg_addr = sd.region_to_addr(beg_region);
3168 3227 assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
3169 3228 "src_space_id does not match beg_addr");
3170 3229 assert(src_space->contains(end_addr) || end_addr == src_space->end(),
3171 3230 "src_space_id does not match end_addr");
3172 3231 #endif // #ifdef ASSERT
3173 3232
3174 3233 RegionData* const beg = sd.region(beg_region);
3175 3234 RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
3176 3235
3177 3236 // Regions up to new_top() are enqueued if they become available.
3178 3237 HeapWord* const new_top = _space_info[src_space_id].new_top();
3179 3238 RegionData* const enqueue_end =
3180 3239 sd.addr_to_region_ptr(sd.region_align_up(new_top));
3181 3240
3182 3241 for (RegionData* cur = beg; cur < end; ++cur) {
3183 3242 assert(cur->data_size() > 0, "region must have live data");
3184 3243 cur->decrement_destination_count();
3185 3244 if (cur < enqueue_end && cur->available() && cur->claim()) {
3186 3245 cm->push_region(sd.region(cur));
3187 3246 }
3188 3247 }
3189 3248 }
3190 3249
3191 3250 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
3192 3251 SpaceId& src_space_id,
3193 3252 HeapWord*& src_space_top,
3194 3253 HeapWord* end_addr)
3195 3254 {
3196 3255 typedef ParallelCompactData::RegionData RegionData;
3197 3256
3198 3257 ParallelCompactData& sd = PSParallelCompact::summary_data();
3199 3258 const size_t region_size = ParallelCompactData::RegionSize;
3200 3259
3201 3260 size_t src_region_idx = 0;
3202 3261
3203 3262 // Skip empty regions (if any) up to the top of the space.
3204 3263 HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
3205 3264 RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
3206 3265 HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
3207 3266 const RegionData* const top_region_ptr =
3208 3267 sd.addr_to_region_ptr(top_aligned_up);
3209 3268 while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
3210 3269 ++src_region_ptr;
3211 3270 }
3212 3271
3213 3272 if (src_region_ptr < top_region_ptr) {
3214 3273 // The next source region is in the current space. Update src_region_idx
3215 3274 // and the source address to match src_region_ptr.
3216 3275 src_region_idx = sd.region(src_region_ptr);
3217 3276 HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
3218 3277 if (src_region_addr > closure.source()) {
3219 3278 closure.set_source(src_region_addr);
3220 3279 }
3221 3280 return src_region_idx;
3222 3281 }
3223 3282
3224 3283 // Switch to a new source space and find the first non-empty region.
3225 3284 unsigned int space_id = src_space_id + 1;
3226 3285 assert(space_id < last_space_id, "not enough spaces");
3227 3286
3228 3287 HeapWord* const destination = closure.destination();
3229 3288
3230 3289 do {
3231 3290 MutableSpace* space = _space_info[space_id].space();
3232 3291 HeapWord* const bottom = space->bottom();
3233 3292 const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
3234 3293
3235 3294 // Iterate over the spaces that do not compact into themselves.
3236 3295 if (bottom_cp->destination() != bottom) {
3237 3296 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
3238 3297 const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
3239 3298
3240 3299 for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
3241 3300 if (src_cp->live_obj_size() > 0) {
3242 3301 // Found it.
3243 3302 assert(src_cp->destination() == destination,
3244 3303 "first live obj in the space must match the destination");
3245 3304 assert(src_cp->partial_obj_size() == 0,
3246 3305 "a space cannot begin with a partial obj");
3247 3306
3248 3307 src_space_id = SpaceId(space_id);
3249 3308 src_space_top = space->top();
3250 3309 const size_t src_region_idx = sd.region(src_cp);
3251 3310 closure.set_source(sd.region_to_addr(src_region_idx));
3252 3311 return src_region_idx;
3253 3312 } else {
3254 3313 assert(src_cp->data_size() == 0, "sanity");
3255 3314 }
3256 3315 }
3257 3316 }
3258 3317 } while (++space_id < last_space_id);
3259 3318
3260 3319 assert(false, "no source region was found");
3261 3320 return 0;
3262 3321 }
3263 3322
3264 3323 void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
3265 3324 {
3266 3325 typedef ParMarkBitMap::IterationStatus IterationStatus;
3267 3326 const size_t RegionSize = ParallelCompactData::RegionSize;
3268 3327 ParMarkBitMap* const bitmap = mark_bitmap();
3269 3328 ParallelCompactData& sd = summary_data();
3270 3329 RegionData* const region_ptr = sd.region(region_idx);
3271 3330
3272 3331 // Get the items needed to construct the closure.
3273 3332 HeapWord* dest_addr = sd.region_to_addr(region_idx);
3274 3333 SpaceId dest_space_id = space_id(dest_addr);
3275 3334 ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
3276 3335 HeapWord* new_top = _space_info[dest_space_id].new_top();
3277 3336 assert(dest_addr < new_top, "sanity");
3278 3337 const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
3279 3338
3280 3339 // Get the source region and related info.
3281 3340 size_t src_region_idx = region_ptr->source_region();
3282 3341 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
3283 3342 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
3284 3343
3285 3344 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3286 3345 closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
3287 3346
3288 3347 // Adjust src_region_idx to prepare for decrementing destination counts (the
3289 3348 // destination count is not decremented when a region is copied to itself).
3290 3349 if (src_region_idx == region_idx) {
3291 3350 src_region_idx += 1;
3292 3351 }
3293 3352
3294 3353 if (bitmap->is_unmarked(closure.source())) {
3295 3354 // The first source word is in the middle of an object; copy the remainder
3296 3355 // of the object or as much as will fit. The fact that pointer updates were
3297 3356 // deferred will be noted when the object header is processed.
3298 3357 HeapWord* const old_src_addr = closure.source();
3299 3358 closure.copy_partial_obj();
3300 3359 if (closure.is_full()) {
3301 3360 decrement_destination_counts(cm, src_space_id, src_region_idx,
3302 3361 closure.source());
3303 3362 region_ptr->set_deferred_obj_addr(NULL);
3304 3363 region_ptr->set_completed();
3305 3364 return;
3306 3365 }
3307 3366
3308 3367 HeapWord* const end_addr = sd.region_align_down(closure.source());
3309 3368 if (sd.region_align_down(old_src_addr) != end_addr) {
3310 3369 // The partial object was copied from more than one source region.
3311 3370 decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
3312 3371
3313 3372 // Move to the next source region, possibly switching spaces as well. All
3314 3373 // args except end_addr may be modified.
3315 3374 src_region_idx = next_src_region(closure, src_space_id, src_space_top,
3316 3375 end_addr);
3317 3376 }
3318 3377 }
3319 3378
3320 3379 do {
3321 3380 HeapWord* const cur_addr = closure.source();
3322 3381 HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
3323 3382 src_space_top);
3324 3383 IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
3325 3384
3326 3385 if (status == ParMarkBitMap::incomplete) {
3327 3386 // The last obj that starts in the source region does not end in the
3328 3387 // region.
3329 3388 assert(closure.source() < end_addr, "sanity");
3330 3389 HeapWord* const obj_beg = closure.source();
3331 3390 HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
3332 3391 src_space_top);
3333 3392 HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
3334 3393 if (obj_end < range_end) {
3335 3394 // The end was found; the entire object will fit.
3336 3395 status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
3337 3396 assert(status != ParMarkBitMap::would_overflow, "sanity");
3338 3397 } else {
3339 3398 // The end was not found; the object will not fit.
3340 3399 assert(range_end < src_space_top, "obj cannot cross space boundary");
3341 3400 status = ParMarkBitMap::would_overflow;
3342 3401 }
3343 3402 }
3344 3403
3345 3404 if (status == ParMarkBitMap::would_overflow) {
3346 3405 // The last object did not fit. Note that interior oop updates were
3347 3406 // deferred, then copy enough of the object to fill the region.
3348 3407 region_ptr->set_deferred_obj_addr(closure.destination());
3349 3408 status = closure.copy_until_full(); // copies from closure.source()
3350 3409
3351 3410 decrement_destination_counts(cm, src_space_id, src_region_idx,
3352 3411 closure.source());
3353 3412 region_ptr->set_completed();
3354 3413 return;
3355 3414 }
3356 3415
3357 3416 if (status == ParMarkBitMap::full) {
3358 3417 decrement_destination_counts(cm, src_space_id, src_region_idx,
3359 3418 closure.source());
3360 3419 region_ptr->set_deferred_obj_addr(NULL);
3361 3420 region_ptr->set_completed();
3362 3421 return;
3363 3422 }
3364 3423
3365 3424 decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
3366 3425
3367 3426 // Move to the next source region, possibly switching spaces as well. All
3368 3427 // args except end_addr may be modified.
3369 3428 src_region_idx = next_src_region(closure, src_space_id, src_space_top,
3370 3429 end_addr);
3371 3430 } while (true);
3372 3431 }
3373 3432
3433 +void PSParallelCompact::fill_blocks(size_t region_idx)
3434 +{
3435 + // Fill in the block table elements for the specified region. Each block
3436 + // table element holds the number of live words in the region that are to the
3437 + // left of the first object that starts in the block. Thus only blocks in
3438 + // which an object starts need to be filled.
3439 + //
3440 + // The algorithm scans the section of the bitmap that corresponds to the
3441 + // region, keeping a running total of the live words. When an object start is
3442 + // found, if it's the first to start in the block that contains it, the
3443 + // current total is written to the block table element.
3444 + const size_t Log2BlockSize = ParallelCompactData::Log2BlockSize;
3445 + const size_t Log2RegionSize = ParallelCompactData::Log2RegionSize;
3446 + const size_t RegionSize = ParallelCompactData::RegionSize;
3447 +
3448 + ParallelCompactData& sd = summary_data();
3449 + const size_t partial_obj_size = sd.region(region_idx)->partial_obj_size();
3450 + if (partial_obj_size >= RegionSize) {
3451 + return; // No objects start in this region.
3452 + }
3453 +
3454 + // Ensure the first loop iteration decides that the block has changed.
3455 + size_t cur_block = sd.block_count();
3456 +
3457 + const ParMarkBitMap* const bitmap = mark_bitmap();
3458 +
3459 + const size_t Log2BitsPerBlock = Log2BlockSize - LogMinObjAlignment;
3460 + assert((size_t)1 << Log2BitsPerBlock ==
3461 + bitmap->words_to_bits(ParallelCompactData::BlockSize), "sanity");
3462 +
3463 + size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
3464 + const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
3465 + size_t live_bits = bitmap->words_to_bits(partial_obj_size);
3466 + beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
3467 + while (beg_bit < range_end) {
3468 + const size_t new_block = beg_bit >> Log2BitsPerBlock;
3469 + if (new_block != cur_block) {
3470 + cur_block = new_block;
3471 + sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
3472 + }
3473 +
3474 + const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
3475 + if (end_bit < range_end - 1) {
3476 + live_bits += end_bit - beg_bit + 1;
3477 + beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
3478 + } else {
3479 + return;
3480 + }
3481 + }
3482 +}
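
A standalone sketch (editor's illustration, not the HotSpot code) of the block-table
fill described in the comment at the top of fill_blocks(): walking the objects that
start in a region in address order and recording, for each block in which an object
starts, the number of live words to its left. The flat object list and all names here
are hypothetical; the real code derives the same information by scanning the mark
bitmap.

    #include <cstddef>
    #include <vector>

    struct ToyObject { size_t start; size_t size; };   // word offsets within the region

    // objs must be sorted by start and contain only objects that start in this region.
    std::vector<size_t> fill_blocks_toy(const std::vector<ToyObject>& objs,
                                        size_t region_words, size_t block_words,
                                        size_t partial_obj_size) {
      const size_t block_count = region_words / block_words;
      std::vector<size_t> offsets(block_count, 0);   // only blocks containing a start are meaningful
      size_t live_words = partial_obj_size;          // live words entering from the previous region
      size_t cur_block = block_count;                // force the first iteration to record its block
      for (size_t i = 0; i < objs.size(); ++i) {
        const size_t block = objs[i].start / block_words;
        if (block != cur_block) {
          cur_block = block;
          offsets[cur_block] = live_words;           // live words left of this block's first object start
        }
        live_words += objs[i].size;
      }
      return offsets;
    }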
3483 +
3374 3484 void
3375 3485 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
3376 3486 const MutableSpace* sp = space(space_id);
3377 3487 if (sp->is_empty()) {
3378 3488 return;
3379 3489 }
3380 3490
3381 3491 ParallelCompactData& sd = PSParallelCompact::summary_data();
3382 3492 ParMarkBitMap* const bitmap = mark_bitmap();
3383 3493 HeapWord* const dp_addr = dense_prefix(space_id);
3384 3494 HeapWord* beg_addr = sp->bottom();
3385 3495 HeapWord* end_addr = sp->top();
3386 3496
3387 3497 assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
3388 3498
3389 3499 const size_t beg_region = sd.addr_to_region_idx(beg_addr);
3390 3500 const size_t dp_region = sd.addr_to_region_idx(dp_addr);
3391 3501 if (beg_region < dp_region) {
3392 3502 update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
3393 3503 }
3394 3504
3395 3505 // The destination of the first live object that starts in the region is one
3396 3506 // past the end of the partial object entering the region (if any).
3397 3507 HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
3398 3508 HeapWord* const new_top = _space_info[space_id].new_top();
3399 3509 assert(new_top >= dest_addr, "bad new_top value");
3400 3510 const size_t words = pointer_delta(new_top, dest_addr);
3401 3511
3402 3512 if (words > 0) {
3403 3513 ObjectStartArray* start_array = _space_info[space_id].start_array();
3404 3514 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3405 3515
3406 3516 ParMarkBitMap::IterationStatus status;
3407 3517 status = bitmap->iterate(&closure, dest_addr, end_addr);
3408 3518 assert(status == ParMarkBitMap::full, "iteration not complete");
3409 3519 assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
3410 3520 "live objects skipped because closure is full");
3411 3521 }
3412 3522 }
3413 3523
3414 3524 jlong PSParallelCompact::millis_since_last_gc() {
3415 3525 // We need a monotonically non-decreasing time in ms but
3416 3526 // os::javaTimeMillis() does not guarantee monotonicity.
3417 3527 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
3418 3528 jlong ret_val = now - _time_of_last_gc;
3419 3529 // XXX See note in genCollectedHeap::millis_since_last_gc().
3420 3530 if (ret_val < 0) {
3421 3531 NOT_PRODUCT(warning("time warp: "INT64_FORMAT, ret_val);)
3422 3532 return 0;
3423 3533 }
3424 3534 return ret_val;
3425 3535 }
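
A minimal sketch (editor's addition, hypothetical names) of the conversion and
clamping above: the nanosecond clock is scaled down to milliseconds and a negative
elapsed value, which can appear because the underlying clock is not guaranteed to be
monotonic, is clamped to zero.

    #include <stdint.h>

    static const int64_t NANOS_PER_MILLI = 1000000;

    int64_t millis_since(int64_t now_nanos, int64_t time_of_last_gc_millis) {
      const int64_t now_millis = now_nanos / NANOS_PER_MILLI;
      const int64_t elapsed = now_millis - time_of_last_gc_millis;
      return elapsed < 0 ? 0 : elapsed;   // clamp apparent time warps to zero
    }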
3426 3536
3427 3537 void PSParallelCompact::reset_millis_since_last_gc() {
3428 3538 // We need a monotonically non-decreasing time in ms but
3429 3539 // os::javaTimeMillis() does not guarantee monotonicity.
3430 3540 _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
3431 3541 }
3432 3542
3433 3543 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
3434 3544 {
3435 3545 if (source() != destination()) {
3436 3546 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3437 3547 Copy::aligned_conjoint_words(source(), destination(), words_remaining());
3438 3548 }
3439 3549 update_state(words_remaining());
3440 3550 assert(is_full(), "sanity");
3441 3551 return ParMarkBitMap::full;
3442 3552 }
3443 3553
3444 3554 void MoveAndUpdateClosure::copy_partial_obj()
3445 3555 {
3446 3556 size_t words = words_remaining();
3447 3557
3448 3558 HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
3449 3559 HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
3450 3560 if (end_addr < range_end) {
3451 3561 words = bitmap()->obj_size(source(), end_addr);
3452 3562 }
3453 3563
3454 3564 // This test is necessary; if omitted, the pointer updates to a partial object
3455 3565 // that crosses the dense prefix boundary could be overwritten.
3456 3566 if (source() != destination()) {
3457 3567 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3458 3568 Copy::aligned_conjoint_words(source(), destination(), words);
3459 3569 }
3460 3570 update_state(words);
3461 3571 }
3462 3572
3463 3573 ParMarkBitMapClosure::IterationStatus
3464 3574 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3465 3575 assert(destination() != NULL, "sanity");
3466 3576 assert(bitmap()->obj_size(addr) == words, "bad size");
3467 3577
3468 3578 _source = addr;
3469 3579 assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
3470 3580 destination(), "wrong destination");
3471 3581
3472 3582 if (words > words_remaining()) {
3473 3583 return ParMarkBitMap::would_overflow;
3474 3584 }
3475 3585
3476 3586 // The start_array must be updated even if the object is not moving.
3477 3587 if (_start_array != NULL) {
3478 3588 _start_array->allocate_block(destination());
3479 3589 }
3480 3590
3481 3591 if (destination() != source()) {
3482 3592 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3483 3593 Copy::aligned_conjoint_words(source(), destination(), words);
3484 3594 }
3485 3595
3486 3596 oop moved_oop = (oop) destination();
3487 3597 moved_oop->update_contents(compaction_manager());
3488 3598 assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
3489 3599
3490 3600 update_state(words);
3491 3601 assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
3492 3602 return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
3493 3603 }
3494 3604
3495 3605 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
3496 3606 ParCompactionManager* cm,
3497 3607 PSParallelCompact::SpaceId space_id) :
3498 3608 ParMarkBitMapClosure(mbm, cm),
3499 3609 _space_id(space_id),
3500 3610 _start_array(PSParallelCompact::start_array(space_id))
3501 3611 {
3502 3612 }
3503 3613
3504 3614 // Updates the references in the object to their new values.
3505 3615 ParMarkBitMapClosure::IterationStatus
3506 3616 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
3507 3617 do_addr(addr);
3508 3618 return ParMarkBitMap::incomplete;
3509 3619 }
3510 3620
3511 3621 // Prepare for compaction. This method is executed once
3512 3622 // (i.e., by a single thread) before compaction.
3513 3623 // Save the updated location of the intArrayKlassObj for
3514 3624 // filling holes in the dense prefix.
3515 3625 void PSParallelCompact::compact_prologue() {
3516 3626 _updated_int_array_klass_obj = (klassOop)
3517 3627 summary_data().calc_new_pointer(Universe::intArrayKlassObj());
3518 3628 }