--- old/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ new/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
27 27 #include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
28 28 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
29 29 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
30 30 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
31 31 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
32 32 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
33 33 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
34 34 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
35 35 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
36 36 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
37 37 #include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
38 38 #include "gc_implementation/shared/gcHeapSummary.hpp"
39 39 #include "gc_implementation/shared/gcWhen.hpp"
40 40 #include "memory/gcLocker.inline.hpp"
41 41 #include "oops/oop.inline.hpp"
42 42 #include "runtime/handles.inline.hpp"
43 43 #include "runtime/java.hpp"
44 44 #include "runtime/vmThread.hpp"
45 45 #include "services/memTracker.hpp"
46 46 #include "utilities/vmError.hpp"
47 47
48 48 PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
49 49 PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
50 50 PSPermGen* ParallelScavengeHeap::_perm_gen = NULL;
51 51 PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
52 52 PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
53 53 ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
54 54 GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
55 55
56 56 static void trace_gen_sizes(const char* const str,
57 57 size_t pg_min, size_t pg_max,
58 58 size_t og_min, size_t og_max,
59 59 size_t yg_min, size_t yg_max)
60 60 {
61 61 if (TracePageSizes) {
62 62 tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
63 63 SIZE_FORMAT "," SIZE_FORMAT " "
64 64 SIZE_FORMAT "," SIZE_FORMAT " "
65 65 SIZE_FORMAT,
66 66 str, pg_min / K, pg_max / K,
67 67 og_min / K, og_max / K,
68 68 yg_min / K, yg_max / K,
69 69 (pg_max + og_max + yg_max) / K);
70 70 }
71 71 }
72 72
73 73 jint ParallelScavengeHeap::initialize() {
74 74 CollectedHeap::pre_initialize();
75 75
76 76 // Cannot be initialized until after the flags are parsed
77 77 // GenerationSizer flag_parser;
78 78 _collector_policy = new GenerationSizer();
79 79
80 80 size_t yg_min_size = _collector_policy->min_young_gen_size();
81 81 size_t yg_max_size = _collector_policy->max_young_gen_size();
82 82 size_t og_min_size = _collector_policy->min_old_gen_size();
83 83 size_t og_max_size = _collector_policy->max_old_gen_size();
84 84 // Why isn't there a min_perm_gen_size()?
85 85 size_t pg_min_size = _collector_policy->perm_gen_size();
86 86 size_t pg_max_size = _collector_policy->max_perm_gen_size();
87 87
88 88 trace_gen_sizes("ps heap raw",
89 89 pg_min_size, pg_max_size,
90 90 og_min_size, og_max_size,
91 91 yg_min_size, yg_max_size);
92 92
93 93 // The ReservedSpace ctor used below requires that the page size for the perm
94 94 // gen is <= the page size for the rest of the heap (young + old gens).
95 95 const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
96 96 yg_max_size + og_max_size,
97 97 8);
98 98 const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
99 99 pg_max_size, 16),
100 100 og_page_sz);
101 101
102 102 const size_t pg_align = set_alignment(_perm_gen_alignment, pg_page_sz);
103 103 const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz);
104 104 const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);
105 105
106 106 // Update sizes to reflect the selected page size(s).
107 107 //
108 108 // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
109 109 // should check UseAdaptiveSizePolicy. Changes from generationSizer could
110 110 // move to the common code.
111 111 yg_min_size = align_size_up(yg_min_size, yg_align);
112 112 yg_max_size = align_size_up(yg_max_size, yg_align);
113 113 size_t yg_cur_size =
114 114 align_size_up(_collector_policy->young_gen_size(), yg_align);
115 115 yg_cur_size = MAX2(yg_cur_size, yg_min_size);
116 116
117 117 og_min_size = align_size_up(og_min_size, og_align);
118 118 // Align old gen size down to preserve specified heap size.
119 119 assert(og_align == yg_align, "sanity");
120 120 og_max_size = align_size_down(og_max_size, og_align);
121 121 og_max_size = MAX2(og_max_size, og_min_size);
122 122 size_t og_cur_size =
123 123 align_size_down(_collector_policy->old_gen_size(), og_align);
124 124 og_cur_size = MAX2(og_cur_size, og_min_size);
125 125
126 126 pg_min_size = align_size_up(pg_min_size, pg_align);
127 127 pg_max_size = align_size_up(pg_max_size, pg_align);
128 128 size_t pg_cur_size = pg_min_size;
129 129
130 130 trace_gen_sizes("ps heap rnd",
131 131 pg_min_size, pg_max_size,
132 132 og_min_size, og_max_size,
133 133 yg_min_size, yg_max_size);
134 134
135 - const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
135 + size_t total_reserved = 0;
136 +
137 + total_reserved = add_and_check_overflow(total_reserved, pg_max_size);
138 + total_reserved = add_and_check_overflow(total_reserved, og_max_size);
139 + total_reserved = add_and_check_overflow(total_reserved, yg_max_size);
140 +
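
The hunk above replaces the unchecked sum of the three generation maxima with overflow-checked accumulation. As a minimal sketch (the helper itself is defined elsewhere in the patch, so its exact name and error path are assumptions here), add_and_check_overflow could look like this:

    // Hypothetical sketch of the overflow-checking helper: detect unsigned
    // wrap-around while summing generation sizes, and abort VM
    // initialization if it occurs.
    static size_t add_and_check_overflow(size_t base, size_t arg) {
      size_t total = base + arg;
      if (total < base) { // the sum wrapped around, so it overflowed
        vm_exit_during_initialization("Overflow in total heap size calculation");
      }
      return total;
    }

The check matters most on an ILP32 VM, where size_t is 32 bits: with the old code a request of, say, 2G + 1536M + 1G silently wraps to 512M, and the heap reservation proceeds with a bogus total instead of failing cleanly.
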
136 141 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
137 142
138 143 // The main part of the heap (old gen + young gen) can often use a larger page
139 144 // size than is needed or wanted for the perm gen. Use the "compound
140 145 // alignment" ReservedSpace ctor to avoid having to use the same page size for
141 146 // all gens.
142 147
143 148 ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
144 149 og_align, addr);
145 150
146 151 if (UseCompressedOops) {
147 152 if (addr != NULL && !heap_rs.is_reserved()) {
148 153 // Failed to reserve at specified address - the requested memory
149 154 // region is already taken, for example, by the 'java' launcher.
150 155 // Try again to reserve the heap at a higher address.
151 156 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
152 157 ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
153 158 og_align, addr);
154 159 if (addr != NULL && !heap_rs0.is_reserved()) {
155 160 // Failed to reserve at specified address again - give up.
156 161 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
157 162 assert(addr == NULL, "");
158 163 ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
159 164 og_align, addr);
160 165 heap_rs = heap_rs1;
161 166 } else {
162 167 heap_rs = heap_rs0;
163 168 }
164 169 }
165 170 }
166 171
167 172 MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);
168 173
169 174 os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
170 175 heap_rs.base(), pg_max_size);
171 176 os::trace_page_sizes("ps main", og_min_size + yg_min_size,
172 177 og_max_size + yg_max_size, og_page_sz,
173 178 heap_rs.base() + pg_max_size,
174 179 heap_rs.size() - pg_max_size);
175 180 if (!heap_rs.is_reserved()) {
176 181 vm_shutdown_during_initialization(
177 182 "Could not reserve enough space for object heap");
178 183 return JNI_ENOMEM;
179 184 }
180 185
181 186 _reserved = MemRegion((HeapWord*)heap_rs.base(),
182 187 (HeapWord*)(heap_rs.base() + heap_rs.size()));
183 188
184 189 CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
185 190 _barrier_set = barrier_set;
186 191 oopDesc::set_bs(_barrier_set);
187 192 if (_barrier_set == NULL) {
188 193 vm_shutdown_during_initialization(
189 194 "Could not reserve enough space for barrier set");
190 195 return JNI_ENOMEM;
191 196 }
192 197
193 198 // Initial young gen size is 4 MB
194 199 //
195 200 // XXX - what about flag_parser.young_gen_size()?
196 201 const size_t init_young_size = align_size_up(4 * M, yg_align);
197 202 yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);
198 203
199 204 // Split the reserved space into perm gen and the main heap (everything else).
200 205 // The main heap uses a different alignment.
201 206 ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
202 207 ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);
203 208
204 209 // Make up the generations
205 210 // Calculate the maximum size that a generation can grow. This
206 211 // includes growth into the other generation. Note that the
207 212 // parameter _max_gen_size is kept as the maximum
208 213 // size of the generation as the boundaries currently stand.
209 214 // _max_gen_size is still used as that value.
210 215 double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
211 216 double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
212 217
213 218 _gens = new AdjoiningGenerations(main_rs,
214 219 og_cur_size,
215 220 og_min_size,
216 221 og_max_size,
217 222 yg_cur_size,
218 223 yg_min_size,
219 224 yg_max_size,
220 225 yg_align);
221 226
222 227 _old_gen = _gens->old_gen();
223 228 _young_gen = _gens->young_gen();
224 229
225 230 const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
226 231 const size_t old_capacity = _old_gen->capacity_in_bytes();
227 232 const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
228 233 _size_policy =
229 234 new PSAdaptiveSizePolicy(eden_capacity,
230 235 initial_promo_size,
231 236 young_gen()->to_space()->capacity_in_bytes(),
232 237 intra_heap_alignment(),
233 238 max_gc_pause_sec,
234 239 max_gc_minor_pause_sec,
235 240 GCTimeRatio
236 241 );
237 242
238 243 _perm_gen = new PSPermGen(perm_rs,
239 244 pg_align,
240 245 pg_cur_size,
241 246 pg_cur_size,
242 247 pg_max_size,
243 248 "perm", 2);
244 249
245 250 assert(!UseAdaptiveGCBoundary ||
246 251 (old_gen()->virtual_space()->high_boundary() ==
247 252 young_gen()->virtual_space()->low_boundary()),
248 253 "Boundaries must meet");
249 254 // initialize the policy counters - 2 collectors, 3 generations
250 255 _gc_policy_counters =
251 256 new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
252 257 _psh = this;
253 258
254 259 // Set up the GCTaskManager
255 260 _gc_task_manager = GCTaskManager::create(ParallelGCThreads);
256 261
257 262 if (UseParallelOldGC && !PSParallelCompact::initialize()) {
258 263 return JNI_ENOMEM;
259 264 }
260 265
261 266 return JNI_OK;
262 267 }
263 268
264 269 void ParallelScavengeHeap::post_initialize() {
265 270 // Need to init the tenuring threshold
266 271 PSScavenge::initialize();
267 272 if (UseParallelOldGC) {
268 273 PSParallelCompact::post_initialize();
269 274 } else {
270 275 PSMarkSweep::initialize();
271 276 }
272 277 PSPromotionManager::initialize();
273 278 }
274 279
275 280 void ParallelScavengeHeap::update_counters() {
276 281 young_gen()->update_counters();
277 282 old_gen()->update_counters();
278 283 perm_gen()->update_counters();
279 284 }
280 285
281 286 size_t ParallelScavengeHeap::capacity() const {
282 287 size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
283 288 return value;
284 289 }
285 290
286 291 size_t ParallelScavengeHeap::used() const {
287 292 size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
288 293 return value;
289 294 }
290 295
291 296 bool ParallelScavengeHeap::is_maximal_no_gc() const {
292 297 return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
293 298 }
294 299
295 300
296 301 size_t ParallelScavengeHeap::permanent_capacity() const {
297 302 return perm_gen()->capacity_in_bytes();
298 303 }
299 304
300 305 size_t ParallelScavengeHeap::permanent_used() const {
301 306 return perm_gen()->used_in_bytes();
302 307 }
303 308
304 309 size_t ParallelScavengeHeap::max_capacity() const {
305 310 size_t estimated = reserved_region().byte_size();
306 311 estimated -= perm_gen()->reserved().byte_size();
307 312 if (UseAdaptiveSizePolicy) {
308 313 estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
309 314 } else {
310 315 estimated -= young_gen()->to_space()->capacity_in_bytes();
311 316 }
312 317 return MAX2(estimated, capacity());
313 318 }
314 319
315 320 bool ParallelScavengeHeap::is_in(const void* p) const {
316 321 if (young_gen()->is_in(p)) {
317 322 return true;
318 323 }
319 324
320 325 if (old_gen()->is_in(p)) {
321 326 return true;
322 327 }
323 328
324 329 if (perm_gen()->is_in(p)) {
325 330 return true;
326 331 }
327 332
328 333 return false;
329 334 }
330 335
331 336 bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
332 337 if (young_gen()->is_in_reserved(p)) {
333 338 return true;
334 339 }
335 340
336 341 if (old_gen()->is_in_reserved(p)) {
337 342 return true;
338 343 }
339 344
340 345 if (perm_gen()->is_in_reserved(p)) {
341 346 return true;
342 347 }
343 348
344 349 return false;
345 350 }
346 351
347 352 bool ParallelScavengeHeap::is_scavengable(const void* addr) {
348 353 return is_in_young((oop)addr);
349 354 }
350 355
351 356 #ifdef ASSERT
352 357 // Don't implement this by using is_in_young(). This method is used
353 358 // in some cases to check that is_in_young() is correct.
354 359 bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
355 360 assert(is_in_reserved(p) || p == NULL,
356 361 "Does not work if address is non-null and outside of the heap");
357 362 // The order of the generations is perm (low addr), old, young (high addr)
358 363 return p >= old_gen()->reserved().end();
359 364 }
360 365 #endif
361 366
362 367 // There are two levels of allocation policy here.
363 368 //
364 369 // When an allocation request fails, the requesting thread must invoke a VM
365 370 // operation, transfer control to the VM thread, and await the results of a
366 371 // garbage collection. That is quite expensive, and we should avoid doing it
367 372 // multiple times if possible.
368 373 //
369 374 // To accomplish this, we have a basic allocation policy, and also a
370 375 // failed allocation policy.
371 376 //
372 377 // The basic allocation policy controls how you allocate memory without
373 378 // attempting garbage collection. It is okay to grab locks and
374 379 // expand the heap, if that can be done without coming to a safepoint.
375 380 // It is likely that the basic allocation policy will not be very
376 381 // aggressive.
377 382 //
378 383 // The failed allocation policy is invoked from the VM thread after
379 384 // the basic allocation policy is unable to satisfy a mem_allocate
380 385 // request. This policy needs to cover the entire range of collection,
381 386 // heap expansion, and out-of-memory conditions. It should make every
382 387 // attempt to allocate the requested memory.
383 388
384 389 // Basic allocation policy. Should never be called at a safepoint, or
385 390 // from the VM thread.
386 391 //
387 392 // This method must handle cases where many mem_allocate requests fail
388 393 // simultaneously. When that happens, only one VM operation will succeed,
389 394 // and the rest will not be executed. For that reason, this method loops
390 395 // during failed allocation attempts. If the java heap becomes exhausted,
391 396 // we rely on the size_policy object to force a bail out.
392 397 HeapWord* ParallelScavengeHeap::mem_allocate(
393 398 size_t size,
394 399 bool* gc_overhead_limit_was_exceeded) {
395 400 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
396 401 assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
397 402 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
398 403
399 404 // In general gc_overhead_limit_was_exceeded should be false, so
400 405 // set it to false here, and reset it to true only if the gc time
401 406 // limit is being exceeded as checked below.
402 407 *gc_overhead_limit_was_exceeded = false;
403 408
404 409 HeapWord* result = young_gen()->allocate(size);
405 410
406 411 uint loop_count = 0;
407 412 uint gc_count = 0;
408 413
409 414 while (result == NULL) {
410 415 // We don't want to have multiple collections for a single filled generation.
411 416 // To prevent this, each thread tracks the total_collections() value, and if
412 417 // the count has changed, does not do a new collection.
413 418 //
414 419 // The collection count must be read only while holding the heap lock. VM
415 420 // operations also hold the heap lock during collections. There is a lock
416 421 // contention case where thread A blocks waiting on the Heap_lock, while
417 422 // thread B is holding it doing a collection. When thread A gets the lock,
418 423 // the collection count has already changed. To prevent duplicate collections,
419 424 // the policy MUST attempt allocations during the same period it reads the
420 425 // total_collections() value!
421 426 {
422 427 MutexLocker ml(Heap_lock);
423 428 gc_count = Universe::heap()->total_collections();
424 429
425 430 result = young_gen()->allocate(size);
426 431 if (result != NULL) {
427 432 return result;
428 433 }
429 434
430 435 // If certain conditions hold, try allocating from the old gen.
431 436 result = mem_allocate_old_gen(size);
432 437 if (result != NULL) {
433 438 return result;
434 439 }
435 440
436 441 // Failed to allocate without a gc.
437 442 if (GC_locker::is_active_and_needs_gc()) {
438 443 // If this thread is not in a jni critical section, we stall
439 444 // the requestor until the critical section has cleared and
440 445 // GC is allowed. When the critical section clears, a GC is
441 446 // initiated by the last thread exiting the critical section; so
442 447 // we retry the allocation sequence from the beginning of the loop,
443 448 // rather than causing more, now probably unnecessary, GC attempts.
444 449 JavaThread* jthr = JavaThread::current();
445 450 if (!jthr->in_critical()) {
446 451 MutexUnlocker mul(Heap_lock);
447 452 GC_locker::stall_until_clear();
448 453 continue;
449 454 } else {
450 455 if (CheckJNICalls) {
451 456 fatal("Possible deadlock due to allocating while"
452 457 " in jni critical section");
453 458 }
454 459 return NULL;
455 460 }
456 461 }
457 462 }
458 463
459 464 if (result == NULL) {
460 465 // Generate a VM operation
461 466 VM_ParallelGCFailedAllocation op(size, gc_count);
462 467 VMThread::execute(&op);
463 468
464 469 // Did the VM operation execute? If so, return the result directly.
465 470 // This prevents us from looping until time out on requests that can
466 471 // not be satisfied.
467 472 if (op.prologue_succeeded()) {
468 473 assert(Universe::heap()->is_in_or_null(op.result()),
469 474 "result not in heap");
470 475
471 476 // If GC was locked out during VM operation then retry allocation
472 477 // and/or stall as necessary.
473 478 if (op.gc_locked()) {
474 479 assert(op.result() == NULL, "must be NULL if gc_locked() is true");
475 480 continue; // retry and/or stall as necessary
476 481 }
477 482
478 483 // Exit the loop if the gc time limit has been exceeded.
479 484 // The allocation must have failed above ("result" guarding
480 485 // this path is NULL) and the most recent collection has exceeded the
481 486 // gc overhead limit (although enough may have been collected to
482 487 // satisfy the allocation). Exit the loop so that an out-of-memory
483 488 // will be thrown (return a NULL ignoring the contents of
484 489 // op.result()),
485 490 // but clear gc_overhead_limit_exceeded so that the next collection
486 491 // starts with a clean slate (i.e., forgets about previous overhead
487 492 // excesses). Fill op.result() with a filler object so that the
488 493 // heap remains parsable.
489 494 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
490 495 const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
491 496 assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
492 497 if (limit_exceeded && softrefs_clear) {
493 498 *gc_overhead_limit_was_exceeded = true;
494 499 size_policy()->set_gc_overhead_limit_exceeded(false);
495 500 if (PrintGCDetails && Verbose) {
496 501 gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
497 502 "return NULL because gc_overhead_limit_exceeded is set");
498 503 }
499 504 if (op.result() != NULL) {
500 505 CollectedHeap::fill_with_object(op.result(), size);
501 506 }
502 507 return NULL;
503 508 }
504 509
505 510 return op.result();
506 511 }
507 512 }
508 513
509 514 // The policy object will prevent us from looping forever. If the
510 515 // time spent in gc crosses a threshold, we will bail out.
511 516 loop_count++;
512 517 if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
513 518 (loop_count % QueuedAllocationWarningCount == 0)) {
514 519 warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
515 520 " size=%d", loop_count, size);
516 521 }
517 522 }
518 523
519 524 return result;
520 525 }
521 526
522 527 // A "death march" is a series of ultra-slow allocations in which a full gc is
523 528 // done before each allocation, and after the full gc the allocation still
524 529 // cannot be satisfied from the young gen. This routine detects that condition;
525 530 // it should be called after a full gc has been done and the allocation
526 531 // attempted from the young gen. The parameter 'addr' should be the result of
527 532 // that young gen allocation attempt.
528 533 void
529 534 ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
530 535 if (addr != NULL) {
531 536 _death_march_count = 0; // death march has ended
532 537 } else if (_death_march_count == 0) {
533 538 if (should_alloc_in_eden(size)) {
534 539 _death_march_count = 1; // death march has started
535 540 }
536 541 }
537 542 }
538 543
539 544 HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
540 545 if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
541 546 // Size is too big for eden, or gc is locked out.
542 547 return old_gen()->allocate(size);
543 548 }
544 549
545 550 // If a "death march" is in progress, allocate from the old gen a limited
546 551 // number of times before doing a GC.
547 552 if (_death_march_count > 0) {
548 553 if (_death_march_count < 64) {
549 554 ++_death_march_count;
550 555 return old_gen()->allocate(size);
551 556 } else {
552 557 _death_march_count = 0;
553 558 }
554 559 }
555 560 return NULL;
556 561 }
557 562
558 563 // Failed allocation policy. Must be called from the VM thread, and
559 564 // only at a safepoint! Note that this method sets policy for allocation
560 565 // flow, NOT collection policy. We do not check here whether GC time has
561 566 // exceeded its limit; that is the responsibility of the heap-specific
562 567 // collection methods. This method decides where to attempt allocations
563 568 // and when to attempt collections, but contains no collection-specific policy.
564 569 HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
565 570 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
566 571 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
567 572 assert(!Universe::heap()->is_gc_active(), "not reentrant");
568 573 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
569 574
570 575 // We assume that allocation in eden will fail unless we collect.
571 576
572 577 // First level allocation failure, scavenge and allocate in young gen.
573 578 GCCauseSetter gccs(this, GCCause::_allocation_failure);
574 579 const bool invoked_full_gc = PSScavenge::invoke();
575 580 HeapWord* result = young_gen()->allocate(size);
576 581
577 582 // Second level allocation failure.
578 583 // Mark sweep and allocate in young generation.
579 584 if (result == NULL && !invoked_full_gc) {
580 585 invoke_full_gc(false);
581 586 result = young_gen()->allocate(size);
582 587 }
583 588
584 589 death_march_check(result, size);
585 590
586 591 // Third level allocation failure.
587 592 // After mark sweep and young generation allocation failure,
588 593 // allocate in old generation.
589 594 if (result == NULL) {
590 595 result = old_gen()->allocate(size);
591 596 }
592 597
593 598 // Fourth level allocation failure. We're running out of memory.
594 599 // More complete mark sweep and allocate in young generation.
595 600 if (result == NULL) {
596 601 invoke_full_gc(true);
597 602 result = young_gen()->allocate(size);
598 603 }
599 604
600 605 // Fifth level allocation failure.
601 606 // After more complete mark sweep, allocate in old generation.
602 607 if (result == NULL) {
603 608 result = old_gen()->allocate(size);
604 609 }
605 610
606 611 return result;
607 612 }
608 613
609 614 //
610 615 // This is the policy loop for allocating in the permanent generation.
611 616 // If the initial allocation fails, we create a vm operation which will
612 617 // cause a collection.
613 618 HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
614 619 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
615 620 assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
616 621 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
617 622
618 623 HeapWord* result;
619 624
620 625 uint loop_count = 0;
621 626 uint gc_count = 0;
622 627 uint full_gc_count = 0;
623 628
624 629 do {
625 630 // We don't want to have multiple collections for a single filled generation.
626 631 // To prevent this, each thread tracks the total_collections() value, and if
627 632 // the count has changed, does not do a new collection.
628 633 //
629 634 // The collection count must be read only while holding the heap lock. VM
630 635 // operations also hold the heap lock during collections. There is a lock
631 636 // contention case where thread A blocks waiting on the Heap_lock, while
632 637 // thread B is holding it doing a collection. When thread A gets the lock,
633 638 // the collection count has already changed. To prevent duplicate collections,
634 639 // the policy MUST attempt allocations during the same period it reads the
635 640 // total_collections() value!
636 641 {
637 642 MutexLocker ml(Heap_lock);
638 643 gc_count = Universe::heap()->total_collections();
639 644 full_gc_count = Universe::heap()->total_full_collections();
640 645
641 646 result = perm_gen()->allocate_permanent(size);
642 647
643 648 if (result != NULL) {
644 649 return result;
645 650 }
646 651
647 652 if (GC_locker::is_active_and_needs_gc()) {
648 653 // If this thread is not in a jni critical section, we stall
649 654 // the requestor until the critical section has cleared and
650 655 // GC is allowed. When the critical section clears, a GC is
651 656 // initiated by the last thread exiting the critical section; so
652 657 // we retry the allocation sequence from the beginning of the loop,
653 658 // rather than causing more, now probably unnecessary, GC attempts.
654 659 JavaThread* jthr = JavaThread::current();
655 660 if (!jthr->in_critical()) {
656 661 MutexUnlocker mul(Heap_lock);
657 662 GC_locker::stall_until_clear();
658 663 continue;
659 664 } else {
660 665 if (CheckJNICalls) {
661 666 fatal("Possible deadlock due to allocating while"
662 667 " in jni critical section");
663 668 }
664 669 return NULL;
665 670 }
666 671 }
667 672 }
668 673
669 674 if (result == NULL) {
670 675
671 676 // Exit the loop if the gc time limit has been exceeded.
672 677 // The allocation must have failed above (result must be NULL),
673 678 // and the most recent collection must have exceeded the
674 679 // gc time limit. Exit the loop so that an out-of-memory
675 680 // will be thrown (returning a NULL will do that), but
676 681 // clear gc_overhead_limit_exceeded so that the next collection
677 682 // will succeed if the application decides to handle the
678 683 // out-of-memory and tries to go on.
679 684 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
680 685 if (limit_exceeded) {
681 686 size_policy()->set_gc_overhead_limit_exceeded(false);
682 687 if (PrintGCDetails && Verbose) {
683 688 gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
684 689 " return NULL because gc_overhead_limit_exceeded is set");
685 690 }
686 691 assert(result == NULL, "Allocation did not fail");
687 692 return NULL;
688 693 }
689 694
690 695 // Generate a VM operation
691 696 VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
692 697 VMThread::execute(&op);
693 698
694 699 // Did the VM operation execute? If so, return the result directly.
695 700 // This prevents us from looping until time out on requests that can
696 701 // not be satisfied.
697 702 if (op.prologue_succeeded()) {
698 703 assert(Universe::heap()->is_in_permanent_or_null(op.result()),
699 704 "result not in heap");
700 705 // If GC was locked out during VM operation then retry allocation
701 706 // and/or stall as necessary.
702 707 if (op.gc_locked()) {
703 708 assert(op.result() == NULL, "must be NULL if gc_locked() is true");
704 709 continue; // retry and/or stall as necessary
705 710 }
706 711 // If a NULL result is being returned, an out-of-memory
707 712 // will be thrown now. Clear the gc_overhead_limit_exceeded
708 713 // flag to avoid the following situation.
709 714 // gc_overhead_limit_exceeded is set during a collection
710 715 // the collection fails to return enough space and an OOM is thrown
711 716 // a subsequent GC prematurely throws an out-of-memory because
712 717 // the gc_overhead_limit_exceeded counts did not start
713 718 // again from 0.
714 719 if (op.result() == NULL) {
715 720 size_policy()->reset_gc_overhead_limit_count();
716 721 }
717 722 return op.result();
718 723 }
719 724 }
720 725
721 726 // The policy object will prevent us from looping forever. If the
722 727 // time spent in gc crosses a threshold, we will bail out.
723 728 loop_count++;
724 729 if ((QueuedAllocationWarningCount > 0) &&
725 730 (loop_count % QueuedAllocationWarningCount == 0)) {
726 731 warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
727 732 " size=%d", loop_count, size);
728 733 }
729 734 } while (result == NULL);
730 735
731 736 return result;
732 737 }
733 738
734 739 //
735 740 // This is the policy code for permanent allocations which have failed
736 741 // and require a collection. Note that just as in failed_mem_allocate,
737 742 // we do not set collection policy, only where & when to allocate and
738 743 // collect.
739 744 HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
740 745 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
741 746 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
742 747 assert(!Universe::heap()->is_gc_active(), "not reentrant");
743 748 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
744 749 assert(size > perm_gen()->free_in_words(), "Allocation should fail");
745 750
746 751 // We assume (and assert!) that an allocation at this point will fail
747 752 // unless we collect.
748 753
749 754 // First level allocation failure. Mark-sweep and allocate in perm gen.
750 755 GCCauseSetter gccs(this, GCCause::_allocation_failure);
751 756 invoke_full_gc(false);
752 757 HeapWord* result = perm_gen()->allocate_permanent(size);
753 758
754 759 // Second level allocation failure. We're running out of memory.
755 760 if (result == NULL) {
756 761 invoke_full_gc(true);
757 762 result = perm_gen()->allocate_permanent(size);
758 763 }
759 764
760 765 return result;
761 766 }
762 767
763 768 void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
764 769 CollectedHeap::ensure_parsability(retire_tlabs);
765 770 young_gen()->eden_space()->ensure_parsability();
766 771 }
767 772
768 773 size_t ParallelScavengeHeap::unsafe_max_alloc() {
769 774 return young_gen()->eden_space()->free_in_bytes();
770 775 }
771 776
772 777 size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
773 778 return young_gen()->eden_space()->tlab_capacity(thr);
774 779 }
775 780
776 781 size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
777 782 return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
778 783 }
779 784
780 785 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
781 786 return young_gen()->allocate(size);
782 787 }
783 788
784 789 void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
785 790 CollectedHeap::accumulate_statistics_all_tlabs();
786 791 }
787 792
788 793 void ParallelScavengeHeap::resize_all_tlabs() {
789 794 CollectedHeap::resize_all_tlabs();
790 795 }
791 796
792 797 bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
793 798 // We don't need barriers for stores to objects in the
794 799 // young gen and, a fortiori, for initializing stores to
795 800 // objects therein.
796 801 return is_in_young(new_obj);
797 802 }
798 803
799 804 // This method is used by System.gc() and JVMTI.
800 805 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
801 806 assert(!Heap_lock->owned_by_self(),
802 807 "this thread should not own the Heap_lock");
803 808
804 809 unsigned int gc_count = 0;
805 810 unsigned int full_gc_count = 0;
806 811 {
807 812 MutexLocker ml(Heap_lock);
808 813 // This value is guarded by the Heap_lock
809 814 gc_count = Universe::heap()->total_collections();
810 815 full_gc_count = Universe::heap()->total_full_collections();
811 816 }
812 817
813 818 VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
814 819 VMThread::execute(&op);
815 820 }
816 821
817 822 // This interface assumes that it's being called by the
818 823 // vm thread. It collects the heap assuming that the
819 824 // heap lock is already held and that we are executing in
820 825 // the context of the vm thread.
821 826 void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
822 827 assert(Thread::current()->is_VM_thread(), "Precondition#1");
823 828 assert(Heap_lock->is_locked(), "Precondition#2");
824 829 GCCauseSetter gcs(this, cause);
825 830 switch (cause) {
826 831 case GCCause::_heap_inspection:
827 832 case GCCause::_heap_dump: {
828 833 HandleMark hm;
829 834 invoke_full_gc(false);
830 835 break;
831 836 }
832 837 default: // XXX FIX ME
833 838 ShouldNotReachHere();
834 839 }
835 840 }
836 841
837 842
838 843 void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
839 844 Unimplemented();
840 845 }
841 846
842 847 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
843 848 young_gen()->object_iterate(cl);
844 849 old_gen()->object_iterate(cl);
845 850 perm_gen()->object_iterate(cl);
846 851 }
847 852
848 853 void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
849 854 Unimplemented();
850 855 }
851 856
852 857 void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
853 858 perm_gen()->object_iterate(cl);
854 859 }
855 860
856 861 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
857 862 if (young_gen()->is_in_reserved(addr)) {
858 863 assert(young_gen()->is_in(addr),
859 864 "addr should be in allocated part of young gen");
860 865 // called from os::print_location by find or VMError
861 866 if (Debugging || VMError::fatal_error_in_progress()) return NULL;
862 867 Unimplemented();
863 868 } else if (old_gen()->is_in_reserved(addr)) {
864 869 assert(old_gen()->is_in(addr),
865 870 "addr should be in allocated part of old gen");
866 871 return old_gen()->start_array()->object_start((HeapWord*)addr);
867 872 } else if (perm_gen()->is_in_reserved(addr)) {
868 873 assert(perm_gen()->is_in(addr),
869 874 "addr should be in allocated part of perm gen");
870 875 return perm_gen()->start_array()->object_start((HeapWord*)addr);
871 876 }
872 877 return 0;
873 878 }
874 879
875 880 size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
876 881 return oop(addr)->size();
877 882 }
878 883
879 884 bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
880 885 return block_start(addr) == addr;
881 886 }
882 887
883 888 jlong ParallelScavengeHeap::millis_since_last_gc() {
884 889 return UseParallelOldGC ?
885 890 PSParallelCompact::millis_since_last_gc() :
886 891 PSMarkSweep::millis_since_last_gc();
887 892 }
888 893
889 894 void ParallelScavengeHeap::prepare_for_verify() {
890 895 ensure_parsability(false); // no need to retire TLABs for verification
891 896 }
892 897
893 898 PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
894 899 PSOldGen* old = old_gen();
895 900 HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
896 901 VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
897 902 SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());
898 903
899 904 PSYoungGen* young = young_gen();
900 905 VirtualSpaceSummary young_summary(young->reserved().start(),
901 906 (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());
902 907
903 908 MutableSpace* eden = young_gen()->eden_space();
904 909 SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());
905 910
906 911 MutableSpace* from = young_gen()->from_space();
907 912 SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());
908 913
909 914 MutableSpace* to = young_gen()->to_space();
910 915 SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());
911 916
912 917 VirtualSpaceSummary heap_summary = create_heap_space_summary();
913 918 return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
914 919 }
915 920
916 921 VirtualSpaceSummary ParallelScavengeHeap::create_perm_gen_space_summary() {
917 922 PSVirtualSpace* space = perm_gen()->virtual_space();
918 923 return VirtualSpaceSummary(
919 924 (HeapWord*)space->low_boundary(),
920 925 (HeapWord*)space->high(),
921 926 (HeapWord*)space->high_boundary());
922 927 }
923 928
924 929 void ParallelScavengeHeap::print_on(outputStream* st) const {
925 930 young_gen()->print_on(st);
926 931 old_gen()->print_on(st);
927 932 perm_gen()->print_on(st);
928 933 }
929 934
930 935 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
931 936 PSScavenge::gc_task_manager()->threads_do(tc);
932 937 }
933 938
934 939 void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
935 940 PSScavenge::gc_task_manager()->print_threads_on(st);
936 941 }
937 942
938 943 void ParallelScavengeHeap::print_tracing_info() const {
939 944 if (TraceGen0Time) {
940 945 double time = PSScavenge::accumulated_time()->seconds();
941 946 tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
942 947 }
943 948 if (TraceGen1Time) {
944 949 double time = PSMarkSweep::accumulated_time()->seconds();
945 950 tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
946 951 }
947 952 }
948 953
949 954
950 955 void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
951 956 // Why do we need the total_collections()-filter below?
952 957 if (total_collections() > 0) {
953 958 if (!silent) {
954 959 gclog_or_tty->print("permanent ");
955 960 }
956 961 perm_gen()->verify();
957 962
958 963 if (!silent) {
959 964 gclog_or_tty->print("tenured ");
960 965 }
961 966 old_gen()->verify();
962 967
963 968 if (!silent) {
964 969 gclog_or_tty->print("eden ");
965 970 }
966 971 young_gen()->verify();
967 972 }
968 973 }
969 974
970 975 void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
971 976 if (PrintGCDetails && Verbose) {
972 977 gclog_or_tty->print(" " SIZE_FORMAT
973 978 "->" SIZE_FORMAT
974 979 "(" SIZE_FORMAT ")",
975 980 prev_used, used(), capacity());
976 981 } else {
977 982 gclog_or_tty->print(" " SIZE_FORMAT "K"
978 983 "->" SIZE_FORMAT "K"
979 984 "(" SIZE_FORMAT "K)",
980 985 prev_used / K, used() / K, capacity() / K);
981 986 }
982 987 }
983 988
984 989 void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
985 990 const PSHeapSummary& heap_summary = create_ps_heap_summary();
986 991 const PermGenSummary& perm_gen_summary = create_perm_gen_summary();
987 992 gc_tracer->report_gc_heap_summary(when, heap_summary, perm_gen_summary);
988 993 }
989 994
990 995 ParallelScavengeHeap* ParallelScavengeHeap::heap() {
991 996 assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
992 997 assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
993 998 return _psh;
994 999 }
995 1000
996 1001 // Before delegating the resize to the young generation,
997 1002 // the reserved space for the young and old generations
998 1003 // may be changed to accommodate the desired resize.
999 1004 void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
1000 1005 size_t survivor_size) {
1001 1006 if (UseAdaptiveGCBoundary) {
1002 1007 if (size_policy()->bytes_absorbed_from_eden() != 0) {
1003 1008 size_policy()->reset_bytes_absorbed_from_eden();
1004 1009 return; // The generation changed size already.
1005 1010 }
1006 1011 gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
1007 1012 }
1008 1013
1009 1014 // Delegate the resize to the generation.
1010 1015 _young_gen->resize(eden_size, survivor_size);
1011 1016 }
1012 1017
1013 1018 // Before delegating the resize to the old generation,
1014 1019 // the reserved space for the young and old generations
1015 1020 // may be changed to accommodate the desired resize.
1016 1021 void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
1017 1022 if (UseAdaptiveGCBoundary) {
1018 1023 if (size_policy()->bytes_absorbed_from_eden() != 0) {
1019 1024 size_policy()->reset_bytes_absorbed_from_eden();
1020 1025 return; // The generation changed size already.
1021 1026 }
1022 1027 gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
1023 1028 }
1024 1029
1025 1030 // Delegate the resize to the generation.
1026 1031 _old_gen->resize(desired_free_space);
1027 1032 }
1028 1033
1029 1034 ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
1030 1035 // nothing particular
1031 1036 }
1032 1037
1033 1038 ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
1034 1039 // nothing particular
1035 1040 }
1036 1041
1037 1042 #ifndef PRODUCT
1038 1043 void ParallelScavengeHeap::record_gen_tops_before_GC() {
1039 1044 if (ZapUnusedHeapArea) {
1040 1045 young_gen()->record_spaces_top();
1041 1046 old_gen()->record_spaces_top();
1042 1047 perm_gen()->record_spaces_top();
1043 1048 }
1044 1049 }
1045 1050
1046 1051 void ParallelScavengeHeap::gen_mangle_unused_area() {
1047 1052 if (ZapUnusedHeapArea) {
1048 1053 young_gen()->eden_space()->mangle_unused_area();
1049 1054 young_gen()->to_space()->mangle_unused_area();
1050 1055 young_gen()->from_space()->mangle_unused_area();
1051 1056 old_gen()->object_space()->mangle_unused_area();
1052 1057 perm_gen()->object_space()->mangle_unused_area();
1053 1058 }
1054 1059 }
1055 1060 #endif