hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp

rev 611 : Merge

*** 1,10 ****
  #ifdef USE_PRAGMA_IDENT_SRC
  #pragma ident "@(#)parallelScavengeHeap.cpp 1.95 07/10/04 10:49:31 JVM"
  #endif
  /*
!  * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,10 ----
  #ifdef USE_PRAGMA_IDENT_SRC
  #pragma ident "@(#)parallelScavengeHeap.cpp 1.95 07/10/04 10:49:31 JVM"
  #endif
  /*
!  * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 109,119 ****
  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen.  Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.

! ReservedSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size, og_align);

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
--- 109,119 ----
  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen.  Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.

! ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size, og_align);

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
*** 174,184 ****
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
!                            intra_generation_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );
--- 174,184 ----
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
!                            intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );
*** 211,224 ****
  void ParallelScavengeHeap::post_initialize() {
    // Need to init the tenuring threshold
    PSScavenge::initialize();
    if (UseParallelOldGC) {
      PSParallelCompact::post_initialize();
-     if (VerifyParallelOldWithMarkSweep) {
-       // Will be used for verification of par old.
-       PSMarkSweep::initialize();
-     }
    } else {
      PSMarkSweep::initialize();
    }
    PSPromotionManager::initialize();
  }
--- 211,220 ----
*** 403,413 ****
  // TLAB allocation directly in the old gen.
  if (result != NULL) {
    return result;
  }
  if (!is_tlab &&
!     size >= (young_gen()->eden_space()->capacity_in_words() / 2)) {
    result = old_gen()->allocate(size, is_tlab);
    if (result != NULL) {
      return result;
    }
  }
--- 399,409 ----
  // TLAB allocation directly in the old gen.
  if (result != NULL) {
    return result;
  }
  if (!is_tlab &&
!     size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
    result = old_gen()->allocate(size, is_tlab);
    if (result != NULL) {
      return result;
    }
  }
*** 591,600 ****
--- 587,621 ----
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
      result = perm_gen()->allocate_permanent(size);
+
+     if (result != NULL) {
+       return result;
+     }
+
+     if (GC_locker::is_active_and_needs_gc()) {
+       // If this thread is not in a jni critical section, we stall
+       // the requestor until the critical section has cleared and
+       // GC allowed. When the critical section clears, a GC is
+       // initiated by the last thread exiting the critical section; so
+       // we retry the allocation sequence from the beginning of the loop,
+       // rather than causing more, now probably unnecessary, GC attempts.
+       JavaThread* jthr = JavaThread::current();
+       if (!jthr->in_critical()) {
+         MutexUnlocker mul(Heap_lock);
+         GC_locker::stall_until_clear();
+         continue;
+       } else {
+         if (CheckJNICalls) {
+           fatal("Possible deadlock due to allocating while"
+                 " in jni critical section");
+         }
+         return NULL;
+       }
+     }
    }

    if (result == NULL) {
      // Exit the loop if the gc time limit has been exceeded.
*** 623,632 ****
--- 644,659 ----
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
+       // If GC was locked out during VM operation then retry allocation
+       // and/or stall as necessary.
+       if (op.gc_locked()) {
+         assert(op.result() == NULL, "must be NULL if gc_locked() is true");
+         continue;  // retry and/or stall as necessary
+       }
        // If a NULL results is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        // gc_time_limit_exceeded is set during a collection
        // the collection fails to return enough space and an OOM is thrown
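
The stall-and-retry protocol added in the two hunks above can be modeled outside the VM. Below is a minimal, self-contained sketch of that control flow: an allocator that fails while a "critical section" is open stalls until the last thread leaves, then restarts its whole allocation sequence instead of piling up extra GC requests. Every name in it (ToyGCLocker, try_allocate, allocate_with_stall) is a stand-in invented for this illustration, not a HotSpot API.

// Toy model of the GC_locker stall-and-retry pattern shown in the patch.
// All names here are illustrative stand-ins, not HotSpot APIs.
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdio>
#include <mutex>
#include <thread>

class ToyGCLocker {
  std::mutex              _lock;
  std::condition_variable _cleared;
  int                     _critical = 0;     // threads now in a critical section
  bool                    _needs_gc = false; // an allocation failed while locked out
public:
  void enter_critical() {
    std::lock_guard<std::mutex> g(_lock);
    ++_critical;
  }
  void exit_critical() {
    std::lock_guard<std::mutex> g(_lock);
    if (--_critical == 0 && _needs_gc) {
      // The last thread out "initiates the GC"; in this toy that is just
      // clearing the flag and waking every stalled allocator to retry.
      _needs_gc = false;
      _cleared.notify_all();
    }
  }
  bool is_active_and_needs_gc() {
    std::lock_guard<std::mutex> g(_lock);
    if (_critical > 0) {
      _needs_gc = true;
      return true;
    }
    return false;
  }
  void stall_until_clear() {
    std::unique_lock<std::mutex> g(_lock);
    _cleared.wait(g, [this] { return _critical == 0; });
  }
};

ToyGCLocker locker;

// Fails twice, then succeeds, to force the retry path.
void* try_allocate(std::size_t size) {
  static int attempts = 0;
  return (++attempts < 3) ? nullptr : ::operator new(size);
}

// Mirrors the loop structure of the patched allocation path.
void* allocate_with_stall(std::size_t size, bool thread_in_critical) {
  for (;;) {
    if (void* result = try_allocate(size)) {
      return result;
    }
    if (locker.is_active_and_needs_gc()) {
      if (!thread_in_critical) {
        locker.stall_until_clear();  // wait for the critical sections to end
        continue;                    // then restart the whole sequence
      }
      return nullptr;  // would deadlock: allocating inside a critical section
    }
    // A real VM would schedule a GC operation here before retrying.
  }
}

int main() {
  locker.enter_critical();           // pretend a JNI critical section is open
  std::thread allocator([] {
    void* p = allocate_with_stall(64, /*thread_in_critical=*/false);
    std::printf("allocation %s\n", p != nullptr ? "succeeded" : "failed");
    ::operator delete(p);
  });
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  locker.exit_critical();            // last one out wakes the stalled thread
  allocator.join();
  return 0;
}
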
*** 908,912 ****
--- 935,959 ----
    }
    // Delegate the resize to the generation.
    _old_gen->resize(desired_free_space);
  }
+
+ #ifndef PRODUCT
+ void ParallelScavengeHeap::record_gen_tops_before_GC() {
+   if (ZapUnusedHeapArea) {
+     young_gen()->record_spaces_top();
+     old_gen()->record_spaces_top();
+     perm_gen()->record_spaces_top();
+   }
+ }
+
+ void ParallelScavengeHeap::gen_mangle_unused_area() {
+   if (ZapUnusedHeapArea) {
+     young_gen()->eden_space()->mangle_unused_area();
+     young_gen()->to_space()->mangle_unused_area();
+     young_gen()->from_space()->mangle_unused_area();
+     old_gen()->object_space()->mangle_unused_area();
+     perm_gen()->object_space()->mangle_unused_area();
+   }
+ }
+ #endif
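
The PRODUCT-only hooks added in the final hunk record each space's top before a GC and, when ZapUnusedHeapArea is set, overwrite everything above it with a recognizable garbage pattern, so debug code can assert that dead heap regions are never read as live data. The following stand-alone sketch models that idea; Space, kMangleWord, and the helper names are invented for illustration, and the fill constant is arbitrary rather than HotSpot's actual bad-heap-word value.

// Minimal model of "mangling" the unused area of a space in a debug build.
#include <cassert>
#include <cstdint>
#include <vector>

struct Space {
  std::vector<uint32_t> words;    // backing storage for the space
  std::size_t top = 0;            // first unused word (allocation high-water mark)
  std::size_t recorded_top = 0;   // snapshot taken before a GC

  explicit Space(std::size_t n) : words(n) {}

  void record_top() { recorded_top = top; }

  // Fill everything above 'top' with a recognizable garbage pattern so that
  // reads of supposedly-dead memory show up immediately in a debug build.
  static constexpr uint32_t kMangleWord = 0xDEADBEEF;
  void mangle_unused_area() {
    for (std::size_t i = top; i < words.size(); ++i) {
      words[i] = kMangleWord;
    }
  }

  // Verification counterpart: the vacated region must still carry the
  // pattern, i.e. nobody scribbled on or read stale objects there.
  bool unused_area_is_mangled() const {
    for (std::size_t i = top; i < words.size(); ++i) {
      if (words[i] != kMangleWord) return false;
    }
    return true;
  }
};

int main() {
  Space eden(16);
  eden.top = 4;                // pretend four words are live
  eden.record_top();           // what record_gen_tops_before_GC() does per space
  eden.mangle_unused_area();   // what gen_mangle_unused_area() triggers per space
  assert(eden.unused_area_is_mangled());
  return 0;
}
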