
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

rev 8068 : imported patch parallelscavenge_cleanup
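
Note on the pattern in every hunk below: the cleanup replaces scattered PSParallelCompact::gc_heap() calls, raw (ParallelScavengeHeap*)Universe::heap() casts, and per-call-site heap->kind() "Sanity" asserts with the single static accessor ParallelScavengeHeap::heap(), which centralizes the downcast and the sanity check. A minimal sketch of what such an accessor could look like (the Universe::heap() indirection and the assert wording here are assumptions for illustration, not part of this diff):

    // Sketch only; not taken verbatim from this patch.
    // Assumes the accessor wraps Universe::heap() and verifies the heap kind
    // once, so individual call sites no longer need their own asserts.
    ParallelScavengeHeap* ParallelScavengeHeap::heap() {
      CollectedHeap* heap = Universe::heap();
      assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
      assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
      return (ParallelScavengeHeap*)heap;
    }

With the check living in one place, several hunks shrink by a line or two because the call sites drop their own "Sanity" asserts.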

*** 746,756 ****
    return true;
  }
  
  HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
    assert(addr != NULL, "Should detect NULL oop earlier");
!   assert(PSParallelCompact::gc_heap()->is_in(addr), "not in heap");
    assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
  
    // Region covering the object.
    RegionData* const region_ptr = addr_to_region_ptr(addr);
    HeapWord* result = region_ptr->destination();
--- 746,756 ----
    return true;
  }
  
  HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
    assert(addr != NULL, "Should detect NULL oop earlier");
!   assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
    assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
  
    // Region covering the object.
    RegionData* const region_ptr = addr_to_region_ptr(addr);
    HeapWord* result = region_ptr->destination();
*** 834,846 ****
  void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
    klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
  }
  
  void PSParallelCompact::post_initialize() {
!   ParallelScavengeHeap* heap = gc_heap();
!   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
! 
    MemRegion mr = heap->reserved_region();
    _ref_processor =
      new ReferenceProcessor(mr,            // span
        ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
        (int) ParallelGCThreads, // mt processing degree
--- 834,844 ----
  void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
    klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
  }
  
  void PSParallelCompact::post_initialize() {
!   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    MemRegion mr = heap->reserved_region();
    _ref_processor =
      new ReferenceProcessor(mr,            // span
        ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
        (int) ParallelGCThreads, // mt processing degree
*** 853,864 ****
    // Initialize static fields in ParCompactionManager.
    ParCompactionManager::initialize(mark_bitmap());
  }
  
  bool PSParallelCompact::initialize() {
!   ParallelScavengeHeap* heap = gc_heap();
!   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    MemRegion mr = heap->reserved_region();
  
    // Was the old gen get allocated successfully?
    if (!heap->old_gen()->is_allocated()) {
      return false;
--- 851,861 ----
    // Initialize static fields in ParCompactionManager.
    ParCompactionManager::initialize(mark_bitmap());
  }
  
  bool PSParallelCompact::initialize() {
!   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    MemRegion mr = heap->reserved_region();
  
    // Was the old gen get allocated successfully?
    if (!heap->old_gen()->is_allocated()) {
      return false;
*** 888,898 ****
  void PSParallelCompact::initialize_space_info()
  {
    memset(&_space_info, 0, sizeof(_space_info));
  
!   ParallelScavengeHeap* heap = gc_heap();
    PSYoungGen* young_gen = heap->young_gen();
  
    _space_info[old_space_id].set_space(heap->old_gen()->object_space());
    _space_info[eden_space_id].set_space(young_gen->eden_space());
    _space_info[from_space_id].set_space(young_gen->from_space());
--- 885,895 ----
  void PSParallelCompact::initialize_space_info()
  {
    memset(&_space_info, 0, sizeof(_space_info));
  
!   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    PSYoungGen* young_gen = heap->young_gen();
  
    _space_info[old_space_id].set_space(heap->old_gen()->object_space());
    _space_info[eden_space_id].set_space(young_gen->eden_space());
    _space_info[from_space_id].set_space(young_gen->from_space());
*** 971,981 ****
    // Update the from & to space pointers in space_info, since they are swapped
    // at each young gen gc.  Do the update unconditionally (even though a
    // promotion failure does not swap spaces) because an unknown number of minor
    // collections will have swapped the spaces an unknown number of times.
    GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
!   ParallelScavengeHeap* heap = gc_heap();
    _space_info[from_space_id].set_space(heap->young_gen()->from_space());
    _space_info[to_space_id].set_space(heap->young_gen()->to_space());
  
    pre_gc_values->fill(heap);
--- 968,978 ----
    // Update the from & to space pointers in space_info, since they are swapped
    // at each young gen gc.  Do the update unconditionally (even though a
    // promotion failure does not swap spaces) because an unknown number of minor
    // collections will have swapped the spaces an unknown number of times.
    GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
!   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _space_info[from_space_id].set_space(heap->young_gen()->from_space());
    _space_info[to_space_id].set_space(heap->young_gen()->to_space());
  
    pre_gc_values->fill(heap);
*** 1026,1036 ****
    MutableSpace* const eden_space = _space_info[eden_space_id].space();
    MutableSpace* const from_space = _space_info[from_space_id].space();
    MutableSpace* const to_space = _space_info[to_space_id].space();
  
!   ParallelScavengeHeap* heap = gc_heap();
    bool eden_empty = eden_space->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(heap->size_policy(),
                                              heap->young_gen(), heap->old_gen());
    }
--- 1023,1033 ----
    MutableSpace* const eden_space = _space_info[eden_space_id].space();
    MutableSpace* const from_space = _space_info[from_space_id].space();
    MutableSpace* const to_space = _space_info[to_space_id].space();
  
!   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    bool eden_empty = eden_space->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(heap->size_policy(),
                                              heap->young_gen(), heap->old_gen());
    }
*** 1964,1974 ****
  void PSParallelCompact::invoke(bool maximum_heap_compaction) {
    assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
    assert(Thread::current() == (Thread*)VMThread::vm_thread(),
           "should be in vm thread");
  
!   ParallelScavengeHeap* heap = gc_heap();
    GCCause::Cause gc_cause = heap->gc_cause();
    assert(!heap->is_gc_active(), "not reentrant");
  
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    IsGCActiveMark mark;
--- 1961,1971 ----
  void PSParallelCompact::invoke(bool maximum_heap_compaction) {
    assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
    assert(Thread::current() == (Thread*)VMThread::vm_thread(),
           "should be in vm thread");
  
!   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    GCCause::Cause gc_cause = heap->gc_cause();
    assert(!heap->is_gc_active(), "not reentrant");
  
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    IsGCActiveMark mark;
*** 1992,2002 ****
    if (GC_locker::check_active_before_gc()) {
      return false;
    }
  
!   ParallelScavengeHeap* heap = gc_heap();
  
    _gc_timer.register_gc_start();
    _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
  
    TimeStamp marking_start;
--- 1989,1999 ----
    if (GC_locker::check_active_before_gc()) {
      return false;
    }
  
!   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  
    _gc_timer.register_gc_start();
    _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
  
    TimeStamp marking_start;
*** 2345,2355 ****
                                        bool maximum_heap_compaction,
                                        ParallelOldTracer *gc_tracer) {
    // Recursively traverse all live objects and mark them
    GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  
!   ParallelScavengeHeap* heap = gc_heap();
    uint parallel_gc_threads = heap->gc_task_manager()->workers();
    uint active_gc_threads = heap->gc_task_manager()->active_workers();
    TaskQueueSetSuper* qset = ParCompactionManager::region_array();
    ParallelTaskTerminator terminator(active_gc_threads, qset);
--- 2342,2352 ----
                                        bool maximum_heap_compaction,
                                        ParallelOldTracer *gc_tracer) {
    // Recursively traverse all live objects and mark them
    GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  
!   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    uint parallel_gc_threads = heap->gc_task_manager()->workers();
    uint active_gc_threads = heap->gc_task_manager()->active_workers();
    TaskQueueSetSuper* qset = ParCompactionManager::region_array();
    ParallelTaskTerminator terminator(active_gc_threads, qset);
*** 2685,2696 ****
  void PSParallelCompact::compact() {
    // trace("5");
    GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  
!   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
!   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    PSOldGen* old_gen = heap->old_gen();
    old_gen->start_array()->reset();
    uint parallel_gc_threads = heap->gc_task_manager()->workers();
    uint active_gc_threads = heap->gc_task_manager()->active_workers();
    TaskQueueSetSuper* qset = ParCompactionManager::region_array();
--- 2682,2692 ----
  void PSParallelCompact::compact() {
    // trace("5");
    GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  
!   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    PSOldGen* old_gen = heap->old_gen();
    old_gen->start_array()->reset();
    uint parallel_gc_threads = heap->gc_task_manager()->workers();
    uint active_gc_threads = heap->gc_task_manager()->active_workers();
    TaskQueueSetSuper* qset = ParCompactionManager::region_array();
*** 2837,2847 ****
  // Return the SpaceId for the space containing addr.  If addr is not in the
  // heap, last_space_id is returned.  In debug mode it expects the address to be
  // in the heap and asserts such.
  PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
!   assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
  
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      if (_space_info[id].space()->contains(addr)) {
        return SpaceId(id);
      }
--- 2833,2843 ----
  // Return the SpaceId for the space containing addr.  If addr is not in the
  // heap, last_space_id is returned.  In debug mode it expects the address to be
  // in the heap and asserts such.
  PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
!   assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
  
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      if (_space_info[id].space()->contains(addr)) {
        return SpaceId(id);
      }