
src/share/vm/gc/parallel/psParallelCompact.cpp

rev 12854 : [mq]: gcinterface.patch


2030   ObjectStartArray* const start_array = old_gen->start_array();
2031   for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2032     start_array->allocate_block(p);
2033   }
2034 
2035   // Could update the promoted average here, but it is not typically updated at
2036   // full GCs and the value to use is unclear.  Something like
2037   //
2038   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2039 
2040   size_policy->set_bytes_absorbed_from_eden(absorb_size);
2041   return true;
2042 }
2043 
2044 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2045   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2046     "shouldn't return NULL");
2047   return ParallelScavengeHeap::gc_task_manager();
2048 }
2049 
2050 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2051                                       bool maximum_heap_compaction,
2052                                       ParallelOldTracer *gc_tracer) {
2053   // Recursively traverse all live objects and mark them
2054   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
2055 
2056   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2057   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2058   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2059   TaskQueueSetSuper* qset = ParCompactionManager::stack_array();
2060   ParallelTaskTerminator terminator(active_gc_threads, qset);
2061 
2062   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
2063   ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2064 
2065   // Need new claim bits before marking starts.
2066   ClassLoaderDataGraph::clear_claimed_marks();
2067 
2068   {
2069     GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
2070 
2071     ParallelScavengeHeap::ParStrongRootsScope psrs;
2072 
2073     GCTaskQueue* q = GCTaskQueue::create();
2074 
2075     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2076     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2077     // We scan the thread roots in parallel
2078     Threads::create_thread_roots_marking_tasks(q);
2079     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2080     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2081     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2082     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2083     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2084     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2085     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2086 
2087     if (active_gc_threads > 1) {
2088       for (uint j = 0; j < active_gc_threads; j++) {
2089         q->enqueue(new StealMarkingTask(&terminator));
2090       }
2091     }
2092 
2093     gc_task_manager()->execute_and_wait(q);
2094   }
2095 
2096   // Process reference objects found during marking
2097   {
2098     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

2030   ObjectStartArray* const start_array = old_gen->start_array();
2031   for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2032     start_array->allocate_block(p);
2033   }
2034 
2035   // Could update the promoted average here, but it is not typically updated at
2036   // full GCs and the value to use is unclear.  Something like
2037   //
2038   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2039 
2040   size_policy->set_bytes_absorbed_from_eden(absorb_size);
2041   return true;
2042 }
2043 
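The loop at lines 2031-2033 walks the region from unused_start to new_top one object at a time, using each object's size to hop to the next header, and registers every object start with the old generation's ObjectStartArray. A minimal standalone sketch of that walking pattern, with toy types standing in for HotSpot's oop and ObjectStartArray (nothing below is HotSpot code):

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy stand-ins: a "heap word" is one size_t slot, and each object's first
// slot holds the object's size in words (a made-up layout, not HotSpot's oop).
typedef size_t HeapWordToy;

struct StartArrayToy {
  std::vector<size_t> starts;  // word offsets of every recorded object start
  void allocate_block(const HeapWordToy* base, const HeapWordToy* p) {
    starts.push_back((size_t)(p - base));
  }
};

int main() {
  // A 9-word region holding three objects of 3, 2 and 4 words.
  HeapWordToy region[9] = {3, 0, 0, 2, 0, 4, 0, 0, 0};
  HeapWordToy* unused_start = region;
  HeapWordToy* new_top = region + 9;

  StartArrayToy start_array;
  // Same shape as the loop above: hop from one object header to the next
  // using the object's own size, recording each start along the way.
  for (HeapWordToy* p = unused_start; p < new_top; p += *p) {
    start_array.allocate_block(region, p);
  }

  for (size_t s : start_array.starts) {
    printf("object starts at word %zu\n", s);  // prints 0, 3, 5
  }
  return 0;
}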
2044 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2045   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2046     "shouldn't return NULL");
2047   return ParallelScavengeHeap::gc_task_manager();
2048 }
2049 
2050 class CreateThreadRootsMarkingTasksClosure : public ThreadClosure {
2051 
2052 private:
2053   GCTaskQueue* _queue;
2054 
2055 public:
2056 
2057   CreateThreadRootsMarkingTasksClosure(GCTaskQueue* q) : _queue(q) {
2058   }
2059 
2060   void do_thread(Thread* thread) {
2061     if (thread->is_Java_thread()) {
2062       _queue->enqueue(new ThreadRootsMarkingTask((JavaThread*) thread));
2063     } else if (thread->is_VM_thread()) {
2064       _queue->enqueue(new ThreadRootsMarkingTask((VMThread*) thread));
2065     }
2066   }
2067 
2068 };
2069 
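With this patch, the GC-specific hook Threads::create_thread_roots_marking_tasks(q) used in the old code is gone; instead, Threads::threads_do() hands every thread to the closure above, which enqueues a ThreadRootsMarkingTask for Java threads and the VM thread. A minimal standalone sketch of that visitor shape, with toy types standing in for HotSpot's Thread, ThreadClosure and GCTaskQueue (not HotSpot code):

#include <cstdio>
#include <vector>

// Toy stand-ins for the visitor shape above.
struct ToyThread {
  const char* name;
  bool is_java;  // made-up flag replacing is_Java_thread() / is_VM_thread()
};

struct ToyThreadClosure {
  virtual void do_thread(ToyThread* t) = 0;
  virtual ~ToyThreadClosure() {}
};

// Plays the role of Threads::threads_do(): hands every known thread to the closure.
static void toy_threads_do(std::vector<ToyThread>& all, ToyThreadClosure* cl) {
  for (size_t i = 0; i < all.size(); i++) {
    cl->do_thread(&all[i]);
  }
}

// Plays the role of CreateThreadRootsMarkingTasksClosure: turns the threads it
// cares about into queued "marking tasks" (here just their names).
struct CollectMarkingTasks : public ToyThreadClosure {
  std::vector<const char*> queue;
  virtual void do_thread(ToyThread* t) {
    if (t->is_java) {
      queue.push_back(t->name);
    }
  }
};

int main() {
  std::vector<ToyThread> threads;
  threads.push_back({"main", true});
  threads.push_back({"VM Thread", false});
  threads.push_back({"worker-1", true});

  CollectMarkingTasks cl;
  toy_threads_do(threads, &cl);  // visit every thread once
  for (size_t i = 0; i < cl.queue.size(); i++) {
    printf("enqueued marking task for %s\n", cl.queue[i]);  // main, worker-1
  }
  return 0;
}

Keeping task creation in a closure owned by the collector means the shared Threads class only has to expose the generic threads_do() iteration, which appears to be the point of the gcinterface change.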
2070 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2071                                       bool maximum_heap_compaction,
2072                                       ParallelOldTracer *gc_tracer) {
2073   // Recursively traverse all live objects and mark them
2074   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
2075 
2076   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2077   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2078   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2079   TaskQueueSetSuper* qset = ParCompactionManager::stack_array();
2080   ParallelTaskTerminator terminator(active_gc_threads, qset);
2081 
2082   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
2083   ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2084 
2085   // Need new claim bits before marking starts.
2086   ClassLoaderDataGraph::clear_claimed_marks();
2087 
2088   {
2089     GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
2090 
2091     ParallelScavengeHeap::ParStrongRootsScope psrs;
2092 
2093     GCTaskQueue* q = GCTaskQueue::create();
2094 
2095     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2096     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2097     // We scan the thread roots in parallel
2098     CreateThreadRootsMarkingTasksClosure cl(q);
2099     Threads::threads_do(&cl);
2100     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2101     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2102     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2103     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2104     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2105     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2106     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2107 
2108     if (active_gc_threads > 1) {
2109       for (uint j = 0; j < active_gc_threads; j++) {
2110         q->enqueue(new StealMarkingTask(&terminator));
2111       }
2112     }
2113 
2114     gc_task_manager()->execute_and_wait(q);
2115   }
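In the Par Mark block above, the marking work is packaged as tasks: one MarkFromRootsTask per root group, the per-thread tasks built through the closure, and, when more than one worker is active, a StealMarkingTask per worker so threads that run out of local marking work can take work from others, with the ParallelTaskTerminator deciding when all workers are done. gc_task_manager()->execute_and_wait(q) then runs the queue on the GC workers and blocks until it is drained. A minimal standalone sketch of that enqueue/execute-and-wait shape, with a toy queue and worker pool in place of HotSpot's GCTaskQueue and GCTaskManager (not HotSpot code, and without the stealing/termination protocol):

#include <cstdio>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

// Toy stand-ins: a list of tasks drained by a fixed pool of workers; the
// caller blocks until everything has run.
struct ToyTaskQueue {
  std::vector<std::function<void()> > tasks;
  void enqueue(std::function<void()> t) { tasks.push_back(t); }
};

static void execute_and_wait(ToyTaskQueue& q, unsigned workers) {
  std::mutex m;
  size_t next = 0;                           // index of the next unclaimed task
  std::vector<std::thread> pool;
  for (unsigned i = 0; i < workers; i++) {
    pool.emplace_back([&]() {
      for (;;) {
        size_t idx;
        {
          std::lock_guard<std::mutex> g(m);
          if (next >= q.tasks.size()) return;  // nothing left to claim
          idx = next++;
        }
        q.tasks[idx]();                      // run the claimed task
      }
    });
  }
  for (size_t i = 0; i < pool.size(); i++) {
    pool[i].join();                          // the "wait" part
  }
}

int main() {
  ToyTaskQueue q;
  const char* roots[] = {"universe", "jni_handles", "object_synchronizer"};
  for (int i = 0; i < 3; i++) {
    const char* r = roots[i];
    q.enqueue([r]() { printf("marking from %s roots\n", r); });  // output order may vary
  }
  execute_and_wait(q, 2);                    // two toy workers, caller blocks here
  return 0;
}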
2116 
2117   // Process reference objects found during marking
2118   {
2119     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);