372 } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
373 _heuristics = new ShenandoahPassiveHeuristics();
374 } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
375 _heuristics = new ShenandoahCompactHeuristics();
376 } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
377 _heuristics = new ShenandoahTraversalHeuristics();
378 } else {
379 vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
380 }
381
382 if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
383 vm_exit_during_initialization(
384 err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
385 _heuristics->name()));
386 }
387 if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
388 vm_exit_during_initialization(
389 err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
390 _heuristics->name()));
391 }
392
393 if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
394 vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
395 }
396 log_info(gc, init)("Shenandoah heuristics: %s",
397 _heuristics->name());
398 } else {
399 ShouldNotReachHere();
400 }
401
402 }
403
404 #ifdef _MSC_VER
405 #pragma warning( push )
406 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
407 #endif
408
409 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
410 CollectedHeap(),
411 _initial_size(0),
412 _used(0),
413 _committed(0),
414 _bytes_allocated_since_gc_start(0),
415 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
773 // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
774
775 size_t tries = 0;
776
777 while (result == NULL && _progress_last_gc.is_set()) {
778 tries++;
779 control_thread()->handle_alloc_failure(req.size());
780 result = allocate_memory_under_lock(req, in_new_region);
781 }
782
783 while (result == NULL && tries <= ShenandoahFullGCThreshold) {
784 tries++;
785 control_thread()->handle_alloc_failure(req.size());
786 result = allocate_memory_under_lock(req, in_new_region);
787 }
788
789 } else {
790 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
791 result = allocate_memory_under_lock(req, in_new_region);
792 // Do not call handle_alloc_failure() here, because we cannot block.
793 // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
794 }
795
796 if (in_new_region) {
797 control_thread()->notify_heap_changed();
798 }
799
800 if (result != NULL) {
801 size_t requested = req.size();
802 size_t actual = req.actual_size();
803
804 assert (req.is_lab_alloc() || (requested == actual),
805 "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
806 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
807
808 if (req.is_mutator_alloc()) {
809 notify_mutator_alloc_words(actual, false);
810
811 // If we requested more than we were granted, give the rest back to pacer.
812 // This only matters if we are in the same pacing epoch: do not try to unpace
813 // over the budget for the other phase.
1087 workers()->threads_do(&cl);
1088 }
1089
// Recompute TLAB sizes by delegating to the shared CollectedHeap logic,
// which resizes the TLABs of all Java threads.
void ShenandoahHeap::resize_tlabs() {
  CollectedHeap::resize_all_tlabs();
}
1093
1094 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1095 private:
1096 ShenandoahRootEvacuator* _rp;
1097
1098 public:
1099 ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1100 AbstractGangTask("Shenandoah evacuate and update roots"),
1101 _rp(rp) {}
1102
1103 void work(uint worker_id) {
1104 ShenandoahParallelWorkerSession worker_session(worker_id);
1105 ShenandoahEvacOOMScope oom_evac_scope;
1106 ShenandoahEvacuateUpdateRootsClosure cl;
1107
1108 MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1109 _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1110 }
1111 };
1112
1113 void ShenandoahHeap::evacuate_and_update_roots() {
1114 #if defined(COMPILER2) || INCLUDE_JVMCI
1115 DerivedPointerTable::clear();
1116 #endif
1117 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1118
1119 {
1120 ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1121 ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1122 workers()->run_task(&roots_task);
1123 }
1124
1125 #if defined(COMPILER2) || INCLUDE_JVMCI
1126 DerivedPointerTable::update_pointers();
1127 #endif
2044 }
2045
// Toggle the "move" sub-phase flag of Full GC. Only legal while a Full GC
// is in progress, which the assert enforces.
void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert (is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}
2050
// Publish entry/exit of the update-references phase by flipping the
// UPDATEREFS bit in the global gc-state mask.
void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state_mask(UPDATEREFS, in_progress);
}
2054
// Register a newly created/installed nmethod with the Shenandoah code-root
// set so its embedded oops are processed during root scans.
void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::add_nmethod(nm);
}
2058
// Remove an nmethod (e.g. on unloading) from the Shenandoah code-root set.
void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::remove_nmethod(nm);
}
2062
2063 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2064 o = ShenandoahBarrierSet::barrier_set()->write_barrier(o);
2065 ShenandoahHeapLocker locker(lock());
2066 heap_region_containing(o)->make_pinned();
2067 return o;
2068 }
2069
2070 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2071 o = ShenandoahBarrierSet::barrier_set()->read_barrier(o);
2072 ShenandoahHeapLocker locker(lock());
2073 heap_region_containing(o)->make_unpinned();
2074 }
2075
// Accessor for the GC timer used for phase timing and event reporting.
GCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}
2079
2080 #ifdef ASSERT
2081 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2082 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2083
2084 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2085 if (UseDynamicNumberOfGCThreads ||
2086 (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2087 assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2088 } else {
2089 // Use ParallelGCThreads inside safepoints
2090 assert(nworkers == ParallelGCThreads, "Use ParalleGCThreads within safepoints");
2091 }
|
372 } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
373 _heuristics = new ShenandoahPassiveHeuristics();
374 } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
375 _heuristics = new ShenandoahCompactHeuristics();
376 } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
377 _heuristics = new ShenandoahTraversalHeuristics();
378 } else {
379 vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
380 }
381
382 if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
383 vm_exit_during_initialization(
384 err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
385 _heuristics->name()));
386 }
387 if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
388 vm_exit_during_initialization(
389 err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
390 _heuristics->name()));
391 }
392 log_info(gc, init)("Shenandoah heuristics: %s",
393 _heuristics->name());
394 } else {
395 ShouldNotReachHere();
396 }
397
398 }
399
400 #ifdef _MSC_VER
401 #pragma warning( push )
402 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
403 #endif
404
405 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
406 CollectedHeap(),
407 _initial_size(0),
408 _used(0),
409 _committed(0),
410 _bytes_allocated_since_gc_start(0),
411 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
769 // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
770
771 size_t tries = 0;
772
773 while (result == NULL && _progress_last_gc.is_set()) {
774 tries++;
775 control_thread()->handle_alloc_failure(req.size());
776 result = allocate_memory_under_lock(req, in_new_region);
777 }
778
779 while (result == NULL && tries <= ShenandoahFullGCThreshold) {
780 tries++;
781 control_thread()->handle_alloc_failure(req.size());
782 result = allocate_memory_under_lock(req, in_new_region);
783 }
784
785 } else {
786 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
787 result = allocate_memory_under_lock(req, in_new_region);
788 // Do not call handle_alloc_failure() here, because we cannot block.
789 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
790 }
791
792 if (in_new_region) {
793 control_thread()->notify_heap_changed();
794 }
795
796 if (result != NULL) {
797 size_t requested = req.size();
798 size_t actual = req.actual_size();
799
800 assert (req.is_lab_alloc() || (requested == actual),
801 "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
802 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
803
804 if (req.is_mutator_alloc()) {
805 notify_mutator_alloc_words(actual, false);
806
807 // If we requested more than we were granted, give the rest back to pacer.
808 // This only matters if we are in the same pacing epoch: do not try to unpace
809 // over the budget for the other phase.
1083 workers()->threads_do(&cl);
1084 }
1085
// Recompute TLAB sizes by delegating to the shared CollectedHeap logic,
// which resizes the TLABs of all Java threads.
void ShenandoahHeap::resize_tlabs() {
  CollectedHeap::resize_all_tlabs();
}
1089
1090 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1091 private:
1092 ShenandoahRootEvacuator* _rp;
1093
1094 public:
1095 ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1096 AbstractGangTask("Shenandoah evacuate and update roots"),
1097 _rp(rp) {}
1098
1099 void work(uint worker_id) {
1100 ShenandoahParallelWorkerSession worker_session(worker_id);
1101 ShenandoahEvacOOMScope oom_evac_scope;
1102 ShenandoahEvacuateUpdateRootsClosure cl;
1103 MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1104 _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1105 }
1106 };
1107
1108 void ShenandoahHeap::evacuate_and_update_roots() {
1109 #if defined(COMPILER2) || INCLUDE_JVMCI
1110 DerivedPointerTable::clear();
1111 #endif
1112 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1113
1114 {
1115 ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1116 ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1117 workers()->run_task(&roots_task);
1118 }
1119
1120 #if defined(COMPILER2) || INCLUDE_JVMCI
1121 DerivedPointerTable::update_pointers();
1122 #endif
2039 }
2040
// Toggle the "move" sub-phase flag of Full GC. Only legal while a Full GC
// is in progress, which the assert enforces.
void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert (is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}
2045
// Publish entry/exit of the update-references phase by flipping the
// UPDATEREFS bit in the global gc-state mask.
void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state_mask(UPDATEREFS, in_progress);
}
2049
// Register a newly created/installed nmethod with the Shenandoah code-root
// set so its embedded oops are processed during root scans.
void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::add_nmethod(nm);
}
2053
// Remove an nmethod (e.g. on unloading) from the Shenandoah code-root set.
void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::remove_nmethod(nm);
}
2057
// Pin the region containing the object under the heap lock, for JNI
// critical sections. Unlike the older barrier-based variant, no resolve
// step is needed here; the object is pinned as-is.
oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
  ShenandoahHeapLocker locker(lock());
  heap_region_containing(o)->make_pinned();
  return o;
}
2063
// Release a pin taken by pin_object(): unpin the object's region under
// the heap lock.
void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
  ShenandoahHeapLocker locker(lock());
  heap_region_containing(o)->make_unpinned();
}
2068
// Accessor for the GC timer used for phase timing and event reporting.
GCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}
2072
2073 #ifdef ASSERT
2074 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2075 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2076
2077 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2078 if (UseDynamicNumberOfGCThreads ||
2079 (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2080 assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2081 } else {
2082 // Use ParallelGCThreads inside safepoints
2083 assert(nworkers == ParallelGCThreads, "Use ParalleGCThreads within safepoints");
2084 }
|