< prev index next >
src/hotspot/share/runtime/sweeper.cpp
Print this page
rev 51807 : imported patch nmethod-marking.patch
rev 51808 : 8132849: Increased stop time in cleanup phase because of single-threaded walk of thread stacks in NMethodSweeper::mark_active_nmethods()
rev 51809 : [mq]: JDK-8132849-01.patch
*** 26,44 ****
--- 26,48 ----
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
+ #include "gc/shared/collectedHeap.hpp"
+ #include "gc/shared/workgroup.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
+ #include "memory/universe.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/interfaceSupport.inline.hpp"
+ #include "runtime/handshake.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/thread.inline.hpp"
*** 195,218 ****
--- 199,263 ----
}
// Returns true when the current sweep pass has exhausted the code cache
// iterator, i.e. the sweeper cannot make further progress until thread
// stacks have been scanned again.
bool NMethodSweeper::wait_for_stack_scanning() {
  return _current.end();
}
+ class ThreadToCodeBlobClosure : public ThreadClosure {
+ private:
+ CodeBlobClosure* _cl;
+ public:
+ ThreadToCodeBlobClosure(CodeBlobClosure* cl) : _cl(cl) {}
+ void do_thread(Thread* thread) {
+ if (thread->is_Java_thread() &&
+ ! thread->is_Code_cache_sweeper_thread()) {
+ JavaThread* jt = (JavaThread*) thread;
+ jt->nmethods_do(_cl);
+ }
+ }
+ };
+
+ class NMethodMarkingTask : public AbstractGangTask {
+ private:
+ ThreadToCodeBlobClosure* _cl;
+ public:
+ NMethodMarkingTask(ThreadToCodeBlobClosure* cl) :
+ AbstractGangTask("Parallel NMethod Marking"),
+ _cl(cl) {}
+ void work(uint worker_id) {
+ Threads::possibly_parallel_threads_do(true, _cl);
+ }
+ };
+
/**
* Scans the stacks of all Java threads and marks activations of not-entrant methods.
* No need to synchronize access, since 'mark_active_nmethods' is always executed at a
* safepoint.
*/
void NMethodSweeper::mark_active_nmethods() {
CodeBlobClosure* cl = prepare_mark_active_nmethods();
if (cl != NULL) {
+ WorkGang* workers = Universe::heap()->get_safepoint_workers();
+ if (workers != NULL) {
+ ThreadToCodeBlobClosure tcl(cl);
+ NMethodMarkingTask task(&tcl);
+ workers->run_task(&task);
+ } else {
Threads::nmethods_do(cl);
}
+ }
}
// Prepares the closure used to mark activations of not-entrant methods.
// Returns NULL when no stack scanning is required. (Definition continues
// beyond this diff hunk.)
CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
+ #ifdef ASSERT
// With thread-local handshakes the caller is the sweeper thread holding
// CodeCache_lock; otherwise this must run inside a safepoint.
+ if (ThreadLocalHandshakes) {
+ assert(Thread::current()->is_Code_cache_sweeper_thread(), "must be executed under CodeCache_lock and in sweeper thread");
+ assert_lock_strong(CodeCache_lock);
+ } else {
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
+ }
+ #endif
+
// If we do not want to reclaim not-entrant or zombie methods there is no need
// to scan stacks
if (!MethodFlushing) {
return NULL;
}
*** 256,267 ****
--- 301,324 ----
* methods. Stack scanning is mandatory for the sweeper to make progress.
*/
void NMethodSweeper::do_stack_scanning() {
assert(!CodeCache_lock->owned_by_self(), "just checking");
if (wait_for_stack_scanning()) {
+ if (ThreadLocalHandshakes) {
+ CodeBlobClosure* code_cl;
+ {
+ MutexLockerEx ccl(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ code_cl = prepare_mark_active_nmethods();
+ }
+ if (code_cl != NULL) {
+ ThreadToCodeBlobClosure tcl(code_cl);
+ Handshake::execute(&tcl);
+ }
+ } else {
VM_MarkActiveNMethods op;
VMThread::execute(&op);
+ }
_should_sweep = true;
}
}
void NMethodSweeper::sweeper_loop() {
< prev index next >