< prev index next >
src/share/vm/gc/parallel/gcTaskManager.cpp
Print this page
rev 10651 : 6858051: Create GC worker threads dynamically
Reviewed-by:
*** 1,7 ****
/*
! * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
--- 1,7 ----
/*
! * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*** 23,40 ****
*/
#include "precompiled.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/gcTaskThread.hpp"
! #include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcId.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
//
// GCTask
//
--- 23,41 ----
*/
#include "precompiled.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/gcTaskThread.hpp"
! #include "gc/shared/adaptiveSizePolicy.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
+ #include "runtime/os.hpp"
//
// GCTask
//
*** 369,382 ****
// GCTaskManager
//
GCTaskManager::GCTaskManager(uint workers) :
_workers(workers),
_active_workers(0),
! _idle_workers(0) {
initialize();
}
void GCTaskManager::initialize() {
if (TraceGCTaskManager) {
tty->print_cr("GCTaskManager::initialize: workers: %u", workers());
}
assert(workers() != 0, "no workers");
--- 370,400 ----
// GCTaskManager
//
GCTaskManager::GCTaskManager(uint workers) :
_workers(workers),
_active_workers(0),
! _idle_workers(0),
! _created_workers(0) {
initialize();
}
+ GCTaskThread* GCTaskManager::install_worker(uint t) {
+ GCTaskThread* new_worker = GCTaskThread::create(this, t, _processor_assignment[t]);
+ set_thread(t, new_worker);
+ return new_worker;
+ }
+
+ bool GCTaskManager::add_workers(bool initializing) {
+ os::ThreadType worker_type = os::pgc_thread;
+ return AdaptiveSizePolicy::add_workers(this,
+ _active_workers,
+ (uint) _workers,
+ _created_workers,
+ worker_type,
+ initializing);
+ }
+
void GCTaskManager::initialize() {
if (TraceGCTaskManager) {
tty->print_cr("GCTaskManager::initialize: workers: %u", workers());
}
assert(workers() != 0, "no workers");
*** 391,447 ****
_resource_flag = NEW_C_HEAP_ARRAY(bool, workers(), mtGC);
{
// Set up worker threads.
// Distribute the workers among the available processors,
// unless we were told not to, or if the os doesn't want to.
! uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);
if (!BindGCTaskThreadsToCPUs ||
! !os::distribute_processes(workers(), processor_assignment)) {
for (uint a = 0; a < workers(); a += 1) {
! processor_assignment[a] = sentinel_worker();
}
}
_thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC);
! for (uint t = 0; t < workers(); t += 1) {
! set_thread(t, GCTaskThread::create(this, t, processor_assignment[t]));
! }
if (TraceGCTaskThread) {
tty->print("GCTaskManager::initialize: distribution:");
for (uint t = 0; t < workers(); t += 1) {
! tty->print(" %u", processor_assignment[t]);
}
tty->cr();
}
- FREE_C_HEAP_ARRAY(uint, processor_assignment);
}
reset_busy_workers();
set_unblocked();
for (uint w = 0; w < workers(); w += 1) {
set_resource_flag(w, false);
}
reset_delivered_tasks();
reset_completed_tasks();
reset_barriers();
reset_emptied_queue();
- for (uint s = 0; s < workers(); s += 1) {
- thread(s)->start();
- }
}
GCTaskManager::~GCTaskManager() {
assert(busy_workers() == 0, "still have busy workers");
assert(queue()->is_empty(), "still have queued work");
NoopGCTask::destroy(_noop_task);
_noop_task = NULL;
if (_thread != NULL) {
! for (uint i = 0; i < workers(); i += 1) {
GCTaskThread::destroy(thread(i));
set_thread(i, NULL);
}
FREE_C_HEAP_ARRAY(GCTaskThread*, _thread);
_thread = NULL;
}
if (_resource_flag != NULL) {
FREE_C_HEAP_ARRAY(bool, _resource_flag);
_resource_flag = NULL;
}
if (queue() != NULL) {
--- 409,466 ----
_resource_flag = NEW_C_HEAP_ARRAY(bool, workers(), mtGC);
{
// Set up worker threads.
// Distribute the workers among the available processors,
// unless we were told not to, or if the os doesn't want to.
! _processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);
if (!BindGCTaskThreadsToCPUs ||
! !os::distribute_processes(workers(), _processor_assignment)) {
for (uint a = 0; a < workers(); a += 1) {
! _processor_assignment[a] = sentinel_worker();
}
}
+
_thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC);
! _active_workers = AdaptiveSizePolicy::initial_number_of_workers();
! add_workers(true);
!
if (TraceGCTaskThread) {
tty->print("GCTaskManager::initialize: distribution:");
for (uint t = 0; t < workers(); t += 1) {
! tty->print(" %u", _processor_assignment[t]);
}
tty->cr();
}
}
reset_busy_workers();
set_unblocked();
for (uint w = 0; w < workers(); w += 1) {
set_resource_flag(w, false);
}
reset_delivered_tasks();
reset_completed_tasks();
reset_barriers();
reset_emptied_queue();
}
GCTaskManager::~GCTaskManager() {
assert(busy_workers() == 0, "still have busy workers");
assert(queue()->is_empty(), "still have queued work");
NoopGCTask::destroy(_noop_task);
_noop_task = NULL;
if (_thread != NULL) {
! for (uint i = 0; i < created_workers(); i += 1) {
GCTaskThread::destroy(thread(i));
set_thread(i, NULL);
}
FREE_C_HEAP_ARRAY(GCTaskThread*, _thread);
_thread = NULL;
}
+ if (_processor_assignment != NULL) {
+ FREE_C_HEAP_ARRAY(uint, _processor_assignment);
+ _processor_assignment = NULL;
+ }
if (_resource_flag != NULL) {
FREE_C_HEAP_ARRAY(bool, _resource_flag);
_resource_flag = NULL;
}
if (queue() != NULL) {
*** 464,473 ****
--- 483,495 ----
assert(!all_workers_active() || active_workers() == ParallelGCThreads,
"all_workers_active() is incorrect: "
"active %d ParallelGCThreads %u", active_workers(),
ParallelGCThreads);
+ _active_workers = MIN2(_active_workers, _workers);
+ // "add_workers" does not guarantee any additional workers
+ add_workers(false);
log_trace(gc, task)("GCTaskManager::set_active_gang(): "
"all_workers_active() %d workers %d "
"active %d ParallelGCThreads %u",
all_workers_active(), workers(), active_workers(),
ParallelGCThreads);
*** 493,521 ****
// number of idle_workers. The idle_workers are stuck in
// idle tasks and will no longer be release (since a new GC
// is starting). Try later to release enough idle_workers
// to allow the desired number of active_workers.
more_inactive_workers =
! workers() - active_workers() - idle_workers();
if (more_inactive_workers < 0) {
int reduced_active_workers = active_workers() + more_inactive_workers;
set_active_workers(reduced_active_workers);
more_inactive_workers = 0;
}
log_trace(gc, task)("JT: %d workers %d active %d idle %d more %d",
Threads::number_of_non_daemon_threads(),
! workers(),
active_workers(),
idle_workers(),
more_inactive_workers);
}
GCTaskQueue* q = GCTaskQueue::create();
for(uint i = 0; i < (uint) more_inactive_workers; i++) {
q->enqueue(IdleGCTask::create_on_c_heap());
increment_idle_workers();
}
! assert(workers() == active_workers() + idle_workers(),
"total workers should equal active + inactive");
add_list(q);
// GCTaskQueue* q was created in a ResourceArea so a
// destroy() call is not needed.
}
--- 515,543 ----
// number of idle_workers. The idle_workers are stuck in
// idle tasks and will no longer be release (since a new GC
// is starting). Try later to release enough idle_workers
// to allow the desired number of active_workers.
more_inactive_workers =
! created_workers() - active_workers() - idle_workers();
if (more_inactive_workers < 0) {
int reduced_active_workers = active_workers() + more_inactive_workers;
set_active_workers(reduced_active_workers);
more_inactive_workers = 0;
}
log_trace(gc, task)("JT: %d workers %d active %d idle %d more %d",
Threads::number_of_non_daemon_threads(),
! created_workers(),
active_workers(),
idle_workers(),
more_inactive_workers);
}
GCTaskQueue* q = GCTaskQueue::create();
for(uint i = 0; i < (uint) more_inactive_workers; i++) {
q->enqueue(IdleGCTask::create_on_c_heap());
increment_idle_workers();
}
! assert(created_workers() == active_workers() + idle_workers(),
"total workers should equal active + inactive");
add_list(q);
// GCTaskQueue* q was created in a ResourceArea so a
// destroy() call is not needed.
}
*** 549,571 ****
}
}
void GCTaskManager::threads_do(ThreadClosure* tc) {
assert(tc != NULL, "Null ThreadClosure");
! uint num_thr = workers();
for (uint i = 0; i < num_thr; i++) {
tc->do_thread(thread(i));
}
}
GCTaskThread* GCTaskManager::thread(uint which) {
! assert(which < workers(), "index out of bounds");
assert(_thread[which] != NULL, "shouldn't have null thread");
return _thread[which];
}
void GCTaskManager::set_thread(uint which, GCTaskThread* value) {
assert(which < workers(), "index out of bounds");
assert(value != NULL, "shouldn't have null thread");
_thread[which] = value;
}
--- 571,594 ----
}
}
void GCTaskManager::threads_do(ThreadClosure* tc) {
assert(tc != NULL, "Null ThreadClosure");
! uint num_thr = created_workers();
for (uint i = 0; i < num_thr; i++) {
tc->do_thread(thread(i));
}
}
GCTaskThread* GCTaskManager::thread(uint which) {
! assert(which < created_workers(), "index out of bounds");
assert(_thread[which] != NULL, "shouldn't have null thread");
return _thread[which];
}
void GCTaskManager::set_thread(uint which, GCTaskThread* value) {
+ // "_created_workers" may not have been updated yet so use workers()
assert(which < workers(), "index out of bounds");
assert(value != NULL, "shouldn't have null thread");
_thread[which] = value;
}
*** 722,732 ****
return _busy_workers;
}
void GCTaskManager::release_all_resources() {
// If you want this to be done atomically, do it in a WaitForBarrierGCTask.
! for (uint i = 0; i < workers(); i += 1) {
set_resource_flag(i, true);
}
}
bool GCTaskManager::should_release_resources(uint which) {
--- 745,755 ----
return _busy_workers;
}
void GCTaskManager::release_all_resources() {
// If you want this to be done atomically, do it in a WaitForBarrierGCTask.
! for (uint i = 0; i < created_workers(); i += 1) {
set_resource_flag(i, true);
}
}
bool GCTaskManager::should_release_resources(uint which) {
*** 770,784 ****
// We have to release the barrier tasks!
WaitForBarrierGCTask::destroy(fin);
}
bool GCTaskManager::resource_flag(uint which) {
! assert(which < workers(), "index out of bounds");
return _resource_flag[which];
}
void GCTaskManager::set_resource_flag(uint which, bool value) {
assert(which < workers(), "index out of bounds");
_resource_flag[which] = value;
}
//
--- 793,808 ----
// We have to release the barrier tasks!
WaitForBarrierGCTask::destroy(fin);
}
bool GCTaskManager::resource_flag(uint which) {
! assert(which < created_workers(), "index out of bounds");
return _resource_flag[which];
}
void GCTaskManager::set_resource_flag(uint which, bool value) {
+ // "_created_workers" may not have been updated yet so use workers()
assert(which < workers(), "index out of bounds");
_resource_flag[which] = value;
}
//
< prev index next >