
src/share/vm/gc/g1/g1ConcurrentMark.cpp

rev 12666 : imported patch 8168467-use-taskentry-as-mark-stack-elem
rev 12667 : imported patch 8168467-kim-review

*** 144,162 ****
  bool G1CMMarkStack::resize(size_t new_capacity) {
    assert(is_empty(), "Only resize when stack is empty.");
    assert(new_capacity <= _max_chunk_capacity,
           "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
  
!   OopChunk* new_base = MmapArrayAllocator<OopChunk, mtGC>::allocate_or_null(new_capacity);
  
    if (new_base == NULL) {
!     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(OopChunk));
      return false;
    }
    // Release old mapping.
    if (_base != NULL) {
!     MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
    }
  
    _base = new_base;
    _chunk_capacity = new_capacity;
    set_empty();
--- 144,162 ----
  bool G1CMMarkStack::resize(size_t new_capacity) {
    assert(is_empty(), "Only resize when stack is empty.");
    assert(new_capacity <= _max_chunk_capacity,
           "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
  
!   TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::allocate_or_null(new_capacity);
  
    if (new_base == NULL) {
!     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
      return false;
    }
    // Release old mapping.
    if (_base != NULL) {
!     MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
    }
  
    _base = new_base;
    _chunk_capacity = new_capacity;
    set_empty();
*** 164,183 ****
    return true;
  }
  
  size_t G1CMMarkStack::capacity_alignment() {
!   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(OopChunk)) / sizeof(void*);
  }
  
  bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
    guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
  
!   size_t const OopChunkSizeInVoidStar = sizeof(OopChunk) / sizeof(void*);
  
!   _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
!   size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
  
    guarantee(initial_chunk_capacity <= _max_chunk_capacity,
              "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
              _max_chunk_capacity,
              initial_chunk_capacity);
--- 164,183 ----
    return true;
  }
  
  size_t G1CMMarkStack::capacity_alignment() {
!   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
  }
  
  bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
    guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
  
!   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
  
!   _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
!   size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  
    guarantee(initial_chunk_capacity <= _max_chunk_capacity,
              "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
              _max_chunk_capacity,
              initial_chunk_capacity);
*** 209,261 ****
    }
  }
  
  G1CMMarkStack::~G1CMMarkStack() {
    if (_base != NULL) {
!     MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
    }
  }
  
! void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
    elem->next = *list;
    *list = elem;
  }
  
! void G1CMMarkStack::add_chunk_to_chunk_list(OopChunk* elem) {
    MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
    add_chunk_to_list(&_chunk_list, elem);
    _chunks_in_chunk_list++;
  }
  
! void G1CMMarkStack::add_chunk_to_free_list(OopChunk* elem) {
    MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
    add_chunk_to_list(&_free_list, elem);
  }
  
! G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
!   OopChunk* result = *list;
    if (result != NULL) {
      *list = (*list)->next;
    }
    return result;
  }
  
! G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
    MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
!   OopChunk* result = remove_chunk_from_list(&_chunk_list);
    if (result != NULL) {
      _chunks_in_chunk_list--;
    }
    return result;
  }
  
! G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_free_list() {
    MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
    return remove_chunk_from_list(&_free_list);
  }
  
! G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
    // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
    // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
    // wraparound of _hwm.
    if (_hwm >= _chunk_capacity) {
      return NULL;
--- 209,261 ----
    }
  }
  
  G1CMMarkStack::~G1CMMarkStack() {
    if (_base != NULL) {
!     MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
    }
  }
  
! void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
    elem->next = *list;
    *list = elem;
  }
  
! void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
    MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
    add_chunk_to_list(&_chunk_list, elem);
    _chunks_in_chunk_list++;
  }
  
! void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
    MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
    add_chunk_to_list(&_free_list, elem);
  }
  
! G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
!   TaskQueueEntryChunk* result = *list;
    if (result != NULL) {
      *list = (*list)->next;
    }
    return result;
  }
  
! G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
    MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
!   TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
    if (result != NULL) {
      _chunks_in_chunk_list--;
    }
    return result;
  }
  
! G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
    MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
    return remove_chunk_from_list(&_free_list);
  }
  
! G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
    // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
    // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
    // wraparound of _hwm.
    if (_hwm >= _chunk_capacity) {
      return NULL;
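
The allocate_new_chunk() hunk above claims chunks from the backing array with a racy pre-check of _hwm followed by an atomic increment; only the index returned by the increment decides whether the claim succeeded. Below is a minimal, self-contained sketch of that claim pattern, using C++11 std::atomic instead of HotSpot's Atomic class (ChunkPool and its member names are illustrative, not part of the patch):

#include <atomic>
#include <cstddef>
#include <new>

struct Chunk { Chunk* next; };

// Illustrative bump allocator over a fixed array of chunks.
// The relaxed pre-check is only an optimization; correctness comes from
// re-checking the index returned by fetch_add.
class ChunkPool {
  Chunk*              _base;      // backing array of _capacity chunks
  size_t              _capacity;
  std::atomic<size_t> _hwm;       // high-water mark: next unclaimed index

public:
  ChunkPool(Chunk* base, size_t capacity) : _base(base), _capacity(capacity), _hwm(0) {}

  Chunk* allocate_new_chunk() {
    // Dirty read: may be stale, but it bounds the number of useless fetch_adds
    // (and so bounds how far _hwm can run past _capacity).
    if (_hwm.load(std::memory_order_relaxed) >= _capacity) {
      return nullptr;
    }
    size_t cur_idx = _hwm.fetch_add(1);   // claim an index
    if (cur_idx >= _capacity) {
      return nullptr;                     // lost the race past the end
    }
    Chunk* result = ::new (&_base[cur_idx]) Chunk();
    result->next = nullptr;
    return result;
  }
};
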
*** 264,281 ****
    size_t cur_idx = Atomic::add(1, &_hwm) - 1;
    if (cur_idx >= _chunk_capacity) {
      return NULL;
    }
  
!   OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
    result->next = NULL;
    return result;
  }
  
! bool G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
    // Get a new chunk.
!   OopChunk* new_chunk = remove_chunk_from_free_list();
  
    if (new_chunk == NULL) {
      // Did not get a chunk from the free list. Allocate from backing memory.
      new_chunk = allocate_new_chunk();
    }
--- 264,281 ----
    size_t cur_idx = Atomic::add(1, &_hwm) - 1;
    if (cur_idx >= _chunk_capacity) {
      return NULL;
    }
  
!   TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
    result->next = NULL;
    return result;
  }
  
! bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
    // Get a new chunk.
!   TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
  
    if (new_chunk == NULL) {
      // Did not get a chunk from the free list. Allocate from backing memory.
      new_chunk = allocate_new_chunk();
    }
*** 283,307 ****
    if (new_chunk == NULL) {
      _out_of_memory = true;
      return false;
    }
  
!   Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, OopsPerChunk * sizeof(oop));
  
    add_chunk_to_chunk_list(new_chunk);
  
    return true;
  }
  
! bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
!   OopChunk* cur = remove_chunk_from_chunk_list();
  
    if (cur == NULL) {
      return false;
    }
  
!   Copy::conjoint_memory_atomic(cur->data, ptr_arr, OopsPerChunk * sizeof(oop));
  
    add_chunk_to_free_list(cur);
  
    return true;
  }
--- 283,307 ----
    if (new_chunk == NULL) {
      _out_of_memory = true;
      return false;
    }
  
!   Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
  
    add_chunk_to_chunk_list(new_chunk);
  
    return true;
  }
  
! bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
!   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
  
    if (cur == NULL) {
      return false;
    }
  
!   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
  
    add_chunk_to_free_list(cur);
  
    return true;
  }
*** 2006,2022 ****
    _g1h(G1CollectedHeap::heap()), _phase(phase), _info(info) { }
  
!   void operator()(oop obj) const {
!     guarantee(G1CMObjArrayProcessor::is_array_slice(obj) || obj->is_oop(),
!               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
!               p2i(obj), _phase, _info);
!     guarantee(G1CMObjArrayProcessor::is_array_slice(obj) || !_g1h->is_in_cset(obj),
!               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
!               p2i(obj), _phase, _info);
    }
  };
  
  void G1ConcurrentMark::verify_no_cset_oops() {
    assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
--- 2006,2026 ----
    _g1h(G1CollectedHeap::heap()), _phase(phase), _info(info) { }
  
!   void operator()(G1TaskQueueEntry task_entry) const {
!     if (task_entry.is_array_slice()) {
!       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
!       return;
!     }
!     guarantee(task_entry.obj()->is_oop(),
!               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
!               p2i(task_entry.obj()), _phase, _info);
!     guarantee(!_g1h->is_in_cset(task_entry.obj()),
!               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
!               p2i(task_entry.obj()), _phase, _info);
    }
  };
  
  void G1ConcurrentMark::verify_no_cset_oops() {
    assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
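
The verification hunk above is the first place in this file that exercises the G1TaskQueueEntry accessors (is_array_slice(), obj(), slice()); the entry type itself is defined elsewhere in the patch. As a rough sketch only, one way such an entry can hold either an object pointer or an array-slice address is to tag the low pointer bit; the names and layout below are assumptions for illustration, not the actual HotSpot definition:

#include <cassert>
#include <cstdint>

typedef void* oop;        // stand-in for HotSpot's oop
typedef char  HeapWord;   // stand-in; only HeapWord* is used here

// Sketch of a tagged task-queue entry: either an object pointer or an
// array-slice pointer, distinguished by the lowest address bit.
// Assumes both kinds of pointer are at least 2-byte aligned.
class TaskEntrySketch {
  void* _holder;
  static const uintptr_t ArraySliceBit = 1;

  explicit TaskEntrySketch(void* holder) : _holder(holder) {}

public:
  TaskEntrySketch() : _holder(nullptr) {}

  static TaskEntrySketch from_oop(oop obj) {
    return TaskEntrySketch(obj);
  }
  static TaskEntrySketch from_slice(HeapWord* addr) {
    return TaskEntrySketch((void*)((uintptr_t)addr | ArraySliceBit));
  }

  bool is_null() const        { return _holder == nullptr; }
  bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }

  oop obj() const {
    assert(!is_array_slice());
    return (oop)_holder;                               // untagged pointer
  }
  HeapWord* slice() const {
    assert(is_array_slice());
    return (HeapWord*)((uintptr_t)_holder & ~ArraySliceBit);  // strip the tag
  }
};
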
*** 2206,2216 ****
    assert(addr >= _task->finger(), "invariant");
  
    // We move that task's local finger along.
    _task->move_finger_to(addr);
  
!   _task->scan_object(oop(addr));
    // we only partially drain the local queue and global stack
    _task->drain_local_queue(true);
    _task->drain_global_stack(true);
  
    // if the has_aborted flag has been raised, we need to bail out of
--- 2210,2220 ----
    assert(addr >= _task->finger(), "invariant");
  
    // We move that task's local finger along.
    _task->move_finger_to(addr);
  
!   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
    // we only partially drain the local queue and global stack
    _task->drain_local_queue(true);
    _task->drain_global_stack(true);
  
    // if the has_aborted flag has been raised, we need to bail out of
*** 2397,2416 ****
  }
  
  void G1CMTask::move_entries_to_global_stack() {
    // Local array where we'll store the entries that will be popped
    // from the local queue.
!   oop buffer[G1CMMarkStack::OopsPerChunk];
  
    size_t n = 0;
!   oop obj;
!   while (n < G1CMMarkStack::OopsPerChunk && _task_queue->pop_local(obj)) {
!     buffer[n] = obj;
      ++n;
    }
!   if (n < G1CMMarkStack::OopsPerChunk) {
!     buffer[n] = NULL;
    }
  
    if (n > 0) {
      if (!_cm->mark_stack_push(buffer)) {
        set_has_aborted();
--- 2401,2420 ----
  }
  
  void G1CMTask::move_entries_to_global_stack() {
    // Local array where we'll store the entries that will be popped
    // from the local queue.
!   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
  
    size_t n = 0;
!   G1TaskQueueEntry task_entry;
!   while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
!     buffer[n] = task_entry;
      ++n;
    }
!   if (n < G1CMMarkStack::EntriesPerChunk) {
!     buffer[n] = G1TaskQueueEntry();
    }
  
    if (n > 0) {
      if (!_cm->mark_stack_push(buffer)) {
        set_has_aborted();
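
In the hunk above, move_entries_to_global_stack() fills a fixed-size local buffer and, when the buffer is not completely full, writes one empty entry as a terminator; get_entries_from_global_stack() in the next hunk stops at the first empty entry when reading a chunk back. A small self-contained sketch of that sentinel convention, using plain pointers and an illustrative chunk size rather than G1's types:

#include <cstddef>

const size_t EntriesPerChunk = 8;   // illustrative value, not G1's

// Producer side: copy up to EntriesPerChunk entries from 'src' into 'buffer'.
// If the chunk is only partially filled, a null sentinel marks the end.
size_t fill_chunk(void* buffer[EntriesPerChunk], void** src, size_t src_len) {
  size_t n = 0;
  while (n < EntriesPerChunk && n < src_len) {
    buffer[n] = src[n];
    ++n;
  }
  if (n < EntriesPerChunk) {
    buffer[n] = nullptr;            // sentinel: consumer stops here
  }
  return n;
}

// Consumer side: process entries until the chunk is exhausted or the
// sentinel is hit; returns how many entries were actually present.
size_t drain_chunk(void* const buffer[EntriesPerChunk]) {
  size_t processed = 0;
  for (size_t i = 0; i < EntriesPerChunk; ++i) {
    if (buffer[i] == nullptr) {
      break;                        // partially filled chunk
    }
    ++processed;
  }
  return processed;
}
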
*** 2422,2445 ****
  }
  
  bool G1CMTask::get_entries_from_global_stack() {
    // Local array where we'll store the entries that will be popped
    // from the global stack.
!   oop buffer[G1CMMarkStack::OopsPerChunk];
  
    if (!_cm->mark_stack_pop(buffer)) {
      return false;
    }
  
    // We did actually pop at least one entry.
!   for (size_t i = 0; i < G1CMMarkStack::OopsPerChunk; ++i) {
!     oop elem = buffer[i];
!     if (elem == NULL) {
        break;
      }
!     assert(G1CMObjArrayProcessor::is_array_slice(elem) || elem->is_oop(), "Element " PTR_FORMAT " must be an array slice or oop", p2i(elem));
!     bool success = _task_queue->push(elem);
      // We only call this when the local queue is empty or under a
      // given target limit. So, we do not expect this push to fail.
      assert(success, "invariant");
    }
--- 2426,2449 ----
  }
  
  bool G1CMTask::get_entries_from_global_stack() {
    // Local array where we'll store the entries that will be popped
    // from the global stack.
!   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
  
    if (!_cm->mark_stack_pop(buffer)) {
      return false;
    }
  
    // We did actually pop at least one entry.
!   for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
!     G1TaskQueueEntry task_entry = buffer[i];
!     if (task_entry.is_null()) {
        break;
      }
!     assert(task_entry.is_array_slice() || task_entry.obj()->is_oop(), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
!     bool success = _task_queue->push(task_entry);
      // We only call this when the local queue is empty or under a
      // given target limit. So, we do not expect this push to fail.
      assert(success, "invariant");
    }
*** 2462,2479 ****
    } else {
      target_size = 0;
    }
  
    if (_task_queue->size() > target_size) {
!     oop obj;
!     bool ret = _task_queue->pop_local(obj);
      while (ret) {
!       scan_object(obj);
        if (_task_queue->size() <= target_size || has_aborted()) {
          ret = false;
        } else {
!         ret = _task_queue->pop_local(obj);
        }
      }
    }
  }
--- 2466,2483 ----
    } else {
      target_size = 0;
    }
  
    if (_task_queue->size() > target_size) {
!     G1TaskQueueEntry entry;
!     bool ret = _task_queue->pop_local(entry);
      while (ret) {
!       scan_task_entry(entry);
        if (_task_queue->size() <= target_size || has_aborted()) {
          ret = false;
        } else {
!         ret = _task_queue->pop_local(entry);
        }
      }
    }
  }
*** 2550,2561 ****
                         _step_times_ms.sd());
    log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
                         _step_times_ms.maximum(), _step_times_ms.sum());
  }
  
! bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
!   return _task_queues->steal(worker_id, hash_seed, obj);
  }
  
  /*****************************************************************************
  
      The do_marking_step(time_target_ms, ...) method is the building
--- 2554,2565 ----
                         _step_times_ms.sd());
    log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
                         _step_times_ms.maximum(), _step_times_ms.sum());
  }
  
! bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) {
!   return _task_queues->steal(worker_id, hash_seed, task_entry);
  }
  
  /*****************************************************************************
  
      The do_marking_step(time_target_ms, ...) method is the building
*** 2874,2886 ****
        // We cannot check whether the global stack is empty, since other
        // tasks might be pushing objects to it concurrently.
        assert(_cm->out_of_regions() && _task_queue->size() == 0, "only way to reach here");
  
        while (!has_aborted()) {
!         oop obj;
!         if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
!           scan_object(obj);
  
            // And since we're towards the end, let's totally drain the
            // local queue and global stack.
            drain_local_queue(false);
            drain_global_stack(false);
--- 2878,2890 ----
        // We cannot check whether the global stack is empty, since other
        // tasks might be pushing objects to it concurrently.
        assert(_cm->out_of_regions() && _task_queue->size() == 0, "only way to reach here");
  
        while (!has_aborted()) {
!         G1TaskQueueEntry entry;
!         if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) {
!           scan_task_entry(entry);
  
            // And since we're towards the end, let's totally drain the
            // local queue and global stack.
            drain_local_queue(false);
            drain_global_stack(false);