
src/share/vm/gc/g1/g1ConcurrentMark.cpp

rev 12666 : imported patch 8168467-use-taskentry-as-mark-stack-elem
rev 12667 : imported patch 8168467-kim-review

*** 144,162 ****
  bool G1CMMarkStack::resize(size_t new_capacity) {
    assert(is_empty(), "Only resize when stack is empty.");
    assert(new_capacity <= _max_chunk_capacity,
           "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
  
!   OopChunk* new_base = MmapArrayAllocator<OopChunk, mtGC>::allocate_or_null(new_capacity);
  
    if (new_base == NULL) {
!     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(OopChunk));
      return false;
    }
    // Release old mapping.
    if (_base != NULL) {
!     MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
    }
  
    _base = new_base;
    _chunk_capacity = new_capacity;
    set_empty();
--- 144,162 ----
  bool G1CMMarkStack::resize(size_t new_capacity) {
    assert(is_empty(), "Only resize when stack is empty.");
    assert(new_capacity <= _max_chunk_capacity,
           "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
  
!   TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::allocate_or_null(new_capacity);
  
    if (new_base == NULL) {
!     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
      return false;
    }
    // Release old mapping.
    if (_base != NULL) {
!     MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
    }
  
    _base = new_base;
    _chunk_capacity = new_capacity;
    set_empty();
*** 164,183 ****
  
    return true;
  }
  
  size_t G1CMMarkStack::capacity_alignment() {
!   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(OopChunk)) / sizeof(void*);
  }
  
  bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
    guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
  
!   size_t const OopChunkSizeInVoidStar = sizeof(OopChunk) / sizeof(void*);
  
!   _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
!   size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
  
    guarantee(initial_chunk_capacity <= _max_chunk_capacity,
              "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
              _max_chunk_capacity,
              initial_chunk_capacity);
--- 164,183 ----
  
    return true;
  }
  
  size_t G1CMMarkStack::capacity_alignment() {
!   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
  }
  
  bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
    guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
  
!   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
  
!   _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
!   size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  
    guarantee(initial_chunk_capacity <= _max_chunk_capacity,
              "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
              _max_chunk_capacity,
              initial_chunk_capacity);
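
As a side note, the arithmetic in capacity_alignment() and initialize() converts capacities given in G1TaskQueueEntry units into whole TaskQueueEntryChunk counts aligned to the OS allocation granularity. The following is a minimal standalone sketch of that calculation; the chunk layout, the entry count per chunk and the 4 KB granularity are assumed placeholder values, not the HotSpot definitions.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins for the HotSpot types involved; the real sizes come
// from G1TaskQueueEntry and G1CMMarkStack::TaskQueueEntryChunk.
typedef void* G1TaskQueueEntry_t;                 // one queue entry (pointer sized)
const size_t EntriesPerChunk = 1024 - 1;          // assumed payload entries per chunk
struct TaskQueueEntryChunk_t {
  TaskQueueEntryChunk_t* next;
  G1TaskQueueEntry_t data[EntriesPerChunk];
};

// gcd/lcm helpers, mirroring what lcm() provides inside HotSpot.
static size_t gcd(size_t a, size_t b) { return b == 0 ? a : gcd(b, a % b); }
static size_t lcm(size_t a, size_t b) { return a / gcd(a, b) * b; }

int main() {
  const size_t granularity = 4096;                // assumed os::vm_allocation_granularity()

  // capacity_alignment(): capacities (counted in entries) must be a multiple of
  // this so that a whole number of chunks covers whole allocation-granularity pages.
  size_t alignment = lcm(granularity, sizeof(TaskQueueEntryChunk_t)) / sizeof(G1TaskQueueEntry_t);

  // initialize(): turn a requested capacity in entries into a chunk count.
  size_t chunk_size_in_entries = sizeof(TaskQueueEntryChunk_t) / sizeof(G1TaskQueueEntry_t);
  size_t max_capacity = 16 * 1024 * 1024;         // example: 16M entries requested
  size_t aligned = (max_capacity + alignment - 1) / alignment * alignment;  // align_size_up
  size_t max_chunk_capacity = aligned / chunk_size_in_entries;

  printf("alignment: %zu entries, max chunks: %zu\n", alignment, max_chunk_capacity);
  return 0;
}
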
*** 209,261 ****
    }
  }
  
  G1CMMarkStack::~G1CMMarkStack() {
    if (_base != NULL) {
!     MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
    }
  }
  
! void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
    elem->next = *list;
    *list = elem;
  }
  
! void G1CMMarkStack::add_chunk_to_chunk_list(OopChunk* elem) {
    MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
    add_chunk_to_list(&_chunk_list, elem);
    _chunks_in_chunk_list++;
  }
  
! void G1CMMarkStack::add_chunk_to_free_list(OopChunk* elem) {
    MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
    add_chunk_to_list(&_free_list, elem);
  }
  
! G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
!   OopChunk* result = *list;
    if (result != NULL) {
      *list = (*list)->next;
    }
    return result;
  }
  
! G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
    MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
!   OopChunk* result = remove_chunk_from_list(&_chunk_list);
    if (result != NULL) {
      _chunks_in_chunk_list--;
    }
    return result;
  }
  
! G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_free_list() {
    MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
    return remove_chunk_from_list(&_free_list);
  }
  
! G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
    // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
    // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
    // wraparound of _hwm.
    if (_hwm >= _chunk_capacity) {
      return NULL;
--- 209,261 ----
    }
  }
  
  G1CMMarkStack::~G1CMMarkStack() {
    if (_base != NULL) {
!     MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
    }
  }
  
! void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
    elem->next = *list;
    *list = elem;
  }
  
! void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
    MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
    add_chunk_to_list(&_chunk_list, elem);
    _chunks_in_chunk_list++;
  }
  
! void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
    MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
    add_chunk_to_list(&_free_list, elem);
  }
  
! G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
!   TaskQueueEntryChunk* result = *list;
    if (result != NULL) {
      *list = (*list)->next;
    }
    return result;
  }
  
! G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
    MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
!   TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
    if (result != NULL) {
      _chunks_in_chunk_list--;
    }
    return result;
  }
  
! G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
    MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
    return remove_chunk_from_list(&_free_list);
  }
  
! G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
    // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
    // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
    // wraparound of _hwm.
    if (_hwm >= _chunk_capacity) {
      return NULL;
*** 264,281 ****
    size_t cur_idx = Atomic::add(1, &_hwm) - 1;
    if (cur_idx >= _chunk_capacity) {
      return NULL;
    }
  
!   OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
    result->next = NULL;
    return result;
  }
  
  bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
    // Get a new chunk.
!   OopChunk* new_chunk = remove_chunk_from_free_list();
  
    if (new_chunk == NULL) {
      // Did not get a chunk from the free list. Allocate from backing memory.
      new_chunk = allocate_new_chunk();
    }
--- 264,281 ----
    size_t cur_idx = Atomic::add(1, &_hwm) - 1;
    if (cur_idx >= _chunk_capacity) {
      return NULL;
    }
  
!   TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
    result->next = NULL;
    return result;
  }
  
  bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
    // Get a new chunk.
!   TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
  
    if (new_chunk == NULL) {
      // Did not get a chunk from the free list. Allocate from backing memory.
      new_chunk = allocate_new_chunk();
    }
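
The allocate_new_chunk() path above claims slots from the backing array with a racy read of _hwm followed by an atomic increment, and then re-checks the claimed index. A minimal sketch of that claiming pattern, written here with std::atomic instead of HotSpot's Atomic::add and with an assumed ChunkCapacity, looks like this:

#include <atomic>
#include <cstddef>
#include <new>

struct Chunk { Chunk* next; };

static const size_t ChunkCapacity = 1024;   // assumed capacity of the backing array
static Chunk backing[ChunkCapacity];
static std::atomic<size_t> hwm(0);          // high-water mark: next unclaimed index

// Claim one chunk from the backing array, or return NULL when it is exhausted.
// The early dirty read only bounds how far hwm can overshoot ChunkCapacity
// (by at most the number of concurrently allocating threads); correctness
// comes from re-checking the index returned by the atomic increment.
Chunk* allocate_new_chunk() {
  if (hwm.load(std::memory_order_relaxed) >= ChunkCapacity) {
    return NULL;
  }
  size_t cur_idx = hwm.fetch_add(1);        // claims index cur_idx exclusively
  if (cur_idx >= ChunkCapacity) {
    return NULL;
  }
  Chunk* result = ::new (&backing[cur_idx]) Chunk;
  result->next = NULL;
  return result;
}
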
*** 291,301 ****
  
    return true;
  }
  
  bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
!   OopChunk* cur = remove_chunk_from_chunk_list();
  
    if (cur == NULL) {
      return false;
    }
  
--- 291,301 ----
  
    return true;
  }
  
  bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
!   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
  
    if (cur == NULL) {
      return false;
    }
  
*** 2007,2020 ****
      _phase(phase),
      _info(info)
    { }
  
    void operator()(G1TaskQueueEntry task_entry) const {
!     guarantee(task_entry.is_array_slice() || task_entry.obj()->is_oop(),
                "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
                p2i(task_entry.obj()), _phase, _info);
!     guarantee(task_entry.is_array_slice() || !_g1h->is_in_cset(task_entry.obj()),
                "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
                p2i(task_entry.obj()), _phase, _info);
    }
  };
  
--- 2007,2024 ----
      _phase(phase),
      _info(info)
    { }
  
    void operator()(G1TaskQueueEntry task_entry) const {
!     if (task_entry.is_array_slice()) {
!       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
!       return;
!     }
!     guarantee(task_entry.obj()->is_oop(),
                "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
                p2i(task_entry.obj()), _phase, _info);
!     guarantee(!_g1h->is_in_cset(task_entry.obj()),
                "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
                p2i(task_entry.obj()), _phase, _info);
    }
  };
  
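
The verification closure above treats array slices and object entries differently, which relies on G1TaskQueueEntry being able to tell the two apart. A rough sketch of one way such a tagged entry could be represented follows; the names and the single low tag bit are illustrative assumptions, the actual class is defined in g1ConcurrentMark.hpp and may differ in detail.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Minimal stand-ins for HotSpot types, for illustration only.
typedef void* oop_t;          // an object reference
typedef char* HeapWord_t;     // an address inside an objArray scanned in slices

// Sketch of a task queue entry that stores either an oop or an array-slice
// address in one pointer-sized field, using the low bit as the discriminator.
class TaskEntrySketch {
  void* _holder;
  static const uintptr_t ArraySliceBit = 1;

  explicit TaskEntrySketch(void* holder) : _holder(holder) {}
public:
  TaskEntrySketch() : _holder(NULL) {}

  static TaskEntrySketch from_oop(oop_t obj) {
    assert((((uintptr_t)obj & ArraySliceBit) == 0) && "oops are at least pointer aligned");
    return TaskEntrySketch((void*)obj);
  }
  static TaskEntrySketch from_slice(HeapWord_t addr) {
    return TaskEntrySketch((void*)((uintptr_t)addr | ArraySliceBit));
  }

  bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }

  oop_t obj() const {
    assert(!is_array_slice() && "not an object entry");
    return (oop_t)_holder;
  }
  HeapWord_t slice() const {
    assert(is_array_slice() && "not a slice entry");
    return (HeapWord_t)((uintptr_t)_holder & ~ArraySliceBit);
  }
};

With a representation like this, operator() can check is_array_slice() first and only call obj()->is_oop() on genuine object entries, matching the structure of the updated closure.
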
*** 2206,2216 ****
      assert(addr >= _task->finger(), "invariant");
  
      // We move that task's local finger along.
      _task->move_finger_to(addr);
  
!     _task->scan_object(oop(addr));
      // we only partially drain the local queue and global stack
      _task->drain_local_queue(true);
      _task->drain_global_stack(true);
  
      // if the has_aborted flag has been raised, we need to bail out of
--- 2210,2220 ----
      assert(addr >= _task->finger(), "invariant");
  
      // We move that task's local finger along.
      _task->move_finger_to(addr);
  
!     _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
      // we only partially drain the local queue and global stack
      _task->drain_local_queue(true);
      _task->drain_global_stack(true);
  
      // if the has_aborted flag has been raised, we need to bail out of
*** 2462,2479 ****
    } else {
      target_size = 0;
    }
  
    if (_task_queue->size() > target_size) {
!     G1TaskQueueEntry obj;
!     bool ret = _task_queue->pop_local(obj);
      while (ret) {
!       scan_object(obj);
        if (_task_queue->size() <= target_size || has_aborted()) {
          ret = false;
        } else {
!         ret = _task_queue->pop_local(obj);
        }
      }
    }
  }
  
--- 2466,2483 ----
    } else {
      target_size = 0;
    }
  
    if (_task_queue->size() > target_size) {
!     G1TaskQueueEntry entry;
!     bool ret = _task_queue->pop_local(entry);
      while (ret) {
!       scan_task_entry(entry);
        if (_task_queue->size() <= target_size || has_aborted()) {
          ret = false;
        } else {
!         ret = _task_queue->pop_local(entry);
        }
      }
    }
  }
  
*** 2874,2886 ****
      // We cannot check whether the global stack is empty, since other
      // tasks might be pushing objects to it concurrently.
      assert(_cm->out_of_regions() && _task_queue->size() == 0, "only way to reach here");
  
      while (!has_aborted()) {
!       G1TaskQueueEntry obj;
!       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
!         scan_object(obj);
  
          // And since we're towards the end, let's totally drain the
          // local queue and global stack.
          drain_local_queue(false);
          drain_global_stack(false);
--- 2878,2890 ----
      // We cannot check whether the global stack is empty, since other
      // tasks might be pushing objects to it concurrently.
      assert(_cm->out_of_regions() && _task_queue->size() == 0, "only way to reach here");
  
      while (!has_aborted()) {
!       G1TaskQueueEntry entry;
!       if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) {
!         scan_task_entry(entry);
  
          // And since we're towards the end, let's totally drain the
          // local queue and global stack.
          drain_local_queue(false);
          drain_global_stack(false);