< prev index next >
src/share/vm/gc/g1/g1ConcurrentMark.hpp
Print this page
rev 12666 : imported patch 8168467-use-taskentry-as-mark-stack-elem
rev 12667 : imported patch 8168467-kim-review
*** 1,7 ****
/*
! * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
--- 1,7 ----
/*
! * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*** 36,46 ****
class G1CMTask;
class G1ConcurrentMark;
class ConcurrentGCTimer;
class G1OldTracer;
class G1SurvivorRegions;
! typedef GenericTaskQueue<oop, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
--- 36,101 ----
class G1CMTask;
class G1ConcurrentMark;
class ConcurrentGCTimer;
class G1OldTracer;
class G1SurvivorRegions;
!
! #ifdef _MSC_VER
! #pragma warning(push)
! // warning C4522: multiple assignment operators specified
! #pragma warning(disable:4522)
! #endif
!
! // This is a container class for either an oop or a continuation address for
! // mark stack entries. Both are pushed onto the mark stack.
! class G1TaskQueueEntry VALUE_OBJ_CLASS_SPEC {
! private:
! void* _holder;
!
! static const uintptr_t ArraySliceBit = 1;
!
! G1TaskQueueEntry(oop obj) : _holder(obj) {
! assert(_holder != NULL, "Not allowed to set NULL task queue element");
! }
! G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
! public:
! G1TaskQueueEntry(const G1TaskQueueEntry& other) { _holder = other._holder; }
! G1TaskQueueEntry() : _holder(NULL) { }
!
! static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }
! static G1TaskQueueEntry from_oop(oop obj) { return G1TaskQueueEntry(obj); }
!
! G1TaskQueueEntry& operator=(const G1TaskQueueEntry& t) {
! _holder = t._holder;
! return *this;
! }
!
! volatile G1TaskQueueEntry& operator=(const volatile G1TaskQueueEntry& t) volatile {
! _holder = t._holder;
! return *this;
! }
!
! oop obj() const {
! assert(!is_array_slice(), "Trying to read array slice " PTR_FORMAT " as oop", p2i(_holder));
! return (oop)_holder;
! }
!
! HeapWord* slice() const {
! assert(is_array_slice(), "Trying to read oop " PTR_FORMAT " as array slice", p2i(_holder));
! return (HeapWord*)((uintptr_t)_holder & ~ArraySliceBit);
! }
!
! bool is_oop() const { return !is_array_slice(); }
! bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
! bool is_null() const { return _holder == NULL; }
! };
!
! #ifdef _MSC_VER
! #pragma warning(pop)
! #endif
!
! typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
*** 164,212 ****
// that all chunks at a lower address are valid chunks, and a singly linked free
// list connecting all empty chunks.
class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
public:
// Number of oops that can fit in a single chunk.
! static const size_t OopsPerChunk = 1024 - 1 /* One reference for the next pointer */;
private:
! struct OopChunk {
! OopChunk* next;
! oop data[OopsPerChunk];
};
size_t _max_chunk_capacity; // Maximum number of OopChunk elements on the stack.
! OopChunk* _base; // Bottom address of allocated memory area.
size_t _chunk_capacity; // Current maximum number of OopChunk elements.
char _pad0[DEFAULT_CACHE_LINE_SIZE];
! OopChunk* volatile _free_list; // Linked list of free chunks that can be allocated by users.
! char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
! OopChunk* volatile _chunk_list; // List of chunks currently containing data.
volatile size_t _chunks_in_chunk_list;
! char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*) - sizeof(size_t)];
volatile size_t _hwm; // High water mark within the reserved space.
char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
// Allocate a new chunk from the reserved memory, using the high water mark. Returns
// NULL if out of memory.
! OopChunk* allocate_new_chunk();
volatile bool _out_of_memory;
// Atomically add the given chunk to the list.
! void add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem);
// Atomically remove and return a chunk from the given list. Returns NULL if the
// list is empty.
! OopChunk* remove_chunk_from_list(OopChunk* volatile* list);
! void add_chunk_to_chunk_list(OopChunk* elem);
! void add_chunk_to_free_list(OopChunk* elem);
! OopChunk* remove_chunk_from_chunk_list();
! OopChunk* remove_chunk_from_free_list();
bool _should_expand;
// Resizes the mark stack to the given new capacity. Releases any previous
// memory if successful.
--- 219,267 ----
// that all chunks at a lower address are valid chunks, and a singly linked free
// list connecting all empty chunks.
class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
public:
// Number of task queue entries that can fit in a single chunk.
! static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
private:
! struct TaskQueueEntryChunk {
! TaskQueueEntryChunk* next;
! G1TaskQueueEntry data[EntriesPerChunk];
};
size_t _max_chunk_capacity; // Maximum number of TaskQueueEntryChunk elements on the stack.
! TaskQueueEntryChunk* _base; // Bottom address of allocated memory area.
size_t _chunk_capacity; // Current maximum number of TaskQueueEntryChunk elements.
char _pad0[DEFAULT_CACHE_LINE_SIZE];
! TaskQueueEntryChunk* volatile _free_list; // Linked list of free chunks that can be allocated by users.
! char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*)];
! TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
volatile size_t _chunks_in_chunk_list;
! char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];
volatile size_t _hwm; // High water mark within the reserved space.
char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
// Allocate a new chunk from the reserved memory, using the high water mark. Returns
// NULL if out of memory.
! TaskQueueEntryChunk* allocate_new_chunk();
volatile bool _out_of_memory;
// Atomically add the given chunk to the list.
! void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
// Atomically remove and return a chunk from the given list. Returns NULL if the
// list is empty.
! TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);
! void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
! void add_chunk_to_free_list(TaskQueueEntryChunk* elem);
! TaskQueueEntryChunk* remove_chunk_from_chunk_list();
! TaskQueueEntryChunk* remove_chunk_from_free_list();
bool _should_expand;
// Resizes the mark stack to the given new capacity. Releases any previous
// memory if successful.
*** 220,240 ****
static size_t capacity_alignment();
// Allocate and initialize the mark stack with the given number of oops.
bool initialize(size_t initial_capacity, size_t max_capacity);
! // Pushes the given buffer containing at most OopsPerChunk elements on the mark
! // stack. If less than OopsPerChunk elements are to be pushed, the array must
// be terminated with a NULL.
// Returns whether the buffer contents were successfully pushed to the global mark
// stack.
! bool par_push_chunk(oop* buffer);
// Pops a chunk from this mark stack, copying them into the given buffer. This
! // chunk may contain up to OopsPerChunk elements. If there are less, the last
// element in the array is a NULL pointer.
! bool par_pop_chunk(oop* buffer);
// Return whether the chunk list is empty. Racy due to unsynchronized access to
// _chunk_list.
bool is_empty() const { return _chunk_list == NULL; }
--- 275,295 ----
static size_t capacity_alignment();
// Allocate and initialize the mark stack with the given number of entries.
bool initialize(size_t initial_capacity, size_t max_capacity);
! // Pushes the given buffer containing at most EntriesPerChunk elements on the mark
! // stack. If less than EntriesPerChunk elements are to be pushed, the array must
// be terminated with a NULL.
// Returns whether the buffer contents were successfully pushed to the global mark
// stack.
! bool par_push_chunk(G1TaskQueueEntry* buffer);
// Pops a chunk from this mark stack, copying them into the given buffer. This
! // chunk may contain up to EntriesPerChunk elements. If there are less, the last
// element in the array is a NULL pointer.
! bool par_pop_chunk(G1TaskQueueEntry* buffer);
// Return whether the chunk list is empty. Racy due to unsynchronized access to
// _chunk_list.
bool is_empty() const { return _chunk_list == NULL; }
*** 249,259 ****
// Expand the stack, typically in response to an overflow condition
void expand();
// Return the approximate number of oops on this mark stack. Racy due to
// unsynchronized access to _chunks_in_chunk_list.
! size_t size() const { return _chunks_in_chunk_list * OopsPerChunk; }
void set_empty();
// Apply Fn to every oop on the mark stack. The mark stack must not
// be modified while iterating.
--- 304,314 ----
// Expand the stack, typically in response to an overflow condition
void expand();
// Return the approximate number of task queue entries on this mark stack. Racy due to
// unsynchronized access to _chunks_in_chunk_list.
! size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }
void set_empty();
// Apply Fn to every oop on the mark stack. The mark stack must not
// be modified while iterating.
*** 529,546 ****
void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
public:
// Manipulation of the global mark stack.
// The push and pop operations are used by tasks for transfers
// between task-local queues and the global mark stack.
! bool mark_stack_push(oop* arr) {
if (!_global_mark_stack.par_push_chunk(arr)) {
set_has_overflown();
return false;
}
return true;
}
! bool mark_stack_pop(oop* arr) {
return _global_mark_stack.par_pop_chunk(arr);
}
size_t mark_stack_size() { return _global_mark_stack.size(); }
size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
bool mark_stack_overflow() { return _global_mark_stack.is_out_of_memory(); }
--- 584,601 ----
void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
public:
// Manipulation of the global mark stack.
// The push and pop operations are used by tasks for transfers
// between task-local queues and the global mark stack.
! bool mark_stack_push(G1TaskQueueEntry* arr) {
if (!_global_mark_stack.par_push_chunk(arr)) {
set_has_overflown();
return false;
}
return true;
}
! bool mark_stack_pop(G1TaskQueueEntry* arr) {
return _global_mark_stack.par_pop_chunk(arr);
}
size_t mark_stack_size() { return _global_mark_stack.size(); }
size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
bool mark_stack_overflow() { return _global_mark_stack.is_out_of_memory(); }
*** 571,581 ****
ret += _accum_task_vtime[i];
return ret;
}
// Attempts to steal an object from the task queues of other tasks
! bool try_stealing(uint worker_id, int* hash_seed, oop& obj);
G1ConcurrentMark(G1CollectedHeap* g1h,
G1RegionToSpaceMapper* prev_bitmap_storage,
G1RegionToSpaceMapper* next_bitmap_storage);
~G1ConcurrentMark();
--- 626,636 ----
ret += _accum_task_vtime[i];
return ret;
}
// Attempts to steal an object from the task queues of other tasks
! bool try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry);
G1ConcurrentMark(G1CollectedHeap* g1h,
G1RegionToSpaceMapper* prev_bitmap_storage,
G1RegionToSpaceMapper* next_bitmap_storage);
~G1ConcurrentMark();
*** 826,836 ****
// Test whether obj might have already been passed over by the
// mark bitmap scan, and so needs to be pushed onto the mark stack.
bool is_below_finger(oop obj, HeapWord* global_finger) const;
! template<bool scan> void process_grey_object(oop obj);
public:
// Apply the closure on the given area of the objArray. Return the number of words
// scanned.
inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
// It resets the task; it should be called right at the beginning of
--- 881,891 ----
// Test whether obj might have already been passed over by the
// mark bitmap scan, and so needs to be pushed onto the mark stack.
bool is_below_finger(oop obj, HeapWord* global_finger) const;
! template<bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry);
public:
// Apply the closure on the given area of the objArray. Return the number of words
// scanned.
inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
// It resets the task; it should be called right at the beginning of
*** 891,904 ****
// e.g. obj is below its containing region's NTAMS.
// Precondition: obj is a valid heap object.
inline void deal_with_reference(oop obj);
// It scans an object and visits its children.
! inline void scan_object(oop obj);
// It pushes an object on the local queue.
! inline void push(oop obj);
// Move entries to the global stack.
void move_entries_to_global_stack();
// Move entries from the global stack, return true if we were successful to do so.
bool get_entries_from_global_stack();
--- 946,959 ----
// e.g. obj is below its containing region's NTAMS.
// Precondition: obj is a valid heap object.
inline void deal_with_reference(oop obj);
// It scans an object and visits its children.
! inline void scan_task_entry(G1TaskQueueEntry task_entry);
// It pushes an object on the local queue.
! inline void push(G1TaskQueueEntry task_entry);
// Move entries to the global stack.
void move_entries_to_global_stack();
// Move entries from the global stack, return true if we were successful to do so.
bool get_entries_from_global_stack();
< prev index next >