93 #pragma warning(pop)
94 #endif
95
96 typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
97 typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
98
99 // Closure used by CM during concurrent reference discovery
100 // and reference processing (during remarking) to determine
101 // if a particular object is alive. It is primarily used
102 // to determine if referents of discovered reference objects
103 // are alive. An instance is also embedded into the
104 // reference processor as the _is_alive_non_header field
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1; // The heap, set at construction; never changed afterwards.
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  // Returns whether obj is considered alive. Defined out of line.
  bool do_object_b(oop obj);
};
112
113 // A generic CM bit map. This is essentially a wrapper around the BitMap
114 // class, with one bit per (1<<_shifter) HeapWords.
115
class G1CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord*  _bmStartWord; // base address of range covered by map
  size_t     _bmWordSize;  // map size (in #HeapWords covered)
  const int  _shifter;     // map to char or bit; one bit covers (1 << _shifter) HeapWords
  BitMapView _bm;          // the bit map itself

 public:
  // constructor
  G1CMBitMapRO(int shifter);

  // inquiries
  HeapWord* startWord() const { return _bmStartWord; }
  // the following is one past the last word in space
  HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }

  // read marks

  // Returns whether the bit for addr is set. addr must lie inside the
  // covered range [_bmStartWord, _bmStartWord + _bmWordSize).
  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  inline bool iterate(BitMapClosure* cl, MemRegion mr);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
                                     const HeapWord* limit = NULL) const;

  // conversion utilities
  // Bit offset -> heap address (inverse of heapWordToOffset).
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  // Heap address -> bit offset into _bm.
  size_t heapWordToOffset(const HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }

  // The argument addr should be the start address of a valid object
  inline HeapWord* nextObject(HeapWord* addr);

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  NOT_PRODUCT(bool covers(MemRegion rs) const;)
};
165
// Listener hooked into the bitmap's backing-storage mapper; notified when
// regions of the underlying memory are committed.
class G1CMBitMapMappingChangedListener : public G1MappingChangedListener {
 private:
  G1CMBitMap* _bm; // The bitmap to notify; NULL until set_bitmap() is called.
 public:
  G1CMBitMapMappingChangedListener() : _bm(NULL) {}

  void set_bitmap(G1CMBitMap* bm) { _bm = bm; }

  // Invoked for regions [start_idx, start_idx + num_regions). Defined out of line.
  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};
176
// Writable mark bitmap; extends the read-only view with mark/clear operations.
class G1CMBitMap : public G1CMBitMapRO {
 private:
  G1CMBitMapMappingChangedListener _listener; // Reacts to backing-storage commits.

 public:
  static size_t compute_size(size_t heap_size);
  // Returns the amount of bytes on the heap between two marks in the bitmap.
  static size_t mark_distance();
  // Returns how many bytes (or bits) of the heap a single byte (or bit) of the
  // mark bitmap corresponds to. This is the same as the mark distance above.
  static size_t heap_map_factor() {
    return mark_distance();
  }

  // One bit per minimum object alignment unit; the listener points back at
  // this bitmap so commits can be routed here.
  G1CMBitMap() : G1CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }

  // Initializes the underlying BitMap to cover the given area.
  void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);

  // Write marks.
  inline void mark(HeapWord* addr);
  inline void clear(HeapWord* addr);
  inline bool parMark(HeapWord* addr); // Atomic variant of mark(); returns whether this call set the bit.

  void clear_range(MemRegion mr);
};
203
204 // Represents the overflow mark stack used by concurrent marking.
205 //
206 // Stores oops in a huge buffer in virtual memory that is always fully committed.
207 // Resizing may only happen during a STW pause when the stack is empty.
208 //
209 // Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
210 // stack memory is split into evenly sized chunks of oops. Users can only
211 // add or remove entries on that basis.
212 // Chunks are filled in increasing address order. Not completely filled chunks
213 // have a NULL element as a terminating element.
214 //
215 // Every chunk has a header containing a single pointer element used for memory
216 // management. This wastes some space, but is negligible (< .1% with current sizing).
217 //
218 // Memory management is done using a mix of tracking a high water-mark indicating
219 // that all chunks at a lower address are valid chunks, and a singly linked free
  friend class G1CMTask;

 protected:
  ConcurrentMarkThread* _cmThread;     // The thread doing the work
  G1CollectedHeap*      _g1h;          // The heap
  uint                  _parallel_marking_threads; // The number of marking
                                                   // threads we're using
  uint                  _max_parallel_marking_threads; // Max number of marking
                                                       // threads we'll ever use
  double                _sleep_factor; // How much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // Marking target overhead for
                                                // a single task

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  G1CMBitMap            _markBitMap1;
  G1CMBitMap            _markBitMap2;
  G1CMBitMapRO*         _prevMarkBitMap; // Completed mark bitmap (read-only view)
  G1CMBitMap*           _nextMarkBitMap; // Under-construction mark bitmap

  // Heap bounds
  HeapWord*             _heap_start;
  HeapWord*             _heap_end;

  // Root region tracking and claiming
  G1CMRootRegions       _root_regions;

  // For grey objects
  G1CMMarkStack         _global_mark_stack; // Grey objects behind global finger
  HeapWord* volatile    _finger;            // The global finger, region aligned,
                                            // always points to the end of the
                                            // last claimed region

  // Marking tasks
  uint                  _max_worker_id; // Maximum worker id
  uint                  _active_tasks;  // Number of tasks currently active
  G1CMTask**            _tasks;         // Task queue array (max_worker_id len)
  G1CMTaskQueueSet*     _task_queues;   // Task queue set
609 _accum_task_vtime[i] += vtime;
610 }
611
612 double all_task_accum_vtime() {
613 double ret = 0.0;
614 for (uint i = 0; i < _max_worker_id; ++i)
615 ret += _accum_task_vtime[i];
616 return ret;
617 }
618
  // Attempts to steal an object from the task queues of other tasks.
  // NOTE(review): presumably returns whether an entry was stolen into
  // task_entry — body not in view, confirm at the definition.
  bool try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry);

  G1ConcurrentMark(G1CollectedHeap* g1h,
                   G1RegionToSpaceMapper* prev_bitmap_storage,
                   G1RegionToSpaceMapper* next_bitmap_storage);
  ~G1ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  // Accessors for the two marking bitmaps: the completed (previous) bitmap is
  // handed out through its read-only type; the next one is writable.
  G1CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  G1CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three are interaction between CM and
  // G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe. hr is the region that
  // contains the object and it's passed optionally from callers who
  // might already have it (no point in recalculating it).
  inline void grayRoot(oop obj,
                       HeapRegion* hr = NULL);
649
|
93 #pragma warning(pop)
94 #endif
95
96 typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
97 typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
98
99 // Closure used by CM during concurrent reference discovery
100 // and reference processing (during remarking) to determine
101 // if a particular object is alive. It is primarily used
102 // to determine if referents of discovered reference objects
103 // are alive. An instance is also embedded into the
104 // reference processor as the _is_alive_non_header field
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1; // The heap, set at construction; never changed afterwards.
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  // Returns whether obj is considered alive. Defined out of line.
  bool do_object_b(oop obj);
};
112
113 // Closure for iteration over bitmaps
114 class G1CMBitMapClosure VALUE_OBJ_CLASS_SPEC {
115 private:
116 G1ConcurrentMark* const _cm;
117 G1CMTask* const _task;
118 public:
119 G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm) : _task(task), _cm(cm) { }
120
121 bool do_addr(HeapWord* const addr);
122 };
123
// Listener hooked into the bitmap's backing-storage mapper; notified when
// regions of the underlying memory are committed.
class G1CMBitMapMappingChangedListener : public G1MappingChangedListener {
 private:
  G1CMBitMap* _bm; // The bitmap to notify; NULL until set_bitmap() is called.
 public:
  G1CMBitMapMappingChangedListener() : _bm(NULL) {}

  void set_bitmap(G1CMBitMap* bm) { _bm = bm; }

  // Invoked for regions [start_idx, start_idx + num_regions). Defined out of line.
  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};
134
135 // A generic mark bitmap for concurrent marking. This is essentially a wrapper
136 // around the BitMap class that is based on HeapWords, with one bit per (1 << _shifter) HeapWords.
137 class G1CMBitMap VALUE_OBJ_CLASS_SPEC {
138 private:
139 MemRegion _covered; // The heap area covered by this bitmap.
140
141 const int _shifter; // Shift amount from heap index to bit index in the bitmap.
142
143 BitMapView _bm; // The actual bitmap.
144
145 G1CMBitMapMappingChangedListener _listener;
146
147 inline void check_mark(HeapWord* addr) NOT_DEBUG_RETURN;
148
149 // Convert from bit offset to address.
150 HeapWord* offset_to_addr(size_t offset) const {
151 return _covered.start() + (offset << _shifter);
152 }
153 // Convert from address to bit offset.
154 size_t addr_to_offset(const HeapWord* addr) const {
155 return pointer_delta(addr, _covered.start()) >> _shifter;
156 }
157 public:
158 static size_t compute_size(size_t heap_size);
159 // Returns the amount of bytes on the heap between two marks in the bitmap.
160 static size_t mark_distance();
161 // Returns how many bytes (or bits) of the heap a single byte (or bit) of the
162 // mark bitmap corresponds to. This is the same as the mark distance above.
163 static size_t heap_map_factor() {
164 return mark_distance();
165 }
166
167 G1CMBitMap() : _covered(), _bm(), _shifter(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
168
169 // Initializes the underlying BitMap to cover the given area.
170 void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
171
172 // read marks
173 bool is_marked(HeapWord* addr) const {
174 assert(_covered.contains(addr),
175 "Address " PTR_FORMAT " is outside underlying space from " PTR_FORMAT " to " PTR_FORMAT,
176 p2i(addr), p2i(_covered.start()), p2i(_covered.end()));
177 return _bm.at(addr_to_offset(addr));
178 }
179
180 // Apply the closure to the addresses that correspond to marked bits in the bitmap.
181 inline bool iterate(G1CMBitMapClosure* cl, MemRegion mr);
182
183 // Return the address corresponding to the next marked bit at or after
184 // "addr", and before "limit", if "limit" is non-NULL. If there is no
185 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
186 inline HeapWord* get_next_marked_addr(const HeapWord* addr,
187 const HeapWord* limit) const;
188
189 // The argument addr should be the start address of a valid object
190 inline HeapWord* addr_after_obj(HeapWord* addr);
191
192 void print_on_error(outputStream* st, const char* prefix) const;
193
194 // Write marks.
195 inline void mark(HeapWord* addr);
196 inline void clear(HeapWord* addr);
197 inline bool par_mark(HeapWord* addr);
198
199 void clear_range(MemRegion mr);
200 };
201
202 // Represents the overflow mark stack used by concurrent marking.
203 //
204 // Stores oops in a huge buffer in virtual memory that is always fully committed.
205 // Resizing may only happen during a STW pause when the stack is empty.
206 //
207 // Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
208 // stack memory is split into evenly sized chunks of oops. Users can only
209 // add or remove entries on that basis.
210 // Chunks are filled in increasing address order. Not completely filled chunks
211 // have a NULL element as a terminating element.
212 //
213 // Every chunk has a header containing a single pointer element used for memory
214 // management. This wastes some space, but is negligible (< .1% with current sizing).
215 //
216 // Memory management is done using a mix of tracking a high water-mark indicating
217 // that all chunks at a lower address are valid chunks, and a singly linked free
  friend class G1CMTask;

 protected:
  ConcurrentMarkThread* _cmThread;     // The thread doing the work
  G1CollectedHeap*      _g1h;          // The heap
  uint                  _parallel_marking_threads; // The number of marking
                                                   // threads we're using
  uint                  _max_parallel_marking_threads; // Max number of marking
                                                       // threads we'll ever use
  double                _sleep_factor; // How much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // Marking target overhead for
                                                // a single task

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  G1CMBitMap            _markBitMap1;
  G1CMBitMap            _markBitMap2;
  G1CMBitMap*           _prevMarkBitMap; // Completed mark bitmap
  G1CMBitMap*           _nextMarkBitMap; // Under-construction mark bitmap

  // Heap bounds
  HeapWord*             _heap_start;
  HeapWord*             _heap_end;

  // Root region tracking and claiming
  G1CMRootRegions       _root_regions;

  // For grey objects
  G1CMMarkStack         _global_mark_stack; // Grey objects behind global finger
  HeapWord* volatile    _finger;            // The global finger, region aligned,
                                            // always points to the end of the
                                            // last claimed region

  // Marking tasks
  uint                  _max_worker_id; // Maximum worker id
  uint                  _active_tasks;  // Number of tasks currently active
  G1CMTask**            _tasks;         // Task queue array (max_worker_id len)
  G1CMTaskQueueSet*     _task_queues;   // Task queue set
607 _accum_task_vtime[i] += vtime;
608 }
609
610 double all_task_accum_vtime() {
611 double ret = 0.0;
612 for (uint i = 0; i < _max_worker_id; ++i)
613 ret += _accum_task_vtime[i];
614 return ret;
615 }
616
617 // Attempts to steal an object from the task queues of other tasks
618 bool try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry);
619
620 G1ConcurrentMark(G1CollectedHeap* g1h,
621 G1RegionToSpaceMapper* prev_bitmap_storage,
622 G1RegionToSpaceMapper* next_bitmap_storage);
623 ~G1ConcurrentMark();
624
625 ConcurrentMarkThread* cmThread() { return _cmThread; }
626
627 const G1CMBitMap* const prevMarkBitMap() const { return _prevMarkBitMap; }
628 G1CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
629
630 // Returns the number of GC threads to be used in a concurrent
631 // phase based on the number of GC threads being used in a STW
632 // phase.
633 uint scale_parallel_threads(uint n_par_threads);
634
635 // Calculates the number of GC threads to be used in a concurrent phase.
636 uint calc_parallel_marking_threads();
637
638 // The following three are interaction between CM and
639 // G1CollectedHeap
640
641 // This notifies CM that a root during initial-mark needs to be
642 // grayed. It is MT-safe. hr is the region that
643 // contains the object and it's passed optionally from callers who
644 // might already have it (no point in recalculating it).
645 inline void grayRoot(oop obj,
646 HeapRegion* hr = NULL);
647
|