 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_HEAPREGION_HPP
#define SHARE_VM_GC_G1_HEAPREGION_HPP

#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1BlockOffsetTable.hpp"
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/heapRegionTracer.hpp"
#include "gc/g1/heapRegionType.hpp"
#include "gc/g1/survRateGroup.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "utilities/macros.hpp"

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// NOTE: Although a HeapRegion is a Space, its
// Space::initDirtyCardClosure method must not be called.
// The problem is that the existence of this method breaks
// the independence of barrier sets from remembered sets.
// The solution is to remove this method from the definition
// of a Space.

// Each heap region is self contained. top() and end() can never
// be set beyond the end of the region. For humongous objects,
// the first region is a StartsHumongous region. If the humongous
// object is larger than a heap region, the following regions will
// ...

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  // Auxiliary functions for scan_and_forward support.
  // See comments for CompactibleSpace for more information.
  inline HeapWord* scan_limit() const {
    return top();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return true; // Always true, since scan_limit is top
  }

  inline size_t scanned_block_size(const HeapWord* addr) const {
    return HeapRegion::block_size(addr); // Avoid virtual call
  }
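
  // Illustration only (a sketch of how CompactibleSpace::scan_and_forward
  // uses the three helpers above; not code from this file): the forwarding
  // pass walks the region from bottom() up to scan_limit(), asking the
  // helpers whether each block is an object and how big it is:
  //
  //   HeapWord* cur = bottom();
  //   while (cur < scan_limit()) {
  //     if (scanned_block_is_obj(cur)) {
  //       // ... compute and install the forwarding pointer for this object ...
  //     }
  //     cur += scanned_block_size(cur);
  //   }
  //
  // Because scan_limit() is top(), scanned_block_is_obj() can always return
  // true: everything below top() in a HeapRegion is an object.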

  void report_region_type_change(G1HeapRegionTraceType::Type to);

 protected:
  // The index of this region in the heap region sequence.
  uint _hrm_index;

  AllocationContext_t _allocation_context;

  HeapRegionType _type;

  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of one of a number of special subsets,
  // each represented as a linked list through the field below. Currently
  // there is only one such set:
  //   The collection set.
  HeapRegion* _next_in_special_set;

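  // Illustration only (a hedged sketch, not code from this file): embedding
  // the link in the region itself means a special set can be walked without
  // any side allocation, along the lines of:
  //
  //   for (HeapRegion* r = cs_head; r != NULL; r = r->_next_in_special_set) {
  //     // ... process each region chosen for the collection set ...
  //   }
  //
  // where cs_head is assumed to be the head of the collection set list.
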
// ...

  // since it will also be reclaimed if we collect the region.
  size_t reclaimable_bytes() {
    size_t known_live_bytes = live_bytes();
    assert(known_live_bytes <= capacity(), "sanity");
    return capacity() - known_live_bytes;
  }
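
  // Worked example (illustrative numbers only): for a 1 MB region with
  // live_bytes() == 256 KB, reclaimable_bytes() == 1024 KB - 256 KB
  // == 768 KB, i.e. collecting the region frees everything that is not
  // known to be live.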

  // An upper bound on the number of live bytes in the region.
  size_t max_live_bytes() { return used() - garbage_bytes(); }

  void add_to_marked_bytes(size_t incr_bytes) {
    _next_marked_bytes = _next_marked_bytes + incr_bytes;
  }

  void zero_marked_bytes() {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

  const char* get_type_str() const { return _type.get_str(); }
  const char* get_short_type_str() const { return _type.get_short_str(); }
  G1HeapRegionTraceType::Type get_trace_type() { return _type.get_trace_type(); }

  bool is_free() const { return _type.is_free(); }

  bool is_young() const { return _type.is_young(); }
  bool is_eden() const { return _type.is_eden(); }
  bool is_survivor() const { return _type.is_survivor(); }

  bool is_humongous() const { return _type.is_humongous(); }
  bool is_starts_humongous() const { return _type.is_starts_humongous(); }
  bool is_continues_humongous() const { return _type.is_continues_humongous(); }

  bool is_old() const { return _type.is_old(); }

  // A pinned region contains objects which are not moved by garbage collections.
  // Humongous regions and archive regions are pinned.
  bool is_pinned() const { return _type.is_pinned(); }

  // An archive region is a pinned region, also tagged as old, which
  // should not be marked during mark/sweep. This allows the address
  // space to be shared by JVM instances.
// ...

    assert( surv_rate_group != NULL, "pre-condition" );
    assert( _surv_rate_group == NULL, "pre-condition" );
    assert( is_young(), "pre-condition" );

    _surv_rate_group = surv_rate_group;
    _age_index = surv_rate_group->next_age_index();
  }

  void uninstall_surv_rate_group() {
    if (_surv_rate_group != NULL) {
      assert( _age_index > -1, "pre-condition" );
      assert( is_young(), "pre-condition" );

      _surv_rate_group = NULL;
      _age_index = -1;
    } else {
      assert( _age_index == -1, "pre-condition" );
    }
  }
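
  // Illustration only (a hedged sketch of the intended pairing, inferred
  // from the asserts above): a young region gets a survivor rate group
  // installed exactly once while it is young, and the group is removed
  // before the region leaves the young generation:
  //
  //   region->install_surv_rate_group(group);  // _age_index becomes valid
  //   // ... region ages in the young generation ...
  //   region->uninstall_surv_rate_group();     // _age_index back to -1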

  void set_free();

  void set_eden();
  void set_eden_pre_gc();
  void set_survivor();

  void set_old();

  void set_archive();

  // Determine if an object has been allocated since the last
  // mark performed by the collector. This returns true iff the object
  // is within the unmarked area of the region.
  bool obj_allocated_since_prev_marking(oop obj) const {
    return (HeapWord *) obj >= prev_top_at_mark_start();
  }
  bool obj_allocated_since_next_marking(oop obj) const {
    return (HeapWord *) obj >= next_top_at_mark_start();
  }
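
  // Illustration only (a sketch of the top-at-mark-start invariant these
  // checks rely on): with bottom() <= TAMS <= top(), anything at or above
  // the relevant TAMS was allocated after that marking cycle started and
  // is therefore treated as implicitly live:
  //
  //   bottom()          TAMS                 top()            end()
  //     |-- marked data --|-- new allocations --|----- free -----|
  //                        \___ obj here => allocated since marking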

  // Returns the "evacuation_failed" property of the region.
  bool evacuation_failed() { return _evacuation_failed; }

  // Sets the "evacuation_failed" property of the region.
  void set_evacuation_failed(bool b) {
    _evacuation_failed = b;

    if (b) {
      _next_marked_bytes = 0;