237
238 // A heap region may be a member of one of a number of special subsets, each
239 // represented as linked lists through the field below. Currently, there
240 // is only one set:
241 // The collection set.
242 HeapRegion* _next_in_special_set;
243
244 // next region in the young "generation" region set
245 HeapRegion* _next_young_region;
246
247 // Next region whose cards need cleaning
248 HeapRegion* _next_dirty_cards_region;
249
250 // Fields used by the HeapRegionSetBase class and subclasses.
251 HeapRegion* _next;
252 HeapRegion* _prev;
253 #ifdef ASSERT
254 HeapRegionSetBase* _containing_set;
255 #endif // ASSERT
256
257 // For parallel heapRegion traversal.
258 jint _claimed;
259
260 // We use concurrent marking to determine the amount of live data
261 // in each heap region.
262 size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
263 size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
264
265 // The calculated GC efficiency of the region.
266 double _gc_efficiency;
267
268 int _young_index_in_cset;
269 SurvRateGroup* _surv_rate_group;
270 int _age_index;
271
272 // The start of the unmarked area. The unmarked area extends from this
273 // word until the top and/or end of the region, and is the part
274 // of the region for which no marking was done, i.e. objects may
275 // have been allocated in this part since the last mark phase.
276 // "prev" is the top at the start of the last completed marking.
277 // "next" is the top at the start of the in-progress marking (if any).
278 HeapWord* _prev_top_at_mark_start;
279 HeapWord* _next_top_at_mark_start;
319
320 static size_t GrainBytes;
321 static size_t GrainWords;
322 static size_t CardsPerRegion;
323
324 static size_t align_up_to_region_byte_size(size_t sz) {
325 return (sz + (size_t) GrainBytes - 1) &
326 ~((1 << (size_t) LogOfHRGrainBytes) - 1);
327 }
328
329 static size_t max_region_size();
330
331 // It sets up the heap region size (GrainBytes / GrainWords), as
332 // well as other related fields that are based on the heap region
333 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
334 // CardsPerRegion). All those fields are considered constant
335 // throughout the JVM's execution, therefore they should only be set
336 // up once during initialization time.
337 static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
338
// Claim values used to coordinate parallel iteration over heap regions:
// each parallel GC phase advances a region's _claimed field from the
// previous phase's value to its own (presumably via an atomic claim in
// claimHeapRegion() — implementation not visible here; confirm), so each
// region is processed exactly once per phase.
339 enum ClaimValues {
340 InitialClaimValue = 0,
341 FinalCountClaimValue = 1,
342 NoteEndClaimValue = 2,
343 ScrubRemSetClaimValue = 3,
344 ParVerifyClaimValue = 4,
345 RebuildRSClaimValue = 5,
346 ParEvacFailureClaimValue = 6,
347 AggregateCountClaimValue = 7,
348 VerifyCountClaimValue = 8,
349 ParMarkRootClaimValue = 9
350 };
351
352 // All allocated blocks are occupied by objects in a HeapRegion
353 bool block_is_obj(const HeapWord* p) const;
354
355 // Returns the object size for all valid block starts
356 // and the amount of unallocated words if called on top()
357 size_t block_size(const HeapWord* p) const;
358
359 inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
360 inline HeapWord* allocate_no_bot_updates(size_t word_size);
361
362 // If this region is a member of a HeapRegionManager, the index in that
363 // sequence, otherwise -1.
// NOTE(review): the return type is uint, so a "-1" sentinel wraps to
// UINT_MAX — confirm callers compare against the unsigned value.
364 uint hrm_index() const { return _hrm_index; }
365
366 // The number of bytes marked live in the region in the last marking phase.
367 size_t marked_bytes() { return _prev_marked_bytes; }
// Estimated live bytes: everything allocated after the last completed
// marking started (the words between prev_top_at_mark_start() and top())
// is implicitly live, plus the bytes that marking itself found live.
368 size_t live_bytes() {
369 return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
370 }
371
664 }
665 }
666
// Region-type state transitions; all are delegated to the _type field.
667 void set_free() { _type.set_free(); }
668
669 void set_eden() { _type.set_eden(); }
670 void set_eden_pre_gc() { _type.set_eden_pre_gc(); }
671 void set_survivor() { _type.set_survivor(); }
672
673 void set_old() { _type.set_old(); }
674
675 // Determine if an object has been allocated since the last
676 // mark performed by the collector. This returns true iff the object
677 // is within the unmarked area of the region.
678 bool obj_allocated_since_prev_marking(oop obj) const {
679 return (HeapWord *) obj >= prev_top_at_mark_start();
680 }
// Same test against the in-progress marking's top-at-mark-start.
681 bool obj_allocated_since_next_marking(oop obj) const {
682 return (HeapWord *) obj >= next_top_at_mark_start();
683 }
684
685 // For parallel heapRegion traversal.
686 bool claimHeapRegion(int claimValue);
// Plain (unsynchronized) read of the claim value.
687 jint claim_value() { return _claimed; }
688 // Use this carefully: only when you're sure no one is claiming...
// NOTE(review): plain store, no atomics — safe only outside parallel phases.
689 void set_claim_value(int claimValue) { _claimed = claimValue; }
690
691 // Returns the "evacuation_failed" property of the region.
692 bool evacuation_failed() { return _evacuation_failed; }
693
694 // Sets the "evacuation_failed" property of the region.
// When the flag is set, the in-progress marking's live count for this
// region is reset — presumably because the region's contents will be
// re-examined after the failed evacuation; confirm against the callers.
695 void set_evacuation_failed(bool b) {
696 _evacuation_failed = b;
697
698 if (b) {
699 _next_marked_bytes = 0;
700 }
701 }
702
703 // Requires that "mr" be entirely within the region.
704 // Apply "cl->do_object" to all objects that intersect with "mr".
705 // If the iteration encounters an unparseable portion of the region,
706 // or if "cl->abort()" is true after a closure application,
707 // terminate the iteration and return the address of the start of the
708 // subregion that isn't done. (The two can be distinguished by querying
709 // "cl->abort()".) Return of "NULL" indicates that the iteration
|
237
238 // A heap region may be a member of one of a number of special subsets, each
239 // represented as linked lists through the field below. Currently, there
240 // is only one set:
241 // The collection set.
242 HeapRegion* _next_in_special_set;
243
244 // next region in the young "generation" region set
245 HeapRegion* _next_young_region;
246
247 // Next region whose cards need cleaning
248 HeapRegion* _next_dirty_cards_region;
249
250 // Fields used by the HeapRegionSetBase class and subclasses.
251 HeapRegion* _next;
252 HeapRegion* _prev;
253 #ifdef ASSERT
254 HeapRegionSetBase* _containing_set;
255 #endif // ASSERT
256
257 // We use concurrent marking to determine the amount of live data
258 // in each heap region.
259 size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
260 size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
261
262 // The calculated GC efficiency of the region.
263 double _gc_efficiency;
264
265 int _young_index_in_cset;
266 SurvRateGroup* _surv_rate_group;
267 int _age_index;
268
269 // The start of the unmarked area. The unmarked area extends from this
270 // word until the top and/or end of the region, and is the part
271 // of the region for which no marking was done, i.e. objects may
272 // have been allocated in this part since the last mark phase.
273 // "prev" is the top at the start of the last completed marking.
274 // "next" is the top at the start of the in-progress marking (if any).
275 HeapWord* _prev_top_at_mark_start;
276 HeapWord* _next_top_at_mark_start;
316
317 static size_t GrainBytes;
318 static size_t GrainWords;
319 static size_t CardsPerRegion;
320
321 static size_t align_up_to_region_byte_size(size_t sz) {
322 return (sz + (size_t) GrainBytes - 1) &
323 ~((1 << (size_t) LogOfHRGrainBytes) - 1);
324 }
325
326 static size_t max_region_size();
327
328 // It sets up the heap region size (GrainBytes / GrainWords), as
329 // well as other related fields that are based on the heap region
330 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
331 // CardsPerRegion). All those fields are considered constant
332 // throughout the JVM's execution, therefore they should only be set
333 // up once during initialization time.
334 static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
335
336 // All allocated blocks are occupied by objects in a HeapRegion
337 bool block_is_obj(const HeapWord* p) const;
338
339 // Returns the object size for all valid block starts
340 // and the amount of unallocated words if called on top()
341 size_t block_size(const HeapWord* p) const;
342
343 inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
344 inline HeapWord* allocate_no_bot_updates(size_t word_size);
345
346 // If this region is a member of a HeapRegionManager, the index in that
347 // sequence, otherwise -1.
// NOTE(review): the return type is uint, so a "-1" sentinel wraps to
// UINT_MAX — confirm callers compare against the unsigned value.
348 uint hrm_index() const { return _hrm_index; }
349
350 // The number of bytes marked live in the region in the last marking phase.
351 size_t marked_bytes() { return _prev_marked_bytes; }
// Estimated live bytes: everything allocated after the last completed
// marking started (the words between prev_top_at_mark_start() and top())
// is implicitly live, plus the bytes that marking itself found live.
352 size_t live_bytes() {
353 return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
354 }
355
648 }
649 }
650
// Region-type state transitions; all are delegated to the _type field.
651 void set_free() { _type.set_free(); }
652
653 void set_eden() { _type.set_eden(); }
654 void set_eden_pre_gc() { _type.set_eden_pre_gc(); }
655 void set_survivor() { _type.set_survivor(); }
656
657 void set_old() { _type.set_old(); }
658
659 // Determine if an object has been allocated since the last
660 // mark performed by the collector. This returns true iff the object
661 // is within the unmarked area of the region.
662 bool obj_allocated_since_prev_marking(oop obj) const {
663 return (HeapWord *) obj >= prev_top_at_mark_start();
664 }
// Same test against the in-progress marking's top-at-mark-start.
665 bool obj_allocated_since_next_marking(oop obj) const {
666 return (HeapWord *) obj >= next_top_at_mark_start();
667 }
668
669 // Returns the "evacuation_failed" property of the region.
670 bool evacuation_failed() { return _evacuation_failed; }
671
672 // Sets the "evacuation_failed" property of the region.
// When the flag is set, the in-progress marking's live count for this
// region is reset — presumably because the region's contents will be
// re-examined after the failed evacuation; confirm against the callers.
673 void set_evacuation_failed(bool b) {
674 _evacuation_failed = b;
675
676 if (b) {
677 _next_marked_bytes = 0;
678 }
679 }
680
681 // Requires that "mr" be entirely within the region.
682 // Apply "cl->do_object" to all objects that intersect with "mr".
683 // If the iteration encounters an unparseable portion of the region,
684 // or if "cl->abort()" is true after a closure application,
685 // terminate the iteration and return the address of the start of the
686 // subregion that isn't done. (The two can be distinguished by querying
687 // "cl->abort()".) Return of "NULL" indicates that the iteration
|