219 }
220
221 inline bool scanned_block_is_obj(const HeapWord* addr) const {
222 return true; // Always true, since scan_limit is top
223 }
224
225 inline size_t scanned_block_size(const HeapWord* addr) const {
226 return HeapRegion::block_size(addr); // Avoid virtual call
227 }
228
229 protected:
230 // The index of this region in the heap region sequence.
231 uint _hrm_index;
232
// The allocation context this region is associatedated with.
// NOTE(review): semantics of AllocationContext_t not visible here.
233 AllocationContext_t _allocation_context;
234
// The current type of this region (see HeapRegionType).
235 HeapRegionType _type;
236
237 // For a humongous region, region in which it starts.
238 HeapRegion* _humongous_start_region;
239 // True iff the region is in current collection_set.
240 bool _in_collection_set;
241
242 // True iff an attempt to evacuate an object in the region failed.
243 bool _evacuation_failed;
244
245 // A heap region may be a member of one of a number of special subsets, each
246 // represented as a linked list through the field below. Currently, there
247 // is only one set:
248 // The collection set.
249 HeapRegion* _next_in_special_set;
250
251 // Next region in the young "generation" region set.
252 HeapRegion* _next_young_region;
253
254 // Next region whose cards need cleaning.
255 HeapRegion* _next_dirty_cards_region;
256
257 // Fields used by the HeapRegionSetBase class and subclasses.
258 HeapRegion* _next;
259 HeapRegion* _prev;
260 #ifdef ASSERT
470 // series and a single allocation moved its top to new_top. This
471 // ensures that the space (capacity / allocated) taken up by all
472 // humongous regions can be calculated by just looking at the
473 // "starts humongous" regions and by ignoring the "continues
474 // humongous" regions.
475 void set_starts_humongous(HeapWord* new_top, HeapWord* new_end);
476
477 // Makes the current region be a "continues humongous"
478 // region. first_hr is the "starts humongous" region of the series
479 // which this region will be part of.
480 void set_continues_humongous(HeapRegion* first_hr);
481
482 // Unsets the humongous-related fields on the region.
483 void clear_humongous();
484
485 // If the region has a remembered set, return a pointer to it.
486 HeapRegionRemSet* rem_set() const {
487 return _rem_set;
488 }
489
490 // True iff the region is in current collection_set.
491 bool in_collection_set() const {
492 return _in_collection_set;
493 }
494 void set_in_collection_set(bool b) {
495 _in_collection_set = b;
496 }
497 HeapRegion* next_in_collection_set() {
498 assert(in_collection_set(), "should only invoke on member of CS.");
499 assert(_next_in_special_set == NULL ||
500 _next_in_special_set->in_collection_set(),
501 "Malformed CS.");
502 return _next_in_special_set;
503 }
504 void set_next_in_collection_set(HeapRegion* r) {
505 assert(in_collection_set(), "should only invoke on member of CS.");
506 assert(r == NULL || r->in_collection_set(), "Malformed CS.");
507 _next_in_special_set = r;
508 }
509
510 void set_allocation_context(AllocationContext_t context) {
511 _allocation_context = context;
512 }
513
514 AllocationContext_t allocation_context() const {
515 return _allocation_context;
516 }
|
219 }
220
221 inline bool scanned_block_is_obj(const HeapWord* addr) const {
222 return true; // Always true, since scan_limit is top
223 }
224
225 inline size_t scanned_block_size(const HeapWord* addr) const {
226 return HeapRegion::block_size(addr); // Avoid virtual call
227 }
228
229 protected:
230 // The index of this region in the heap region sequence.
231 uint _hrm_index;
232
// The allocation context this region is associated with.
// NOTE(review): semantics of AllocationContext_t not visible here.
233 AllocationContext_t _allocation_context;
234
// The current type of this region (see HeapRegionType).
235 HeapRegionType _type;
236
237 // For a humongous region, region in which it starts.
238 HeapRegion* _humongous_start_region;
239
240 // True iff an attempt to evacuate an object in the region failed.
241 bool _evacuation_failed;
242
243 // A heap region may be a member of one of a number of special subsets, each
244 // represented as a linked list through the field below. Currently, there
245 // is only one set:
246 // The collection set.
247 HeapRegion* _next_in_special_set;
248
249 // Next region in the young "generation" region set.
250 HeapRegion* _next_young_region;
251
252 // Next region whose cards need cleaning.
253 HeapRegion* _next_dirty_cards_region;
254
255 // Fields used by the HeapRegionSetBase class and subclasses.
256 HeapRegion* _next;
257 HeapRegion* _prev;
258 #ifdef ASSERT
468 // series and a single allocation moved its top to new_top. This
469 // ensures that the space (capacity / allocated) taken up by all
470 // humongous regions can be calculated by just looking at the
471 // "starts humongous" regions and by ignoring the "continues
472 // humongous" regions.
473 void set_starts_humongous(HeapWord* new_top, HeapWord* new_end);
474
475 // Makes the current region be a "continues humongous"
476 // region. first_hr is the "starts humongous" region of the series
477 // which this region will be part of.
478 void set_continues_humongous(HeapRegion* first_hr);
479
480 // Unsets the humongous-related fields on the region.
481 void clear_humongous();
482
483 // If the region has a remembered set, return a pointer to it.
484 HeapRegionRemSet* rem_set() const {
485 return _rem_set;
486 }
487
// True iff the region is in the current collection set (defined out of line).
488 bool in_collection_set() const;
489
490 HeapRegion* next_in_collection_set() {
491 assert(in_collection_set(), "should only invoke on member of CS.");
492 assert(_next_in_special_set == NULL ||
493 _next_in_special_set->in_collection_set(),
494 "Malformed CS.");
495 return _next_in_special_set;
496 }
497 void set_next_in_collection_set(HeapRegion* r) {
498 assert(in_collection_set(), "should only invoke on member of CS.");
499 assert(r == NULL || r->in_collection_set(), "Malformed CS.");
500 _next_in_special_set = r;
501 }
502
503 void set_allocation_context(AllocationContext_t context) {
504 _allocation_context = context;
505 }
506
507 AllocationContext_t allocation_context() const {
508 return _allocation_context;
509 }
|