src/hotspot/share/gc/g1/heapRegion.hpp

--- old/src/hotspot/share/gc/g1/heapRegion.hpp

 454   // Unsets the humongous-related fields on the region.
 455   void clear_humongous();
 456 
 457   // If the region has a remembered set, return a pointer to it.
 458   HeapRegionRemSet* rem_set() const {
 459     return _rem_set;
 460   }
 461 
 462   inline bool in_collection_set() const;
 463 
 464   // Methods used by the HeapRegionSetBase class and subclasses.
 465 
 466   // Getter and setter for the next and prev fields used to link regions into
 467   // linked lists.
 468   void set_next(HeapRegion* next) { _next = next; }
 469   HeapRegion* next()              { return _next; }
 470 
 471   void set_prev(HeapRegion* prev) { _prev = prev; }
 472   HeapRegion* prev()              { return _prev; }
 473 


 474   // Every region added to a set is tagged with a reference to that
 475   // set. This is used for consistency checking, to make sure that
 476   // the contents of a set are as they should be; it is only
 477   // available in non-product builds.
 478 #ifdef ASSERT
 479   void set_containing_set(HeapRegionSetBase* containing_set) {
 480     assert((containing_set == NULL && _containing_set != NULL) ||
 481            (containing_set != NULL && _containing_set == NULL),
 482            "containing_set: " PTR_FORMAT " "
 483            "_containing_set: " PTR_FORMAT,
 484            p2i(containing_set), p2i(_containing_set));
 485 
 486     _containing_set = containing_set;
 487   }
 488 
 489   HeapRegionSetBase* containing_set() { return _containing_set; }
 490 #else // ASSERT
 491   void set_containing_set(HeapRegionSetBase* containing_set) { }
 492 
 493   // containing_set() is only used in asserts, so there's no reason
 494   // to provide a dummy version of it.
 495 #endif // ASSERT
 496 
 497 
 498   // Reset the HeapRegion to default values.
 499   // If skip_remset is true, do not clear the remembered set.
 500   // If clear_space is true, clear the HeapRegion's memory.
 501   // If locked is true, assume we are the only thread doing this operation.


 581     if (_surv_rate_group != NULL) {
 582       assert(_age_index > -1, "pre-condition");
 583       assert(is_young(), "pre-condition");
 584 
 585       _surv_rate_group = NULL;
 586       _age_index = -1;
 587     } else {
 588       assert(_age_index == -1, "pre-condition");
 589     }
 590   }
 591 
 592   // Determine if an object has been allocated since the last
 593   // mark performed by the collector. This returns true iff the object
 594   // is within the unmarked area of the region.
 595   bool obj_allocated_since_prev_marking(oop obj) const {
 596     return (HeapWord *) obj >= prev_top_at_mark_start();
 597   }
 598   bool obj_allocated_since_next_marking(oop obj) const {
 599     return (HeapWord *) obj >= next_top_at_mark_start();
 600   }



 601 
 602   // Iterate over the objects overlapping the given memory region, applying cl
 603   // to all references in the region.  This is a helper for
 604   // G1RemSet::refine_card*, and is tightly coupled with them.
 605   // mr must not be empty and must be trimmed to the allocated/parseable space in this region.
 606   // This region must be old or humongous.
 607   // Returns the next unscanned address if the designated objects were successfully
 608   // processed, or NULL if an unparseable part of the heap was encountered (this should
 609   // only happen when invoked concurrently with the mutator).
 610   template <bool is_gc_active, class Closure>
 611   inline HeapWord* oops_on_memregion_seq_iterate_careful(MemRegion mr, Closure* cl);
 612 
 613   size_t recorded_rs_length() const        { return _recorded_rs_length; }
 614   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
 615 
 616   void set_recorded_rs_length(size_t rs_length) {
 617     _recorded_rs_length = rs_length;
 618   }
 619 
 620   void set_predicted_elapsed_time_ms(double ms) {


+++ new/src/hotspot/share/gc/g1/heapRegion.hpp

 454   // Unsets the humongous-related fields on the region.
 455   void clear_humongous();
 456 
 457   // If the region has a remembered set, return a pointer to it.
 458   HeapRegionRemSet* rem_set() const {
 459     return _rem_set;
 460   }
 461 
 462   inline bool in_collection_set() const;
 463 
 464   // Methods used by the HeapRegionSetBase class and subclasses.
 465 
 466   // Getter and setter for the next and prev fields used to link regions into
 467   // linked lists.
 468   void set_next(HeapRegion* next) { _next = next; }
 469   HeapRegion* next()              { return _next; }
 470 
 471   void set_prev(HeapRegion* prev) { _prev = prev; }
 472   HeapRegion* prev()              { return _prev; }
 473 
 474   void unlink_from_list();
 475 
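The next/prev accessors are the hooks HeapRegionSetBase and its subclasses use to thread regions onto intrusive doubly-linked lists, and the newly added unlink_from_list() presumably bundles the usual splice-out of both links. As a minimal standalone sketch of that operation (a hypothetical Region stand-in, not the real HeapRegion):

  #include <cstddef>

  // Hypothetical stand-in for HeapRegion's intrusive list fields.
  struct Region {
    Region* _next = nullptr;
    Region* _prev = nullptr;

    void set_next(Region* next) { _next = next; }
    Region* next()              { return _next; }
    void set_prev(Region* prev) { _prev = prev; }
    Region* prev()              { return _prev; }

    // Splice this region out of its list and clear both links, which is
    // what a method like unlink_from_list() would be expected to do.
    void unlink_from_list() {
      if (_prev != nullptr) { _prev->set_next(_next); }
      if (_next != nullptr) { _next->set_prev(_prev); }
      _next = nullptr;
      _prev = nullptr;
    }
  };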
 476   // Every region added to a set is tagged with a reference to that
 477   // set. This is used for consistency checking, to make sure that
 478   // the contents of a set are as they should be; it is only
 479   // available in non-product builds.
 480 #ifdef ASSERT
 481   void set_containing_set(HeapRegionSetBase* containing_set) {
 482     assert((containing_set != NULL && _containing_set == NULL) ||
 483             containing_set == NULL,
 484            "containing_set: " PTR_FORMAT " "
 485            "_containing_set: " PTR_FORMAT,
 486            p2i(containing_set), p2i(_containing_set));
 487 
 488     _containing_set = containing_set;
 489   }
 490 
 491   HeapRegionSetBase* containing_set() { return _containing_set; }
 492 #else // ASSERT
 493   void set_containing_set(HeapRegionSetBase* containing_set) { }
 494 
 495   // containing_set() is only used in asserts, so there's no reason
 496   // to provide a dummy version of it.
 497 #endif // ASSERT
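The intended protocol is that a set installs a pointer to itself when it takes ownership of a region and installs NULL when it gives the region up; note that the new version of the assert also tolerates clearing a region whose tag is already NULL, where the old version required a non-NULL tag. A hedged sketch of the debug-build handshake, using hypothetical stand-in types rather than the HotSpot classes:

  #include <cassert>
  #include <cstddef>

  struct RegionSet;  // the tag type; forward-declared stand-in

  struct TaggedRegion {
    RegionSet* _containing_set = nullptr;

    void set_containing_set(RegionSet* containing_set) {
      // Mirrors the relaxed assert above: installing a set requires an
      // untagged region; clearing (NULL) is allowed unconditionally.
      assert(((containing_set != nullptr && _containing_set == nullptr) ||
              containing_set == nullptr) && "containing-set invariant");
      _containing_set = containing_set;
    }
  };

  struct RegionSet {
    void add(TaggedRegion* r)    { r->set_containing_set(this); }
    void remove(TaggedRegion* r) { r->set_containing_set(nullptr); }
  };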
 498 
 499 
 500   // Reset the HeapRegion to default values.
 501   // If skip_remset is true, do not clear the remembered set.
 502   // If clear_space is true, clear the HeapRegion's memory.
 503   // If locked is true, assume we are the only thread doing this operation.


 583     if (_surv_rate_group != NULL) {
 584       assert(_age_index > -1, "pre-condition");
 585       assert(is_young(), "pre-condition");
 586 
 587       _surv_rate_group = NULL;
 588       _age_index = -1;
 589     } else {
 590       assert(_age_index == -1, "pre-condition");
 591     }
 592   }
 593 
 594   // Determine if an object has been allocated since the last
 595   // mark performed by the collector. This returns true iff the object
 596   // is within the unmarked area of the region.
 597   bool obj_allocated_since_prev_marking(oop obj) const {
 598     return (HeapWord *) obj >= prev_top_at_mark_start();
 599   }
 600   bool obj_allocated_since_next_marking(oop obj) const {
 601     return (HeapWord *) obj >= next_top_at_mark_start();
 602   }
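Both predicates reduce to an address comparison against a top-at-mark-start (TAMS) snapshot: the marker records the region's top when a cycle starts, and anything allocated at or above that snapshot has no mark bit yet, so it is treated as implicitly live. A toy illustration of the invariant, with plain addresses standing in for real HeapRegion state:

  #include <cassert>

  typedef char HeapWord;  // toy stand-in for HotSpot's HeapWord

  // Toy model: a region [bottom, top) plus a snapshot of top taken when
  // the previous marking cycle started (prev TAMS).
  struct ToyRegion {
    HeapWord* _bottom;
    HeapWord* _prev_tams;
    HeapWord* _top;

    bool obj_allocated_since_prev_marking(HeapWord* obj) const {
      // Objects at or above prev TAMS were allocated after the mark
      // began, so they carry no mark bit and count as implicitly live.
      return obj >= _prev_tams;
    }
  };

  int main() {
    HeapWord heap[64];
    ToyRegion r = { heap, heap + 16, heap + 32 };
    assert(!r.obj_allocated_since_prev_marking(heap + 8));   // below TAMS: needs a mark
    assert(r.obj_allocated_since_prev_marking(heap + 20));   // above TAMS: implicitly live
    return 0;
  }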
 603 
 604   // Update the region state after a failed evacuation.
 605   void handle_evacuation_failed();
 606 
 607   // Iterate over the objects overlapping the given memory region, applying cl
 608   // to all references in the region.  This is a helper for
 609   // G1RemSet::refine_card*, and is tightly coupled with them.
 610   // mr must not be empty and must be trimmed to the allocated/parseable space in this region.
 611   // This region must be old or humongous.
 612   // Returns the next unscanned address if the designated objects were successfully
 613   // processed, or NULL if an unparseable part of the heap was encountered (this should
 614   // only happen when invoked concurrently with the mutator).
 615   template <bool is_gc_active, class Closure>
 616   inline HeapWord* oops_on_memregion_seq_iterate_careful(MemRegion mr, Closure* cl);
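The NULL return is the delicate part of this contract: a refinement thread running concurrently with mutators can encounter a card whose memory is not yet parseable, and it must back off rather than assert. A hedged sketch of a caller honoring that contract (scan_card is a hypothetical helper, not the actual G1RemSet code; MemRegion and HeapWord are the usual HotSpot types):

  template <class Region, class Closure>
  bool scan_card(Region* region, MemRegion mr, Closure* cl) {
    // false = not at a safepoint, i.e. running concurrently with mutators.
    HeapWord* next =
        region->template oops_on_memregion_seq_iterate_careful<false>(mr, cl);
    if (next == NULL) {
      // Unparseable heap encountered: give up on this card and let it be
      // re-examined later instead of failing.
      return false;
    }
    // next points just past the last object processed in mr.
    return true;
  }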
 617 
 618   size_t recorded_rs_length() const        { return _recorded_rs_length; }
 619   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
 620 
 621   void set_recorded_rs_length(size_t rs_length) {
 622     _recorded_rs_length = rs_length;
 623   }
 624 
 625   void set_predicted_elapsed_time_ms(double ms) {

